From 6c6a9eddc882eaf97f9d1aaac82bae54050cbbe2 Mon Sep 17 00:00:00 2001 From: Friedrich Gonzalez Date: Tue, 30 Apr 2024 20:28:35 +0200 Subject: [PATCH 1/3] Remove chunktool Chunk storage was deprecated after v1.13.2 Signed-off-by: Friedrich Gonzalez --- Makefile | 2 +- cmd/chunktool/Dockerfile | 12 - cmd/chunktool/main.go | 25 - go.mod | 46 +- go.sum | 25 - pkg/chunk/cassandra/scanner.go | 180 ------- pkg/chunk/cassandra/schema_util.go | 117 ----- pkg/chunk/cassandra/storage_client.go | 199 -------- pkg/chunk/deleter.go | 12 - pkg/chunk/filter/filter.go | 69 --- pkg/chunk/gcp/bigtable_delete.go | 117 ----- pkg/chunk/gcp/bigtable_scanner.go | 74 --- pkg/chunk/gcp/fnv.go | 36 -- pkg/chunk/gcp/gcs_scanner.go | 92 ---- pkg/chunk/migrate/README.md | 111 ----- pkg/chunk/migrate/migrator.go | 69 --- pkg/chunk/migrate/reader/planner.go | 94 ---- pkg/chunk/migrate/reader/reader.go | 166 ------- pkg/chunk/migrate/writer/mapper.go | 38 -- pkg/chunk/migrate/writer/writer.go | 135 ------ pkg/chunk/scanner.go | 42 -- pkg/chunk/storage/factory.go | 23 - pkg/commands/chunks.go | 658 -------------------------- pkg/commands/migrate.go | 46 -- 24 files changed, 4 insertions(+), 2384 deletions(-) delete mode 100644 cmd/chunktool/Dockerfile delete mode 100644 cmd/chunktool/main.go delete mode 100644 pkg/chunk/cassandra/scanner.go delete mode 100644 pkg/chunk/cassandra/schema_util.go delete mode 100644 pkg/chunk/cassandra/storage_client.go delete mode 100644 pkg/chunk/deleter.go delete mode 100644 pkg/chunk/filter/filter.go delete mode 100644 pkg/chunk/gcp/bigtable_delete.go delete mode 100644 pkg/chunk/gcp/bigtable_scanner.go delete mode 100644 pkg/chunk/gcp/fnv.go delete mode 100644 pkg/chunk/gcp/gcs_scanner.go delete mode 100644 pkg/chunk/migrate/README.md delete mode 100644 pkg/chunk/migrate/migrator.go delete mode 100644 pkg/chunk/migrate/reader/planner.go delete mode 100644 pkg/chunk/migrate/reader/reader.go delete mode 100644 pkg/chunk/migrate/writer/mapper.go delete mode 100644 pkg/chunk/migrate/writer/writer.go delete mode 100644 pkg/chunk/scanner.go delete mode 100644 pkg/chunk/storage/factory.go delete mode 100644 pkg/commands/chunks.go delete mode 100644 pkg/commands/migrate.go diff --git a/Makefile b/Makefile index 688e80540..d666f293d 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ IMAGE_TAG := $(shell ./tools/image-tag) GIT_REVISION := $(shell git rev-parse --short HEAD) GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) GO_FLAGS := -mod=vendor -ldflags "-extldflags \"-static\" -s -w -X $(VPREFIX).Branch=$(GIT_BRANCH) -X $(VPREFIX).Version=$(IMAGE_TAG) -X $(VPREFIX).Revision=$(GIT_REVISION)" -tags netgo -APP_NAMES := benchtool blockgen blockscopy chunktool cortextool deserializer e2ealerting logtool rules-migrator sim +APP_NAMES := benchtool blockgen blockscopy cortextool deserializer e2ealerting logtool rules-migrator sim all: $(APP_NAMES) images: $(addsuffix -image, $(APP_NAMES)) diff --git a/cmd/chunktool/Dockerfile b/cmd/chunktool/Dockerfile deleted file mode 100644 index bb464ac48..000000000 --- a/cmd/chunktool/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM golang:1.22.2-bookworm as build -ARG GOARCH="amd64" -COPY . 
/build_dir -WORKDIR /build_dir -ENV GOPROXY=https://proxy.golang.org -RUN make clean && make chunktool - -FROM alpine:3.19.1 -RUN apk add --update --no-cache ca-certificates -COPY --from=build /build_dir/cmd/chunktool/chunktool /usr/bin/chunktool -EXPOSE 80 -ENTRYPOINT [ "/usr/bin/chunktool" ] diff --git a/cmd/chunktool/main.go b/cmd/chunktool/main.go deleted file mode 100644 index 70a5e18f6..000000000 --- a/cmd/chunktool/main.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -import ( - "os" - - "gopkg.in/alecthomas/kingpin.v2" - - "github.com/cortexproject/cortex-tools/pkg/commands" -) - -var ( - logConfig commands.LoggerConfig - pushGateway commands.PushGatewayConfig -) - -func main() { - kingpin.Version("0.0.1") - app := kingpin.New("chunktool", "A command-line tool to manage cortex chunk backends.") - logConfig.Register(app) - commands.RegisterChunkCommands(app) - pushGateway.Register(app) - kingpin.MustParse(app.Parse(os.Args[1:])) - - pushGateway.Stop() -} diff --git a/go.mod b/go.mod index 5a1850802..3ff49574c 100644 --- a/go.mod +++ b/go.mod @@ -3,13 +3,11 @@ module github.com/cortexproject/cortex-tools go 1.22.2 require ( - cloud.google.com/go/bigtable v1.3.0 cloud.google.com/go/storage v1.10.0 github.com/alecthomas/chroma v0.7.0 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/cortexproject/cortex v1.13.2 github.com/go-kit/log v0.2.0 - github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b @@ -21,7 +19,6 @@ require ( github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-stdlib v1.0.0 - github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.24.0 github.com/prometheus/client_golang v1.12.1 @@ -31,7 +28,6 @@ require ( github.com/stretchr/testify v1.7.1 github.com/thanos-io/thanos v0.22.0 github.com/weaveworks/common v0.0.0-20211015155308-ebe5bdc2c89e - go.uber.org/atomic v1.9.0 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/api v0.74.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 @@ -42,6 +38,7 @@ require ( require ( cloud.google.com/go v0.100.2 // indirect + cloud.google.com/go/bigtable v1.3.0 // indirect cloud.google.com/go/compute v1.5.0 // indirect cloud.google.com/go/iam v0.3.0 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect @@ -54,19 +51,13 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 // indirect - github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/armon/go-metrics v0.3.9 // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/aws/aws-sdk-go v1.43.31 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash v1.1.0 // indirect 
github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/coreos/go-semver v0.3.0 // indirect @@ -77,36 +68,20 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/dlclark/regexp2 v1.2.0 // indirect - github.com/docker/go-units v0.4.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/fatih/color v1.13.0 // indirect - github.com/felixge/fgprof v0.9.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/fsouza/fake-gcs-server v1.7.0 // indirect - github.com/go-kit/kit v0.12.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/runtime v0.23.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/strfmt v0.21.2 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect github.com/go-redis/redis/v8 v8.11.4 // indirect - github.com/go-stack/stack v1.8.1 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/status v1.1.0 // indirect github.com/golang-jwt/jwt/v4 v4.2.0 // indirect - github.com/golang-migrate/migrate/v4 v4.7.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac // indirect @@ -118,14 +93,12 @@ require ( github.com/google/btree v1.0.1 // indirect github.com/google/go-cmp v0.5.7 // indirect github.com/google/go-querystring v1.0.0 // indirect - github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda // indirect github.com/google/uuid v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.2.0 // indirect github.com/gosimple/slug v1.1.1 // indirect github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect - github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hashicorp/consul/api v1.12.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -138,17 +111,10 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/memberlist v0.3.1 // indirect github.com/hashicorp/serf v0.9.6 // indirect - github.com/jessevdk/go-flags v1.5.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/klauspost/cpuid v1.3.1 // indirect - github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect - github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/lib/pq v1.3.0 // indirect - 
github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.14 // indirect @@ -162,38 +128,33 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/ncw/swift v1.0.52 // indirect - github.com/oklog/run v1.1.0 // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/exporter-toolkit v0.7.1 // indirect github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be // indirect - github.com/rs/cors v1.8.2 // indirect github.com/rs/xid v1.2.1 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect - github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/sony/gobreaker v0.4.1 // indirect - github.com/spf13/afero v1.6.0 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/uber/jaeger-client-go v2.29.1+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/weaveworks/promrus v1.2.0 // indirect - go.etcd.io/bbolt v1.3.6 // indirect go.etcd.io/etcd/api/v3 v3.5.4 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect go.etcd.io/etcd/client/v3 v3.5.4 // indirect - go.mongodb.org/mongo-driver v1.8.3 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.1 // indirect go.opentelemetry.io/otel/metric v0.28.0 // indirect go.opentelemetry.io/otel/trace v1.6.1 // indirect + go.uber.org/atomic v1.9.0 // indirect go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.1 // indirect @@ -210,7 +171,6 @@ require ( google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.57.0 // indirect rsc.io/binaryregexp v0.2.0 // indirect ) diff --git a/go.sum b/go.sum index 4c6a214fa..422cbff04 100644 --- a/go.sum +++ b/go.sum @@ -77,7 +77,6 @@ github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9 github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod 
h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -141,7 +140,6 @@ github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXY github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 h1:PPfYWScYacO3Q6JMCLkyh6Ea2Q/REDTMgmiTAeiV8Jg= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -278,7 +276,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= @@ -287,7 +284,6 @@ github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= @@ -305,7 +301,6 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -528,7 +523,6 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/go-sip13 
v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/digitalocean/godo v1.71.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs= github.com/digitalocean/godo v1.78.0 h1:hKMfHXChSMjZFMSev+m5R4/2rxZ3HPdhlpeA2pJI72M= @@ -660,7 +654,6 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= @@ -740,7 +733,6 @@ github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2g github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= -github.com/go-openapi/runtime v0.23.1 h1:/Drg9R96eMmgKJHVWZADz78XbE39/6QiIiB45mc+epo= github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= @@ -856,7 +848,6 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -879,7 +870,6 @@ github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzq github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-migrate/migrate/v4 v4.7.0 h1:gONcHxHApDTKXDyLH/H97gEHmpu1zcnnbAaq2zgrPrs= github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod 
h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= @@ -1039,7 +1029,6 @@ github.com/gosimple/slug v1.1.1 h1:fRu/digW+NMwBIP+RmviTK97Ho/bEj/C9swrCspN3D4= github.com/gosimple/slug v1.1.1/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0= github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1 h1:Qf+/W3Tup0nO21tgJmO14WJK0yyrm4L2UJipZP+Zoow= github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1/go.mod h1:uPG2nyK4CtgNDmWv7qyzYcdI+S90kHHRWvHnBtEMBXM= -github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= @@ -1060,7 +1049,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -1178,7 +1166,6 @@ github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbB github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1258,20 +1245,16 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 
h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= @@ -1393,7 +1376,6 @@ github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGq github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -1405,7 +1387,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= @@ -1441,7 +1422,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 
@@ -1646,7 +1626,6 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= -github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -1669,7 +1648,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -1712,7 +1690,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1765,7 +1742,6 @@ github.com/thanos-io/thanos v0.19.1-0.20211208205607-d1acaea2a11a/go.mod h1:LKBx github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -1846,7 +1822,6 @@ go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9v go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd 
v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= diff --git a/pkg/chunk/cassandra/scanner.go b/pkg/chunk/cassandra/scanner.go deleted file mode 100644 index 8895d9925..000000000 --- a/pkg/chunk/cassandra/scanner.go +++ /dev/null @@ -1,180 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - "sync" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/cassandra" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/sirupsen/logrus" - "go.uber.org/atomic" -) - -// scanBatch represents a batch of rows read from Cassandra. -type scanBatch struct { - hash []byte - rangeValue []byte - value []byte -} - -type IndexValidator struct { - schema chunk.SchemaConfig - s *StorageClient - o *ObjectClient - tenantID string - - totalIgnoredTime *atomic.Int64 - totalInvalid *atomic.Int64 -} - -func NewIndexValidator( - cfg cassandra.Config, - schema chunk.SchemaConfig, - tenantID string, -) (*IndexValidator, error) { - logrus.Debug("Connecting to Cassandra") - o, err := NewObjectClient( - cfg, - schema, - prometheus.NewRegistry(), - ) - if err != nil { - return nil, err - } - - s, err := NewStorageClient( - cfg, - schema, - prometheus.NewRegistry(), - ) - if err != nil { - return nil, err - } - - logrus.Debug("Connected") - return &IndexValidator{ - schema: schema, - s: s, - o: o, - tenantID: tenantID, - totalIgnoredTime: atomic.NewInt64(0), - totalInvalid: atomic.NewInt64(0), - }, nil -} - -func (i *IndexValidator) Stop() { - i.s.Stop() -} - -func (i *IndexValidator) IndexScan(ctx context.Context, table string, from model.Time, to model.Time, out chan string) error { - q := i.s.readSession.Query(fmt.Sprintf("SELECT hash, range, value FROM %s", table)) - - iter := q.WithContext(ctx).Iter() - defer iter.Close() - scanner := iter.Scanner() - - wg := &sync.WaitGroup{} - batchChan := make(chan scanBatch, 1000) - - for n := 0; n < 64; n++ { - wg.Add(1) - go func() { - defer wg.Done() - for b := range batchChan { - i.checkEntry(ctx, from, to, out, b) - } - }() - } - - rowsReadTotal := 0 - - logrus.WithFields(logrus.Fields{ - "table": table, - "from_ts": from.String(), - "to_ts": to.String(), - }).Infoln("starting scan") - - for scanner.Next() { - b := scanBatch{} - if err := scanner.Scan(&b.hash, &b.rangeValue, &b.value); err != nil { - return errors.WithStack(err) - } - batchChan <- b - rowsReadTotal++ - if rowsReadTotal%25000 == 0 { - logrus.WithFields(logrus.Fields{ - "entries_scanned": rowsReadTotal, - "entries_outside_range_skipped": i.totalIgnoredTime.Load(), - "entries_invalid_found": i.totalInvalid.Load(), - }).Infoln("scan progress") - } - } - close(batchChan) - wg.Wait() - return errors.WithStack(scanner.Err()) -} - -func (i *IndexValidator) checkEntry( - ctx context.Context, - from model.Time, - to model.Time, - out chan string, - entry scanBatch, -) { - chunkID, _, isSeriesID, err := parseChunkTimeRangeValue(entry.rangeValue, entry.value) - if err != nil { - logrus.WithField("chunk_id", chunkID).WithError(err).Errorln("unable to parse chunk time range value") - return - } - - if isSeriesID { - logrus.WithField("series_id", chunkID).Debugln("ignoring series id row") - return - } - - c, err := chunk.ParseExternalKey(i.tenantID, chunkID) - if err != nil { - logrus.WithField("chunk_id", chunkID).WithError(err).Errorln("unable to 
parse external key") - return - } - - if from > c.Through || (c.From > to && to > 0) { - i.totalIgnoredTime.Inc() - logrus.WithField("chunk_id", chunkID).Debugln("ignoring chunk outside time range") - return - } - - chunkTable, err := i.schema.ChunkTableFor(c.From) - if err != nil { - logrus.WithFields(logrus.Fields{ - "chunk_id": chunkID, - "from": c.From.String(), - "through": c.Through.String(), - }).WithError(err).Errorln("unable to determine chunk table") - return - } - - var count int - err = i.o.readSession.Query( - fmt.Sprintf("SELECT count(*) FROM %s WHERE hash = ?", chunkTable), - c.ExternalKey(), - ).WithContext(ctx).Scan(&count) - - if err != nil { - logrus.WithFields(logrus.Fields{ - "chunk_id": chunkID, - }).WithError(err).Errorln("unable to read chunk table") - return - } - - chunkExists := count > 0 - if !chunkExists { - i.totalInvalid.Inc() - logrus.WithField("chunk_id", chunkID).Infoln("chunk not found, adding index entry to output file") - out <- fmt.Sprintf("%s,0x%x\n", string(entry.hash), entry.rangeValue) - } -} diff --git a/pkg/chunk/cassandra/schema_util.go b/pkg/chunk/cassandra/schema_util.go deleted file mode 100644 index d767db3d3..000000000 --- a/pkg/chunk/cassandra/schema_util.go +++ /dev/null @@ -1,117 +0,0 @@ -package cassandra - -import ( - "encoding/base64" - - "fmt" - - "github.com/pkg/errors" - "github.com/prometheus/common/model" -) - -const ( - chunkTimeRangeKeyV1a = 1 - chunkTimeRangeKeyV1 = '1' - chunkTimeRangeKeyV2 = '2' - chunkTimeRangeKeyV3 = '3' - chunkTimeRangeKeyV4 = '4' - chunkTimeRangeKeyV5 = '5' - - // For v9 schema - seriesRangeKeyV1 = '7' - labelSeriesRangeKeyV1 = '8' -) - -func decodeRangeKey(value []byte) [][]byte { - components := make([][]byte, 0, 5) - i, j := 0, 0 - for j < len(value) { - if value[j] != 0 { - j++ - continue - } - components = append(components, value[i:j]) - j++ - i = j - } - return components -} - -func decodeBase64Value(bs []byte) (model.LabelValue, error) { - decodedLen := base64.RawStdEncoding.DecodedLen(len(bs)) - decoded := make([]byte, decodedLen) - if _, err := base64.RawStdEncoding.Decode(decoded, bs); err != nil { - return "", err - } - return model.LabelValue(decoded), nil -} - -// parseChunkTimeRangeValue returns the chunkID and labelValue for chunk time -// range values. -func parseChunkTimeRangeValue(rangeValue []byte, value []byte) ( - chunkID string, labelValue model.LabelValue, isSeriesID bool, err error, -) { - components := decodeRangeKey(rangeValue) - - switch { - case len(components) < 3: - err = errors.Errorf("invalid chunk time range value: %x", rangeValue) - return - - // v1 & v2 schema had three components - label name, label value and chunk ID. - // No version number. - case len(components) == 3: - chunkID = string(components[2]) - labelValue = model.LabelValue(components[1]) - return - - case len(components[3]) == 1: - switch components[3][0] { - // v3 schema had four components - label name, label value, chunk ID and version. - // "version" is 1 and label value is base64 encoded. - // (older code wrote "version" as 1, not '1') - case chunkTimeRangeKeyV1a, chunkTimeRangeKeyV1: - chunkID = string(components[2]) - labelValue, err = decodeBase64Value(components[1]) - return - - // v4 schema wrote v3 range keys and a new range key - version 2, - // with four components - , , chunk ID and version. 
- case chunkTimeRangeKeyV2:
- chunkID = string(components[2])
- return
-
- // v5 schema version 3 range key is chunk end time, <empty>, chunk ID, version
- case chunkTimeRangeKeyV3:
- chunkID = string(components[2])
- return
-
- // v5 schema version 4 range key is chunk end time, label value, chunk ID, version
- case chunkTimeRangeKeyV4:
- chunkID = string(components[2])
- labelValue, err = decodeBase64Value(components[1])
- return
-
- // v6 schema added version 5 range keys, which have the label value written in
- // to the value, not the range key. So they are [chunk end time, <empty>, chunk ID, version].
- case chunkTimeRangeKeyV5:
- chunkID = string(components[2])
- labelValue = model.LabelValue(value)
- return
-
- // v9 schema actually returns series IDs
- case seriesRangeKeyV1:
- chunkID = string(components[0])
- isSeriesID = true
- return
-
- case labelSeriesRangeKeyV1:
- chunkID = string(components[1])
- labelValue = model.LabelValue(value)
- isSeriesID = true
- return
- }
- }
- err = fmt.Errorf("unrecognised chunkTimeRangeKey version: %q", string(components[3]))
- return
-}
diff --git a/pkg/chunk/cassandra/storage_client.go b/pkg/chunk/cassandra/storage_client.go
deleted file mode 100644
index 6e2b69866..000000000
--- a/pkg/chunk/cassandra/storage_client.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package cassandra
-
-import (
- "bytes"
- "crypto/tls"
- "os"
- "strings"
-
- "github.com/cortexproject/cortex/pkg/chunk"
- "github.com/cortexproject/cortex/pkg/chunk/cassandra"
- util_log "github.com/cortexproject/cortex/pkg/util/log"
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
- "github.com/gocql/gocql"
- "github.com/pkg/errors"
- "github.com/prometheus/client_golang/prometheus"
- "golang.org/x/sync/semaphore"
-)
-
-func session(cfg cassandra.Config, name string, reg prometheus.Registerer) (*gocql.Session, error) {
- cluster := gocql.NewCluster(strings.Split(cfg.Addresses, ",")...)
- cluster.Port = cfg.Port
- cluster.Keyspace = cfg.Keyspace
- cluster.Timeout = cfg.Timeout
- cluster.ConnectTimeout = cfg.ConnectTimeout
- cluster.ReconnectInterval = cfg.ReconnectInterval
- cluster.NumConns = cfg.NumConnections
- cluster.Logger = log.With(util_log.Logger, "module", "gocql", "client", name)
- cluster.Registerer = prometheus.WrapRegistererWith(
- prometheus.Labels{"client": name}, reg)
- if cfg.Retries > 0 {
- cluster.RetryPolicy = &gocql.ExponentialBackoffRetryPolicy{
- NumRetries: cfg.Retries,
- Min: cfg.MinBackoff,
- Max: cfg.MaxBackoff,
- }
- }
- if !cfg.ConvictHosts {
- cluster.ConvictionPolicy = noopConvictionPolicy{}
- }
- if err := setClusterConfig(cfg, cluster); err != nil {
- return nil, errors.WithStack(err)
- }
-
- session, err := cluster.CreateSession()
- if err == nil {
- return session, nil
- }
- // ErrNoConnectionsStarted will be returned if the keyspace doesn't exist or is invalid.
- // ref.
https://github.com/gocql/gocql/blob/07ace3bab0f84bb88477bab5d79ba1f7e1da0169/cassandra_test.go#L85-L97
- if err != gocql.ErrNoConnectionsStarted {
- return nil, errors.WithStack(err)
- }
-
- session, err = cluster.CreateSession()
- return session, errors.WithStack(err)
-}
-
-// apply config settings to a cassandra ClusterConfig
-func setClusterConfig(cfg cassandra.Config, cluster *gocql.ClusterConfig) error {
- consistency, err := gocql.ParseConsistencyWrapper(cfg.Consistency)
- if err != nil {
- return errors.Wrap(err, "unable to parse the configured consistency")
- }
-
- cluster.Consistency = consistency
- cluster.DisableInitialHostLookup = cfg.DisableInitialHostLookup
-
- if cfg.SSL {
- if cfg.HostVerification {
- cluster.SslOpts = &gocql.SslOptions{
- CaPath: cfg.CAPath,
- EnableHostVerification: true,
- Config: &tls.Config{
- ServerName: strings.Split(cfg.Addresses, ",")[0],
- },
- }
- } else {
- cluster.SslOpts = &gocql.SslOptions{
- EnableHostVerification: false,
- }
- }
- }
- if cfg.Auth {
- password := cfg.Password.Value
- if cfg.PasswordFile != "" {
- passwordBytes, err := os.ReadFile(cfg.PasswordFile)
- if err != nil {
- return errors.Errorf("Could not read Cassandra password file: %v", err)
- }
- passwordBytes = bytes.TrimRight(passwordBytes, "\n")
- password = string(passwordBytes)
- }
- if len(cfg.CustomAuthenticators) != 0 {
- cluster.Authenticator = cassandra.CustomPasswordAuthenticator{
- ApprovedAuthenticators: cfg.CustomAuthenticators,
- Username: cfg.Username,
- Password: password,
- }
- return nil
- }
- cluster.Authenticator = gocql.PasswordAuthenticator{
- Username: cfg.Username,
- Password: password,
- }
- }
- return nil
-}
-
-// StorageClient implements chunk.IndexClient and chunk.ObjectClient for Cassandra.
-type StorageClient struct {
- cfg cassandra.Config
- schemaCfg chunk.SchemaConfig
- readSession *gocql.Session
- writeSession *gocql.Session
- querySemaphore *semaphore.Weighted
-}
-
-// NewStorageClient returns a new StorageClient.
-func NewStorageClient(cfg cassandra.Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*StorageClient, error) {
- readSession, err := session(cfg, "index-read", registerer)
- if err != nil {
- return nil, errors.WithStack(err)
- }
-
- writeSession, err := session(cfg, "index-write", registerer)
- if err != nil {
- return nil, errors.WithStack(err)
- }
-
- var querySemaphore *semaphore.Weighted
- if cfg.QueryConcurrency > 0 {
- querySemaphore = semaphore.NewWeighted(int64(cfg.QueryConcurrency))
- }
-
- client := &StorageClient{
- cfg: cfg,
- schemaCfg: schemaCfg,
- readSession: readSession,
- writeSession: writeSession,
- querySemaphore: querySemaphore,
- }
- return client, nil
-}
-
-// Stop implements chunk.IndexClient.
-func (s *StorageClient) Stop() {
- s.readSession.Close()
- s.writeSession.Close()
-}
-
-// ObjectClient implements chunk.ObjectClient for Cassandra.
-type ObjectClient struct {
- cfg cassandra.Config
- schemaCfg chunk.SchemaConfig
- readSession *gocql.Session
- writeSession *gocql.Session
- querySemaphore *semaphore.Weighted
-}
-
-// NewObjectClient returns a new ObjectClient.
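-// Like NewStorageClient above, it opens separate read ("chunks-read") and write
-// ("chunks-write") gocql sessions and, when cfg.QueryConcurrency > 0, creates a
-// weighted query semaphore of that size.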
-func NewObjectClient(cfg cassandra.Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*ObjectClient, error) {
- readSession, err := session(cfg, "chunks-read", registerer)
- if err != nil {
- return nil, errors.WithStack(err)
- }
-
- writeSession, err := session(cfg, "chunks-write", registerer)
- if err != nil {
- return nil, errors.WithStack(err)
- }
-
- var querySemaphore *semaphore.Weighted
- if cfg.QueryConcurrency > 0 {
- querySemaphore = semaphore.NewWeighted(int64(cfg.QueryConcurrency))
- }
-
- client := &ObjectClient{
- cfg: cfg,
- schemaCfg: schemaCfg,
- readSession: readSession,
- writeSession: writeSession,
- querySemaphore: querySemaphore,
- }
- return client, nil
-}
-
-type noopConvictionPolicy struct{}
-
-// AddFailure should return `true` if the host should be convicted, `false` otherwise.
-// Convicted means connections are removed - we don't want that.
-// Implements gocql.ConvictionPolicy.
-func (noopConvictionPolicy) AddFailure(err error, host *gocql.HostInfo) bool {
- level.Error(util_log.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String())
- return false
-}
-
-// Implements gocql.ConvictionPolicy.
-func (noopConvictionPolicy) Reset(_ *gocql.HostInfo) {}
diff --git a/pkg/chunk/deleter.go b/pkg/chunk/deleter.go
deleted file mode 100644
index b6723d0ce..000000000
--- a/pkg/chunk/deleter.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package chunk
-
-import (
- "context"
-
- "github.com/cortexproject/cortex/pkg/chunk"
-)
-
-type Deleter interface {
- DeleteEntry(context.Context, chunk.IndexEntry, bool) error
- DeleteSeries(context.Context, chunk.IndexQuery) ([]error, error)
-}
diff --git a/pkg/chunk/filter/filter.go b/pkg/chunk/filter/filter.go
deleted file mode 100644
index aaa951532..000000000
--- a/pkg/chunk/filter/filter.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package filter
-
-import (
- "math"
- "strings"
-
- "github.com/cortexproject/cortex/pkg/chunk"
- "github.com/prometheus/common/model"
- "github.com/sirupsen/logrus"
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-type Config struct {
- Name string
- User string
- From int64
- To int64
- Labels string
-}
-
-func (c *Config) Register(cmd *kingpin.CmdClause) {
- cmd.Flag("filter.name", "option to filter metrics by metric name").StringVar(&c.Name)
- cmd.Flag("filter.user", "option to filter metrics by user").StringVar(&c.User)
- cmd.Flag("filter.from", "option to filter only metrics after specific time point").Int64Var(&c.From)
- cmd.Flag("filter.to", "option to filter only metrics before specific time point").Int64Var(&c.To)
- cmd.Flag("filter.labels", "option to filter metrics with the corresponding labels, provide a comma separated list e.g. <label-name>=<label-value>,<label-name>=<label-value>").StringVar(&c.Labels)
-}
-
-// MetricFilter provides a set of matchers to determine whether a chunk should be returned
-type MetricFilter struct {
- User string
- Name string
- From model.Time
- To model.Time
- Labels []string
-}
-
-// NewMetricFilter returns a metric filter
-func NewMetricFilter(cfg Config) MetricFilter {
- // By default the maximum time point is chosen if no point is specified
- if cfg.To == 0 {
- cfg.To = math.MaxInt64
- }
-
- labellist := strings.Split(cfg.Labels, ",")
-
- return MetricFilter{
- User: cfg.User,
- Name: cfg.Name,
- From: model.Time(cfg.From),
- To: model.Time(cfg.To),
- Labels: labellist,
- }
-}
-
-// Filter returns true if the chunk passes the filter
-func (f *MetricFilter) Filter(c chunk.Chunk) bool {
- if f.From > c.Through || c.From > f.To {
- logrus.Debugf("chunk %v does not pass filter, incorrect chunk ranges From: %v, To: %v", c.ExternalKey(), c.From, c.Through)
- return false
- }
-
- if f.Name != "" && f.Name != c.Metric.Get("__name__") {
- logrus.Debugf("chunk %v does not pass filter, incorrect name: %v", c.ExternalKey(), c.Metric.Get("__name__"))
- return false
- }
-
- return true
-}
diff --git a/pkg/chunk/gcp/bigtable_delete.go b/pkg/chunk/gcp/bigtable_delete.go
deleted file mode 100644
index 0e0caecb6..000000000
--- a/pkg/chunk/gcp/bigtable_delete.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package gcp
-
-import (
- "context"
- "encoding/binary"
- "encoding/hex"
-
- "cloud.google.com/go/bigtable"
- "github.com/cortexproject/cortex/pkg/chunk"
- "github.com/cortexproject/cortex/pkg/chunk/gcp"
- ot "github.com/opentracing/opentracing-go"
- "github.com/sirupsen/logrus"
-
- chunkTool "github.com/cortexproject/cortex-tools/pkg/chunk"
-)
-
-const (
- columnFamily = "f"
-)
-
-// keysFn returns the row and column keys for the given hash and range keys.
-type keysFn func(hashValue string, rangeValue []byte) (rowKey, columnKey string)
-
-// hashPrefix calculates a 64bit hash of the input string and hex-encodes
-// the result, taking care to zero pad etc.
-func hashPrefix(input string) string {
- prefix := hashAdd(hashNew(), input)
- var encodedUint64 [8]byte
- binary.LittleEndian.PutUint64(encodedUint64[:], prefix)
- var hexEncoded [16]byte
- hex.Encode(hexEncoded[:], encodedUint64[:])
- return string(hexEncoded[:])
-}
-
-// storageIndexDeleter implements chunk.IndexDeleter for GCP.
-type storageIndexDeleter struct {
- cfg gcp.Config
- client *bigtable.Client
- keysFn keysFn
-}
-
-// NewStorageIndexDeleter returns a new storage index deleter.
-func NewStorageIndexDeleter(ctx context.Context, cfg gcp.Config) (chunkTool.Deleter, error) {
- client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance)
- if err != nil {
- return nil, err
- }
- return newstorageIndexDeleter(cfg, client), nil
-}
-
-func newstorageIndexDeleter(cfg gcp.Config, client *bigtable.Client) *storageIndexDeleter {
- return &storageIndexDeleter{
- cfg: cfg,
- client: client,
- keysFn: func(hashValue string, rangeValue []byte) (string, string) {
-
- // We hash the row key and prepend it back to the key for better distribution.
- // We preserve the existing key to make migrations and o11y easier.
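- //
- // For example (hypothetical key, for illustration only): a hashValue of
- // "userID/fake-chunk-key" becomes
- // "<16 hex chars of fnv64a(hashValue)>-userID/fake-chunk-key".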
- if cfg.DistributeKeys {
- hashValue = hashPrefix(hashValue) + "-" + hashValue
- }
-
- return hashValue, string(rangeValue)
- },
- }
-}
-
-func (s *storageIndexDeleter) DeleteEntry(ctx context.Context, entry chunk.IndexEntry, deleteSeries bool) error {
- sp, ctx := ot.StartSpanFromContext(ctx, "DeleteEntry")
- defer sp.Finish()
-
- table := s.client.Open(entry.TableName)
- rowKey, columnKey := s.keysFn(entry.HashValue, entry.RangeValue)
-
- mut := bigtable.NewMutation()
- if deleteSeries {
- mut.DeleteRow()
- } else {
- mut.DeleteCellsInColumn(columnFamily, columnKey)
- }
-
- err := table.Apply(ctx, rowKey, mut)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (s *storageIndexDeleter) DeleteSeries(ctx context.Context, series chunk.IndexQuery) ([]error, error) {
- sp, ctx := ot.StartSpanFromContext(ctx, "DeleteSeries")
- defer sp.Finish()
-
- table := s.client.Open(series.TableName)
- rowKey, _ := s.keysFn(series.HashValue, []byte{})
-
- mut := bigtable.NewMutation()
- mut.DeleteRow()
-
- muts := []*bigtable.Mutation{mut}
- rowKeys := []string{rowKey}
-
- logrus.Infof("deleting series from bigtable, rowkey: %v, table: %v", rowKey, series.TableName)
-
- err := table.ReadRows(ctx, bigtable.PrefixRange(rowKey+":"), func(row bigtable.Row) bool {
- mut := bigtable.NewMutation()
- mut.DeleteRow()
- // ApplyBulk requires len(rowKeys) == len(muts), so collect a mutation per row.
- muts = append(muts, mut)
- rowKeys = append(rowKeys, row.Key())
- logrus.Infof("deleting series from bigtable, rowkey: %v, table: %v", row.Key(), series.TableName)
- return true
- })
-
- if err != nil {
- return nil, err
- }
-
- return table.ApplyBulk(ctx, rowKeys, muts)
-}
diff --git a/pkg/chunk/gcp/bigtable_scanner.go b/pkg/chunk/gcp/bigtable_scanner.go
deleted file mode 100644
index be978b565..000000000
--- a/pkg/chunk/gcp/bigtable_scanner.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package gcp
-
-import (
- "context"
- "fmt"
-
- "cloud.google.com/go/bigtable"
- "github.com/cortexproject/cortex/pkg/chunk"
- "github.com/sirupsen/logrus"
-
- chunkTool "github.com/cortexproject/cortex-tools/pkg/chunk"
-)
-
-type bigtableScanner struct {
- client *bigtable.Client
-}
-
-// NewBigtableScanner returns a bigtable scanner
-func NewBigtableScanner(ctx context.Context, project, instance string) (chunkTool.Scanner, error) {
- client, err := bigtable.NewClient(ctx, project, instance)
- if err != nil {
- return nil, err
- }
-
- return &bigtableScanner{
- client: client,
- }, nil
-}
-
-// Scan forwards metrics to a golang channel; forwarded chunks must have the same
-// user ID
-func (s *bigtableScanner) Scan(ctx context.Context, req chunkTool.ScanRequest, filterFunc chunkTool.FilterFunc, out chan chunk.Chunk) error {
- var processingErr error
-
- table := s.client.Open(req.Table)
- decodeContext := chunk.NewDecodeContext()
-
- rr := bigtable.PrefixRange(req.User + "/" + req.Prefix)
-
- // Read through rows and forward slices of chunks with the same metrics
- // fingerprint
- err := table.ReadRows(ctx, rr, func(row bigtable.Row) bool {
- c, err := chunk.ParseExternalKey(req.User, row.Key())
- if err != nil {
- processingErr = err
- return false
- }
-
- if !req.CheckTime(c.From, c.Through) {
- logrus.Debugln("skipping chunk updated at timestamp outside filters range")
- return true
- }
-
- err = c.Decode(decodeContext, row[columnFamily][0].Value)
- if err != nil {
- processingErr = err
- return false
- }
-
- if filterFunc(c) {
- out <- c
- }
- return true
- })
-
- if err != nil {
- return fmt.Errorf("stream canceled, err: %v, table: %v, user: %v", err, req.Table, req.User)
- }
- if processingErr != nil {
- return fmt.Errorf("stream canceled, err: %v, table: %v, user: %v", processingErr, req.Table, req.User)
- }
-
- return nil
-}
diff --git a/pkg/chunk/gcp/fnv.go b/pkg/chunk/gcp/fnv.go
deleted file mode 100644
index 851a9d7f1..000000000
--- a/pkg/chunk/gcp/fnv.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Modified from github.com/prometheus/common/model/fnv.go
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.

-package gcp
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
diff --git a/pkg/chunk/gcp/gcs_scanner.go b/pkg/chunk/gcp/gcs_scanner.go
deleted file mode 100644
index d59eaaaf1..000000000
--- a/pkg/chunk/gcp/gcs_scanner.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package gcp
-
-import (
- "context"
- "fmt"
- "io"
-
- "cloud.google.com/go/storage"
- "github.com/cortexproject/cortex/pkg/chunk"
- "github.com/cortexproject/cortex/pkg/chunk/gcp"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "google.golang.org/api/iterator"
-
- chunkTool "github.com/cortexproject/cortex-tools/pkg/chunk"
-)
-
-type gcsScanner struct {
- config gcp.GCSConfig
- client *storage.Client
- bucket *storage.BucketHandle
-}
-
-// NewGcsScanner returns a GCS scanner
-func NewGcsScanner(ctx context.Context, cfg gcp.GCSConfig) (chunkTool.Scanner, error) {
- client, err := storage.NewClient(ctx)
- if err != nil {
- return nil, err
- }
-
- bucket := client.Bucket(cfg.BucketName)
-
- return &gcsScanner{
- config: cfg,
- client: client,
- bucket: bucket,
- }, nil
-}
-
-// Scan forwards metrics to a golang channel; forwarded chunks must have the same
-// user ID
-func (s *gcsScanner) Scan(ctx context.Context, req chunkTool.ScanRequest, filterFunc chunkTool.FilterFunc, out chan chunk.Chunk) error {
- decodeContext := chunk.NewDecodeContext()
-
- it := s.bucket.Objects(ctx, &storage.Query{
- Prefix: req.User + "/" + req.Prefix,
- })
-
- for {
- objAttrs, err := it.Next()
- if err == iterator.Done {
- break
- }
-
- if err != nil {
- return fmt.Errorf("unable to iterate chunks, err: %v, user: %v", err, req.User)
- }
-
- c, err := chunk.ParseExternalKey(req.User, objAttrs.Name)
- if err != nil {
- return errors.WithStack(err)
- }
-
- if !req.CheckTime(c.From, c.Through) {
- fmt.Println(*req.Interval, c.From, c.Through)
- logrus.Debugln("skipping chunk updated at timestamp outside filters range")
- continue
- }
-
- reader, err := s.bucket.Object(objAttrs.Name).NewReader(ctx)
- if err != nil {
- return errors.WithStack(err)
- }
-
- buf, err := io.ReadAll(reader)
- reader.Close()
-
- if err != nil {
- return errors.WithStack(err)
- }
-
- if err := c.Decode(decodeContext, buf); err != nil {
- return err
- }
-
- if filterFunc(c) {
- out <- c
diff --git a/pkg/chunk/migrate/README.md b/pkg/chunk/migrate/README.md
deleted file mode 100644
index f4eae3574..000000000
--- a/pkg/chunk/migrate/README.md
+++ /dev/null
@@ -1,111 +0,0 @@
-# Chunk Migrator
-
-Chunk Migrator helps with migrating chunks across Cortex clusters while also taking care of setting the right index in the destination cluster as per the specified schema.
-It also supports mapping chunks to a new user in the destination cluster.
-
-Chunk Migrator can be invoked using `chunktool` with the `chunk migrate` command.
-
-## Configuration
-The migrator comprises the following components, each with its own configuration:
-1. Reader - Runs the specified number of workers to read chunks concurrently from the specified storage.
-2. Writer - Runs the specified number of workers to write chunks and index concurrently to the specified storage.
-3. Mapper - Used by the writer while writing chunks to map them to a new User ID, i.e. add chunks and index entries with a new ID to the destination Cortex cluster.
-4. Planner - Used by the reader to selectively read the chunks. Selection criteria can be User IDs, Table Names or Shards.
-
-*Note: Configuration for the Planner needs to be set using CLI flags, while the rest of the components are configured using a single YAML config file.*
-### Shards in Planner:
-```
-// When doing migrations each database is discretely partitioned into 240 shards
-// based on aspects of the database's underlying implementation. 240 was chosen due
-// to the bigtable implementation sharding on the first two characters of the hex encoded
-// metric fingerprint. Cassandra is encoded into 240 discrete shards using the Murmur3
-// partition tokens.
-//
-// Shards are an integer between 1 and 240 that map onto 2 hex characters.
-// For Example:
-// Shard | Prefix
-// 1     | 10
-// 2     | 11
-// ...   | ...
-// 16    | 1f
-// 240   | ff
-//
-// Technically there are 256 combinations of 2 hex characters (16^2). However,
-// fingerprints will not lead with a 0 character, so 00->0f are excluded, leaving
-// 240
-```
-
-
-### Reader Config:
-`storage_type`: Specifies the type of storage which has the chunks. It currently only supports `bigtable` or `gcs`.
-`storage`: It is the same config that is used in Cortex for configuring storage. See [storage_config](https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config)
-`num_workers`: Number of workers to perform read operations concurrently.
-
-### Writer Config:
-`storage`: It is the same config that is used in Cortex for configuring storage. See [storage_config](https://github.com/cortexproject/cortex/blob/master/docs/configuration/config-file-reference.md#storage_config)
-`schema`: It is the same config that is used in Cortex for configuring schemas.
-`num_workers`: Number of workers to perform write operations concurrently.
-
-### Mapper Config:
-The map config file is a yaml file structured as:
-```
-users:
-    user_original: user_mapped
-    ...
-    <original_user_id>: <mapped_user_id>
-
-```
-
-### Planner Config:
-`firstShard`: First shard in range of shards to be migrated (1-240).
-`lastShard`: Last shard in range of shards to be migrated (1-240).
-`users`: Comma separated list of user ids; if empty, all users will be queried.
-`tables`: Comma separated list of tables to migrate.
-
-### Example Usage:
-
-The following example shows how to migrate chunks belonging to user `old-user-id` from an old cluster having chunks in a GCS bucket to
-another cluster with a different GCS bucket. It maps chunks from `old-user-id` to `new-user-id` and also sets the index in the specified Bigtable instance with the configured schema.
-
-```yaml
----config.yaml
-reader:
-  storage_type: gcs
-  storage:
-    gcs:
-      bucket_name: old-gcs-cortex-cluster
-  num_workers: 2
-
-writer:
-  storage:
-    bigtable:
-      project: bigtable-project
-      instance: bigtable-instance
-    gcs:
-      bucket_name: new-gcs-cortex-cluster
-  schema:
-    configs:
-    - from: 2019-01-01
-      store: bigtable
-      object_store: gcs
-      schema: v10
-      index:
-        prefix: index_
-        period: 168h
-  num_workers: 2
-
-mapper:
-  users:
-    "old-user-id": new-user-id
-```
-
-Command to run the migration:
->chunktool chunk migrate --config-file=config.yaml --plan.users=old-user-id
-
-*Note1: User IDs in the mapper are purely for mapping chunks to a new User ID. If a mapping entry is missing, chunks are written using the same ID as the original chunk.
-For migrating chunks of selected users only, use `--plan.users`*
-
-*Note2: Since we haven't specified a shard config, it defaults to migrating all the chunks of the specified users.*
-
-
-
diff --git a/pkg/chunk/migrate/migrator.go b/pkg/chunk/migrate/migrator.go
deleted file mode 100644
index 8dd457af4..000000000
--- a/pkg/chunk/migrate/migrator.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package migrate
-
-import (
-    "context"
-
-    "github.com/cortexproject/cortex/pkg/chunk"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/sirupsen/logrus"
-
-    "github.com/cortexproject/cortex-tools/pkg/chunk/migrate/reader"
-    "github.com/cortexproject/cortex-tools/pkg/chunk/migrate/writer"
-)
-
-const chunkBufferSize = 1000
-
-type Config struct {
-    ReaderConfig reader.Config `yaml:"reader"`
-    WriterConfig writer.Config `yaml:"writer"`
-    Mapper       writer.Mapper `yaml:"mapper,omitempty"`
-}
-
-type Migrator struct {
-    cfg         Config
-    reader      *reader.Reader
-    writer      *writer.Writer
-    chunkBuffer chan chunk.Chunk
-}
-
-func NewMigrator(cfg Config, plannerCfg reader.PlannerConfig) (*Migrator, error) {
-    chunkReader, err := reader.NewReader(cfg.ReaderConfig, plannerCfg)
-    if err != nil {
-        return nil, err
-    }
-
-    chunkWriter, err := writer.NewWriter(cfg.WriterConfig, cfg.Mapper)
-    if err != nil {
-        return nil, err
-    }
-
-    return &Migrator{
-        cfg:         cfg,
-        reader:      chunkReader,
-        writer:      chunkWriter,
-        chunkBuffer: make(chan chunk.Chunk, chunkBufferSize),
-    }, nil
-}
-
-func (m *Migrator) Run() {
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    go m.reader.Run(ctx, m.chunkBuffer)
-    m.writer.Run(ctx, m.chunkBuffer)
-    if m.reader.Err() != nil {
-        logrus.WithError(m.reader.Err()).Errorln("stopped migrator due to an error in reader")
-    }
-
-    if m.writer.Err() != nil {
-        logrus.WithError(m.writer.Err()).Errorln("stopped migrator due to an error in writer")
-    }
-}
-
-func Setup() error {
-    prometheus.MustRegister(
-        reader.SentChunks,
-        writer.ReceivedChunks,
-    )
-
-    return nil
-}
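`Migrator.Run` above is a plain producer/consumer pipeline: the reader fills a buffered channel from its own goroutine while the writer drains it, and the buffer (`chunkBufferSize`) absorbs bursts while the slower side catches up. A minimal sketch of the same wiring, with integers standing in for chunks:

```go
package main

import (
	"context"
	"fmt"
)

// run wires a producer and a consumer together through a buffered channel,
// mirroring how Migrator.Run connects reader and writer.
func run(ctx context.Context) {
	buffer := make(chan int, 1000) // cf. chunkBufferSize above

	go func() {
		defer close(buffer) // closing the channel tells the consumer to stop
		for i := 0; i < 10; i++ {
			select {
			case buffer <- i:
			case <-ctx.Done():
				return
			}
		}
	}()

	// The consumer runs in the caller's goroutine, as the writer does above.
	for v := range buffer {
		fmt.Println("processed", v)
	}
}

func main() {
	run(context.Background())
}
```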
diff --git a/pkg/chunk/migrate/reader/planner.go b/pkg/chunk/migrate/reader/planner.go
deleted file mode 100644
index e24baa42b..000000000
--- a/pkg/chunk/migrate/reader/planner.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package reader
-
-import (
-    "fmt"
-    "strings"
-
-    "gopkg.in/alecthomas/kingpin.v2"
-
-    "github.com/cortexproject/cortex-tools/pkg/chunk"
-)
-
-// PlannerConfig is used to configure the Planner
-type PlannerConfig struct {
-    FirstShard int
-    LastShard  int
-    UserIDList string
-    Tables     string
-}
-
-// Notes on Planned Shards
-// #######################
-// When doing migrations each database is discretely partitioned into 240 shards
-// based on aspects of the database's underlying implementation. 240 was chosen due
-// to the bigtable implementation sharding on the first two characters of the hex encoded
-// metric fingerprint. Cassandra is encoded into 240 discrete shards using the Murmur3
-// partition tokens.
-//
-// Shards are an integer between 1 and 240 that map onto 2 hex characters.
-// For Example:
-// Shard | Prefix
-// 1     | 10
-// 2     | 11
-// ...   | ...
-// 16    | 1f
-// 240   | ff
-//
-// Technically there are 256 combinations of 2 hex characters (16^2). However,
-// fingerprints will not lead with a 0 character, so 00->0f are excluded, leaving
-// 240
-
-// Register adds the flags required to configure the planner to the given command
-func (cfg *PlannerConfig) Register(cmd *kingpin.CmdClause) {
-    cmd.Flag("plan.firstShard", "first shard in range of shards to be migrated (1-240)").Default("1").IntVar(&cfg.FirstShard)
-    cmd.Flag("plan.lastShard", "last shard in range of shards to be migrated (1-240)").Default("240").IntVar(&cfg.LastShard)
-    cmd.Flag("plan.users", "comma separated list of user ids, if empty all users will be queried").StringVar(&cfg.UserIDList)
-    cmd.Flag("plan.tables", "comma separated list of tables to migrate").StringVar(&cfg.Tables)
-}
-
-// Planner plans the queries required for the migrations
-type Planner struct {
-    firstShard int
-    lastShard  int
-    tables     []string
-    users      []string
-}
-
-// NewPlanner returns a new planner struct
-func NewPlanner(cfg PlannerConfig) (*Planner, error) {
-    if cfg.FirstShard < 1 || cfg.FirstShard > 240 {
-        return &Planner{}, fmt.Errorf("plan.firstShard set to %v, must be in range 1-240", cfg.FirstShard)
-    }
-    if cfg.LastShard < 1 || cfg.LastShard > 240 {
-        return &Planner{}, fmt.Errorf("plan.lastShard set to %v, must be in range 1-240", cfg.LastShard)
-    }
-    if cfg.FirstShard > cfg.LastShard {
-        return &Planner{}, fmt.Errorf("plan.lastShard (%v) is set to less than plan.firstShard (%v)", cfg.LastShard, cfg.FirstShard)
-    }
-
-    userList := strings.Split(cfg.UserIDList, ",")
-    tableList := strings.Split(cfg.Tables, ",")
-    return &Planner{
-        firstShard: cfg.FirstShard,
-        lastShard:  cfg.LastShard,
-        users:      userList,
-        tables:     tableList,
-    }, nil
-}
-
-// Plan returns the scan requests for the planned migration
-func (p Planner) Plan() []chunk.ScanRequest {
-    reqs := []chunk.ScanRequest{}
-    for _, table := range p.tables {
-        for _, user := range p.users {
-            for shard := p.firstShard; shard <= p.lastShard; shard++ {
-                reqs = append(reqs, chunk.ScanRequest{
-                    Table:  table,
-                    User:   user,
-                    Prefix: fmt.Sprintf("%02x", shard+15),
-                })
-            }
-        }
-    }
-    return reqs
-}
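The shard-to-prefix mapping described in the comment block is exactly the `fmt.Sprintf("%02x", shard+15)` expression used in `Plan` above: shard 1 lands on `10` because fingerprints never lead with a `0` character. A tiny sketch showing the arithmetic:

```go
package main

import "fmt"

// Shards 1-240 map onto the two-character hex prefixes 10-ff,
// matching the Prefix computation in Plan above.
func main() {
	for _, shard := range []int{1, 2, 16, 240} {
		fmt.Printf("shard %3d -> prefix %02x\n", shard, shard+15)
	}
	// Output:
	// shard   1 -> prefix 10
	// shard   2 -> prefix 11
	// shard  16 -> prefix 1f
	// shard 240 -> prefix ff
}
```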
diff --git a/pkg/chunk/migrate/reader/reader.go b/pkg/chunk/migrate/reader/reader.go
deleted file mode 100644
index 001160e43..000000000
--- a/pkg/chunk/migrate/reader/reader.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package reader
-
-import (
-    "context"
-    "fmt"
-    "sync"
-
-    cortex_chunk "github.com/cortexproject/cortex/pkg/chunk"
-    cortex_storage "github.com/cortexproject/cortex/pkg/chunk/storage"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/sirupsen/logrus"
-
-    "github.com/cortexproject/cortex-tools/pkg/chunk"
-    "github.com/cortexproject/cortex-tools/pkg/chunk/storage"
-)
-
-var (
-    SentChunks = prometheus.NewCounter(prometheus.CounterOpts{
-        Namespace: "cortex",
-        Name:      "reader_sent_chunks_total",
-        Help:      "The total number of chunks sent by this reader.",
-    })
-)
-
-// Config is a config for a Reader
-type Config struct {
-    StorageType   string                `yaml:"storage_type"`
-    StorageConfig cortex_storage.Config `yaml:"storage"`
-    NumWorkers    int                   `yaml:"num_workers"`
-}
-
-// Reader collects and forwards chunks according to its planner
-type Reader struct {
-    cfg Config
-    id  string // id is composed of the reading prefix and the shards assigned to the reader
-
-    scanner          chunk.Scanner
-    planner          *Planner
-    workerGroup      sync.WaitGroup
-    scanRequestsChan chan chunk.ScanRequest
-    err              error
-    quit             chan struct{}
-}
-
-// NewReader returns a Reader struct
-func NewReader(cfg Config, plannerCfg PlannerConfig) (*Reader, error) {
-    planner, err := NewPlanner(plannerCfg)
-    if err != nil {
-        return nil, err
-    }
-
-    scanner, err := storage.NewChunkScanner(cfg.StorageType, cfg.StorageConfig)
-    if err != nil {
-        return nil, err
-    }
-
-    id := fmt.Sprintf("%d_%d", plannerCfg.FirstShard, plannerCfg.LastShard)
-
-    // Default to one worker if none is set
-    if cfg.NumWorkers < 1 {
-        cfg.NumWorkers = 1
-    }
-
-    return &Reader{
-        cfg:              cfg,
-        id:               id,
-        planner:          planner,
-        scanner:          scanner,
-        scanRequestsChan: make(chan chunk.ScanRequest),
-        quit:             make(chan struct{}),
-    }, nil
-}
-
-// Run initializes the reader workers
-func (r *Reader) Run(ctx context.Context, outChan chan cortex_chunk.Chunk) {
-    errChan := make(chan error)
-    defer close(outChan)
-
-    readCtx, cancel := context.WithCancel(ctx)
-
-    // starting workers
-    for i := 0; i < r.cfg.NumWorkers; i++ {
-        r.workerGroup.Add(1)
-        go r.readLoop(readCtx, outChan, errChan)
-    }
-
-    go func() {
-        // cancel context when an error occurs or errChan is closed
-        defer cancel()
-
-        err := <-errChan
-        if err != nil {
-            r.err = err
-            logrus.WithError(err).Errorln("error scanning chunks, stopping read operation")
-            close(r.quit)
-        }
-    }()
-
-    scanRequests := r.planner.Plan()
-    logrus.Infof("built %d plans for reading", len(scanRequests))
-
-    defer func() {
-        // Let's wait for all workers to finish before we return.
-        // An error in errChan would cause all workers to stop because we cancel the context.
-        // Otherwise, closure of scanRequestsChan (which is done after sending all the scan requests) should make all workers stop.
-        r.workerGroup.Wait()
-        close(errChan)
-    }()
-
-    // feeding scan requests to workers
-    for _, req := range scanRequests {
-        select {
-        case r.scanRequestsChan <- req:
-            continue
-        case <-r.quit:
-            return
-        }
-    }
-
-    // all scan requests are fed, close the channel
-    close(r.scanRequestsChan)
-}
-
-func (r *Reader) readLoop(ctx context.Context, outChan chan cortex_chunk.Chunk, errChan chan error) {
-    defer r.workerGroup.Done()
-
-    for {
-        select {
-        case <-ctx.Done():
-            logrus.Infoln("shutting down reader because context was cancelled")
-            return
-        case req, open := <-r.scanRequestsChan:
-            if !open {
-                return
-            }
-
-            logEntry := logrus.WithFields(logrus.Fields{
-                "table": req.Table,
-                "user":  req.User,
-                "shard": req.Prefix})
-
-            logEntry.Infoln("attempting scan request")
-            err := r.scanner.Scan(ctx, req, func(_ cortex_chunk.Chunk) bool {
-                // while this does not mean the chunk was sent by the scanner, this is the closest we can get
-                SentChunks.Inc()
-                return true
-            }, outChan)
-
-            if err != nil {
-                logEntry.WithError(err).Errorln("error scanning chunks")
-                errChan <- fmt.Errorf("scan request failed, %v", req)
-                return
-            }
-
-            logEntry.Infoln("completed scan request")
-        }
-    }
-}
-
-func (r *Reader) Stop() {
-    close(r.quit)
-}
-
-func (r *Reader) Err() error {
-    return r.err
-}
diff --git a/pkg/chunk/migrate/writer/mapper.go b/pkg/chunk/migrate/writer/mapper.go
deleted file mode 100644
index 9969e0039..000000000
--- a/pkg/chunk/migrate/writer/mapper.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package writer
-
-import (
-    "github.com/cortexproject/cortex/pkg/chunk"
-)
-
-// Map Config File
-// The map config file is a yaml file structured as:
-/*
-users:
-    user_original: user_mapped
-    ...
-    <original_user_id>: <mapped_user_id>
-
-*/
-
-// Mapper is used to update and re-encode chunks with new user IDs.
-// It can also serve as a struct to map other aspects of chunks in
-// the future as more migration needs arise
-// TODO: Add functionality to add/edit/drop labels
-type Mapper struct {
-    Users map[string]string `yaml:"users,omitempty"`
-}
-
-// MapChunk updates and maps values onto a chunk
-func (u Mapper) MapChunk(chk chunk.Chunk) (chunk.Chunk, error) {
-    if u.Users != nil {
-        newID, ok := u.Users[chk.UserID]
-        if ok {
-            chk = chunk.NewChunk(newID, chk.Fingerprint, chk.Metric, chk.Data, chk.From, chk.Through)
-            err := chk.Encode()
-            if err != nil {
-                return chk, err
-            }
-        }
-    }
-    return chk, nil
-}
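For illustration, here is how the map config YAML above deserializes into the `Mapper` struct and how lookups fall back to the original user ID when no mapping entry exists, mirroring the behaviour of `MapChunk`; the user IDs are hypothetical placeholders:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Mapper mirrors the struct above: a map from original to mapped user IDs.
type Mapper struct {
	Users map[string]string `yaml:"users,omitempty"`
}

func main() {
	// Hypothetical map config file contents.
	raw := []byte("users:\n  old-user-id: new-user-id\n")

	var m Mapper
	if err := yaml.Unmarshal(raw, &m); err != nil {
		log.Fatal(err)
	}

	// Missing entries fall through to the original ID, as in MapChunk above.
	for _, id := range []string{"old-user-id", "unmapped-user"} {
		mapped, ok := m.Users[id]
		if !ok {
			mapped = id
		}
		fmt.Printf("%s -> %s\n", id, mapped)
	}
}
```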
diff --git a/pkg/chunk/migrate/writer/writer.go b/pkg/chunk/migrate/writer/writer.go
deleted file mode 100644
index 604f3fba3..000000000
--- a/pkg/chunk/migrate/writer/writer.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package writer
-
-import (
-    "context"
-    "sync"
-
-    "github.com/cortexproject/cortex/pkg/chunk"
-    "github.com/cortexproject/cortex/pkg/chunk/storage"
-    "github.com/cortexproject/cortex/pkg/util/validation"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/sirupsen/logrus"
-)
-
-var (
-    ReceivedChunks = prometheus.NewCounterVec(prometheus.CounterOpts{
-        Namespace: "cortex",
-        Name:      "migration_writer_received_chunks_total",
-        Help:      "The total number of chunks received by this writer",
-    }, nil)
-)
-
-// Config configures the Writer struct
-type Config struct {
-    StorageConfig storage.Config     `yaml:"storage"`
-    SchemaConfig  chunk.SchemaConfig `yaml:"schema"`
-    NumWorkers    int                `yaml:"num_workers"`
-}
-
-// Writer receives chunks and stores them in a storage backend
-type Writer struct {
-    cfg        Config
-    chunkStore chunk.Store
-
-    workerGroup sync.WaitGroup
-    mapper      Mapper
-
-    err  error
-    quit chan struct{}
-}
-
-// NewWriter returns a Writer object
-func NewWriter(cfg Config, mapper Mapper) (*Writer, error) {
-    overrides, err := validation.NewOverrides(validation.Limits{}, nil)
-    if err != nil {
-        return nil, err
-    }
-
-    chunkStore, err := storage.NewStore(cfg.StorageConfig, chunk.StoreConfig{}, cfg.SchemaConfig, overrides, nil, nil, nil)
-    if err != nil {
-        return nil, err
-    }
-
-    writer := Writer{
-        cfg:         cfg,
-        chunkStore:  chunkStore,
-        workerGroup: sync.WaitGroup{},
-        mapper:      mapper,
-        quit:        make(chan struct{}),
-    }
-    return &writer, nil
-}
-
-// Run initializes the writer workers
-func (w *Writer) Run(ctx context.Context, inChan chan chunk.Chunk) {
-    errChan := make(chan error)
-    writeCtx, cancel := context.WithCancel(ctx)
-
-    defer func() {
-        // Let's wait for all workers to finish before we return.
-        // An error in errChan would cause all workers to stop because we cancel the context.
-        // Otherwise, closure of inChan (done by the sending reader) should make all workers stop.
-        w.workerGroup.Wait()
-        // closing the errChan to let this function return
-        close(errChan)
-    }()
-
-    go func() {
-        // cancel context when an error occurs or errChan is closed
-        defer cancel()
-
-        err := <-errChan
-        if err != nil {
-            w.err = err
-            logrus.WithError(err).Errorln("error writing chunk, stopping write operation")
-        }
-    }()
-
-    for i := 0; i < w.cfg.NumWorkers; i++ {
-        w.workerGroup.Add(1)
-        go w.writeLoop(writeCtx, i, inChan, errChan)
-    }
-}
-
-func (w *Writer) writeLoop(ctx context.Context, _ int, inChan chan chunk.Chunk, errChan chan error) {
-    defer w.workerGroup.Done()
-
-    for {
-        select {
-        case <-ctx.Done():
-            logrus.Info("shutting down writer because context was cancelled")
-            return
-        case c, open := <-inChan:
-            if !open {
-                return
-            }
-
-            ReceivedChunks.WithLabelValues().Add(1)
-
-            remapped, err := w.mapper.MapChunk(c)
-            if err != nil {
-                logrus.WithError(err).Errorln("failed to remap chunk")
-                errChan <- err
-                return
-            }
-
-            // Ensure the chunk has been encoded before persisting in order to avoid
-            // bad external keys in the index entry
-            if err := remapped.Encode(); err != nil {
-                errChan <- err
-                return
-            }
-
-            err = w.chunkStore.PutOne(ctx, remapped.From, remapped.Through, remapped)
-            if err != nil {
-                logrus.WithError(err).Errorln("failed to store chunk")
-                errChan <- err
-                return
-            }
-        }
-    }
-}
-
-func (w *Writer) Err() error {
-    return w.err
-}
diff --git a/pkg/chunk/scanner.go b/pkg/chunk/scanner.go
deleted file mode 100644
index 7595091fd..000000000
--- a/pkg/chunk/scanner.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package chunk
-
-import (
-    "context"
-
-    "github.com/cortexproject/cortex/pkg/chunk"
-    "github.com/prometheus/common/model"
-)
-
-type FilterFunc func(chunk.Chunk) bool
-
-// Scanner scans a chunk storage backend for chunks
-type Scanner interface {
-    Scan(ctx context.Context, req ScanRequest, filterFunc FilterFunc, out chan chunk.Chunk) error
-}
-
-// ScannerProvider allows creating a new Scanner
-type ScannerProvider interface {
-    NewScanner() Scanner
-}
-
-// ScanRequest is used to designate the scope of a chunk table scan.
-// If Prefix is not set, scan all shards.
-// If Interval is not set, consider all chunks.
-type ScanRequest struct {
-    Table    string
-    User     string
-    Prefix   string
-    Interval *model.Interval
-}
-
-func (s *ScanRequest) CheckTime(from, through model.Time) bool {
-    if s.Interval == nil {
-        return true
-    }
-
-    if s.Interval.Start > through || from > s.Interval.End {
-        return false
-    }
-
-    return true
-}
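`CheckTime` implements a closed-interval overlap test: a chunk is kept unless it ends before the interval starts or starts after the interval ends, and a nil interval matches everything. A small sketch reproducing that logic with `prometheus/common/model` types:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// overlaps reproduces ScanRequest.CheckTime: a chunk [from, through] passes
// when it overlaps the closed interval [Start, End].
func overlaps(interval *model.Interval, from, through model.Time) bool {
	if interval == nil {
		return true // no interval configured: consider all chunks
	}
	return !(interval.Start > through || from > interval.End)
}

func main() {
	iv := &model.Interval{Start: 100, End: 200}
	fmt.Println(overlaps(iv, 50, 99))   // false: chunk ends before the interval
	fmt.Println(overlaps(iv, 150, 250)) // true: partial overlap is enough
	fmt.Println(overlaps(nil, 0, 1))    // true: nil interval matches everything
}
```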
diff --git a/pkg/chunk/storage/factory.go b/pkg/chunk/storage/factory.go
deleted file mode 100644
index 11580cc67..000000000
--- a/pkg/chunk/storage/factory.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package storage
-
-import (
-    "context"
-    "fmt"
-
-    "github.com/cortexproject/cortex/pkg/chunk/storage"
-
-    "github.com/cortexproject/cortex-tools/pkg/chunk"
-    "github.com/cortexproject/cortex-tools/pkg/chunk/gcp"
-)
-
-// NewChunkScanner makes a new chunk scanner based on the configuration.
-func NewChunkScanner(name string, cfg storage.Config) (chunk.Scanner, error) {
-    switch name {
-    case "gcp", "gcp-columnkey":
-        return gcp.NewBigtableScanner(context.Background(), cfg.GCPStorageConfig.Project, cfg.GCPStorageConfig.Instance)
-    case "gcs":
-        return gcp.NewGcsScanner(context.Background(), cfg.GCSConfig)
-    default:
-        return nil, fmt.Errorf("unrecognized storage client %v, choose one of: gcp, gcp-columnkey, gcs", name)
-    }
-}
diff --git a/pkg/commands/chunks.go b/pkg/commands/chunks.go
deleted file mode 100644
index b09e7d8b1..000000000
--- a/pkg/commands/chunks.go
+++ /dev/null
@@ -1,658 +0,0 @@
-package commands
-
-import (
-    "bufio"
-    "context"
-    "encoding/hex"
-    "fmt"
-    "os"
-    "strings"
-    "sync"
-    "sync/atomic"
-    "time"
-
-    "github.com/cortexproject/cortex/pkg/chunk"
-    "github.com/cortexproject/cortex/pkg/chunk/cassandra"
-    "github.com/cortexproject/cortex/pkg/chunk/gcp"
-    "github.com/cortexproject/cortex/pkg/cortex"
-    "github.com/cortexproject/cortex/pkg/util/flagext"
-    "github.com/pkg/errors"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/common/model"
-    "github.com/prometheus/prometheus/model/labels"
-    "github.com/sirupsen/logrus"
-    "golang.org/x/sync/errgroup"
-    "gopkg.in/alecthomas/kingpin.v2"
-    yamlV2 "gopkg.in/yaml.v2"
-    "gopkg.in/yaml.v3"
-
-    chunkTool "github.com/cortexproject/cortex-tools/pkg/chunk"
-    toolCassandra "github.com/cortexproject/cortex-tools/pkg/chunk/cassandra"
-    "github.com/cortexproject/cortex-tools/pkg/chunk/filter"
-    toolGCP "github.com/cortexproject/cortex-tools/pkg/chunk/gcp"
-    "github.com/cortexproject/cortex-tools/pkg/chunk/migrate"
-)
-
-var (
-    chunkRefsDeleted = prometheus.NewGauge(prometheus.GaugeOpts{
-        Namespace: "cortex",
-        Name:      "chunk_entries_deleted_total",
-        Help:      "Total count of chunk entries deleted from the cortex index",
-    })
-
-    seriesEntriesDeleted = prometheus.NewGauge(prometheus.GaugeOpts{
-        Namespace: "cortex",
-        Name:      "series_entries_deleted_total",
-        Help:      "Total count of series entries deleted from the cortex index",
-    })
-
-    labelEntriesDeleted = prometheus.NewGauge(prometheus.GaugeOpts{
-        Namespace: "cortex",
-        Name:      "series_label_entries_deleted_total",
-        Help:      "Total count of label entries deleted from the cortex index",
-    })
-
-    deletionDuration = prometheus.NewGauge(prometheus.GaugeOpts{
-        Namespace: "cortex",
-        Name:      "delete_operation_seconds",
-        Help:      "The duration of the chunk deletion operation.",
-    })
-)
-
-// SchemaConfig contains the config for our chunk index schemas
-type SchemaConfig struct {
-    Configs  []*chunk.PeriodConfig `yaml:"configs"`
-    FileName string
-}
-
-// Load loads the schema config from the yaml file, unless it is already set
-func (cfg *SchemaConfig) Load() error {
-    if len(cfg.Configs) > 0 {
-        return nil
-    }
-
-    f, err := os.Open(cfg.FileName)
-    if err != nil {
-        return err
-    }
-
-    decoder := yaml.NewDecoder(f)
-    decoder.KnownFields(true)
-    return decoder.Decode(&cfg)
-}
-
-type chunkCommandOptions struct {
-    Bigtable     gcp.Config
-    DryRun       bool
-    Schema       SchemaConfig
-    FilterConfig filter.Config
-    DeleteSeries bool
-}
-
-type deleteChunkCommandOptions struct {
-    chunkCommandOptions
-    GCS gcp.GCSConfig
-}
-type deleteSeriesCommandOptions struct {
-    chunkCommandOptions
-}
-
-func registerDeleteChunkCommandOptions(cmd *kingpin.CmdClause) {
-    deleteChunkCommandOptions := &deleteChunkCommandOptions{}
-    deleteChunkCommand := cmd.Command("delete", "Deletes the specified chunk references from the index").Action(deleteChunkCommandOptions.run)
-    deleteChunkCommand.Flag("dryrun", "if enabled, no delete action will be taken").BoolVar(&deleteChunkCommandOptions.DryRun)
-    deleteChunkCommand.Flag("delete-series", "if enabled, the entire series will be deleted, not just the chunkID column").BoolVar(&deleteChunkCommandOptions.DeleteSeries)
-    deleteChunkCommand.Flag("bigtable.project", "bigtable project to use").StringVar(&deleteChunkCommandOptions.Bigtable.Project)
-    deleteChunkCommand.Flag("bigtable.instance", "bigtable instance to use").StringVar(&deleteChunkCommandOptions.Bigtable.Instance)
-    deleteChunkCommand.Flag("chunk.gcs.bucketname", "specify gcs bucket to scan for chunks").StringVar(&deleteChunkCommandOptions.GCS.BucketName)
-    deleteChunkCommand.Flag("schema-file", "path to file containing cortex schema config").Required().StringVar(&deleteChunkCommandOptions.Schema.FileName)
-    deleteChunkCommandOptions.FilterConfig.Register(deleteChunkCommand)
-}
-
-func registerDeleteSeriesCommandOptions(cmd *kingpin.CmdClause) {
-    deleteSeriesCommandOptions := &deleteSeriesCommandOptions{}
-    deleteSeriesCommand := cmd.Command("delete-series", "Deletes the specified series references from the index").Action(deleteSeriesCommandOptions.run)
-    deleteSeriesCommand.Flag("dryrun", "if enabled, no delete action will be taken").BoolVar(&deleteSeriesCommandOptions.DryRun)
-    deleteSeriesCommand.Flag("bigtable.project", "bigtable project to use").StringVar(&deleteSeriesCommandOptions.Bigtable.Project)
-    deleteSeriesCommand.Flag("bigtable.instance", "bigtable instance to use").StringVar(&deleteSeriesCommandOptions.Bigtable.Instance)
-    deleteSeriesCommand.Flag("schema-file", "path to file containing cortex schema config").Required().StringVar(&deleteSeriesCommandOptions.Schema.FileName)
-    deleteSeriesCommandOptions.FilterConfig.Register(deleteSeriesCommand)
-}
-
-type chunkCleanCommandOptions struct {
-    CortexConfigFile      string
-    InvalidIndexEntryFile string
-    Table                 string
-    BatchSize             int
-    Concurrency           int
-}
-
-func registerChunkCleanCommandOptions(cmd *kingpin.CmdClause) {
-    opts := &chunkCleanCommandOptions{}
-    chunkCleanCommand := cmd.Command("clean-index", "Deletes the index entries specified in the provided file from the specified index table.").Action(opts.run)
-    chunkCleanCommand.Flag("invalid-entry-file", "File with list of index entries to delete. This file is generated using the `chunk validate-index` command.").Required().StringVar(&opts.InvalidIndexEntryFile)
-    chunkCleanCommand.Flag("table", "Cortex index table to delete index entries from").Required().StringVar(&opts.Table)
-    chunkCleanCommand.Flag("cortex-config-file", "Path to Cortex config file containing the Cassandra config").Required().StringVar(&opts.CortexConfigFile)
-    chunkCleanCommand.Flag("batch-size", "How many deletes to submit in one batch").Default("100").IntVar(&opts.BatchSize)
-    chunkCleanCommand.Flag("concurrency", "How many concurrent threads to run").Default("8").IntVar(&opts.Concurrency)
-}
-
-type validateIndexCommandOptions struct {
-    CortexConfigFile      string
-    Table                 string
-    FromTimestamp         int64
-    ToTimestamp           int64
-    InvalidIndexEntryFile string
-    TenantID              string
-}
-
-func registerValidateIndexCommandOptions(cmd *kingpin.CmdClause) {
-    opts := &validateIndexCommandOptions{}
-    validateIndexCommand := cmd.Command("validate-index", "Scans the provided Cortex index for invalid entries. Currently, only Cassandra is supported.").Action(opts.run)
-    validateIndexCommand.Flag("cortex-config-file", "Path to a valid Cortex config file.").Required().StringVar(&opts.CortexConfigFile)
-    validateIndexCommand.Flag("invalid-entry-file", "Path to file where the hash and range values of invalid index entries will be written.").Default("invalid-entries.txt").StringVar(&opts.InvalidIndexEntryFile)
-    validateIndexCommand.Flag("table", "Cortex index table to scan for invalid index entries").Required().StringVar(&opts.Table)
-    validateIndexCommand.Flag("from-unix-timestamp", "Set a valid unix timestamp in seconds to configure a minimum timestamp to scan for invalid entries.").Default("0").Int64Var(&opts.FromTimestamp)
-    validateIndexCommand.Flag("to-unix-timestamp", "Set a valid unix timestamp in seconds to configure a maximum timestamp to scan for invalid entries.").Int64Var(&opts.ToTimestamp)
-    validateIndexCommand.Flag("tenant-id", "Tenant ID to scan entries for.").Default("fake").StringVar(&opts.TenantID)
-}
-
-// RegisterChunkCommands registers the ChunkCommand flags with the kingpin application
-func RegisterChunkCommands(app *kingpin.Application) {
-    chunkCommand := app.Command("chunk", "Chunk related operations").PreAction(setup)
-    registerDeleteChunkCommandOptions(chunkCommand)
-    registerDeleteSeriesCommandOptions(chunkCommand)
-    registerMigrateChunksCommandOptions(chunkCommand)
-    registerChunkCleanCommandOptions(chunkCommand)
-    registerValidateIndexCommandOptions(chunkCommand)
-}
-
-func setup(k *kingpin.ParseContext) error {
-    if strings.HasPrefix(k.String(), "chunk migrate") {
-        return migrate.Setup()
-    }
-    prometheus.MustRegister(
-        chunkRefsDeleted,
-        seriesEntriesDeleted,
-        labelEntriesDeleted,
-    )
-    return nil
-}
-
-func (c *chunkCleanCommandOptions) run(_ *kingpin.ParseContext) error {
-    cortexCfg := &cortex.Config{}
-    flagext.RegisterFlags(cortexCfg)
-    err := LoadConfig(c.CortexConfigFile, true, cortexCfg)
-    if err != nil {
-        return errors.Wrap(err, "failed to parse Cortex config")
-    }
-
-    err = cortexCfg.Schema.Load()
-    if err != nil {
-        return errors.Wrap(err, "failed to load schemas")
-    }
-
-    logrus.Debug("Connecting to Cassandra")
-    client, err := cassandra.NewStorageClient(cortexCfg.Storage.CassandraStorageConfig, cortexCfg.Schema, nil)
-    if err != nil {
-        return errors.Wrap(err, "failed to connect to Cassandra")
-    }
-
-    logrus.Debug("Connected")
-
-    inputFile, err := os.Open(c.InvalidIndexEntryFile)
-    if err != nil {
-        return errors.Wrap(err, "failed opening input file")
-    }
-    scanner := bufio.NewScanner(inputFile)
-    scanner.Split(bufio.ScanLines)
-
-    // One channel message per input line.
-    lineCh := make(chan string, c.Concurrency)
-
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
-    var totalLineCnt uint32
-
-    g, ctx := errgroup.WithContext(ctx)
-    for i := 0; i < c.Concurrency; i++ {
-        g.Go(func() error {
-            batch := client.NewWriteBatch()
-            lineCnt := 0
-            for line := range lineCh {
-                select {
-                case <-ctx.Done():
-                    return nil
-                default:
-                }
-
-                logrus.Debugf("processing line: %s", line)
-                parts := strings.SplitN(line, ",", 2)
-                if len(parts) != 2 {
-                    logrus.WithFields(logrus.Fields{
-                        "line": line,
-                    }).Errorln("invalid input format")
-                    continue
-                }
-
-                parts[0], parts[1] = strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
-                parts[1] = strings.TrimPrefix(parts[1], "0x")
-
-                rangeVal, err := hex.DecodeString(parts[1])
-                if err != nil {
-                    logrus.WithFields(logrus.Fields{
-                        "hash":  parts[0],
-                        "range": parts[1],
-                    }).WithError(err).Errorln("invalid range value")
-                    continue
-                }
-
-                batch.Delete(c.Table, parts[0], rangeVal)
-                lineCnt++
-
-                if lineCnt >= c.BatchSize {
-                    writeBatch(ctx, client, batch)
-                    batch = client.NewWriteBatch()
-                    lineCnt = 0
-                }
-
-                newTotalLineCnt := atomic.AddUint32(&totalLineCnt, 1)
-                if newTotalLineCnt%25000 == 0 {
-                    logrus.WithFields(logrus.Fields{
-                        "entries_cleaned_up": newTotalLineCnt,
-                    }).Infoln("cleanup progress")
-                }
-            }
-
-            writeBatch(ctx, client, batch)
-            return nil
-        })
-    }
-
-    go func() {
-        for scanner.Scan() {
-            lineCh <- scanner.Text()
-        }
-        close(lineCh)
-    }()
-
-    err = g.Wait()
-    if err != nil {
-        return errors.Wrap(err, "failed to delete invalid index entries")
-    }
-
-    logrus.WithFields(logrus.Fields{
-        "entries_cleaned_up": totalLineCnt,
-    }).Infoln("cleanup complete")
-
-    return nil
-}
-
-// writeBatch applies a batch of deletes, retrying up to five times and
-// returning as soon as one attempt succeeds.
-func writeBatch(ctx context.Context, client *cassandra.StorageClient, batch chunk.WriteBatch) {
-    logrus.Debugf("applying batch")
-    for retries := 5; retries > 0; retries-- {
-        err := client.BatchWrite(ctx, batch)
-        if err == nil {
-            return
-        }
-        if retries > 1 {
-            logrus.WithError(err).Warnln("failed to apply batch write, retrying")
-        } else {
-            logrus.WithError(err).Errorln("failed to apply batch write, giving up")
-        }
-    }
-}
-
-// LoadConfig reads YAML-formatted config from filename into cfg.
-func LoadConfig(filename string, expandENV bool, cfg *cortex.Config) error {
-    buf, err := os.ReadFile(filename)
-    if err != nil {
-        return errors.Wrap(err, "Error reading config file")
-    }
-
-    if expandENV {
-        buf = expandEnv(buf)
-    }
-
-    err = yamlV2.Unmarshal(buf, cfg)
-    if err != nil {
-        return errors.Wrap(err, "Error parsing config file")
-    }
-
-    return nil
-}
-
-// expandEnv replaces ${var} or $var in config according to the values of the current environment variables.
-// The replacement is case-sensitive. References to undefined variables are replaced by the empty string.
-// A default value can be given by using the form ${var:default value}.
-func expandEnv(config []byte) []byte {
-    return []byte(os.Expand(string(config), func(key string) string {
-        keyAndDefault := strings.SplitN(key, ":", 2)
-        key = keyAndDefault[0]
-
-        v := os.Getenv(key)
-        if v == "" && len(keyAndDefault) == 2 {
-            v = keyAndDefault[1] // Set value to the default.
-        }
-        return v
-    }))
-}
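For illustration, `expandEnv` in action on a hypothetical Cassandra config snippet, showing both a set variable and the `${var:default}` fallback described in the comment:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// expandEnv copied from above: $var / ${var}, with ${var:default} fallback.
func expandEnv(config []byte) []byte {
	return []byte(os.Expand(string(config), func(key string) string {
		keyAndDefault := strings.SplitN(key, ":", 2)
		key = keyAndDefault[0]
		v := os.Getenv(key)
		if v == "" && len(keyAndDefault) == 2 {
			v = keyAndDefault[1] // fall back to the default
		}
		return v
	}))
}

func main() {
	os.Setenv("CASSANDRA_ADDR", "cassandra:9042")
	// Hypothetical config fragment; KEYSPACE is unset, so the default applies.
	cfg := []byte("addresses: ${CASSANDRA_ADDR}\nkeyspace: ${KEYSPACE:cortex}\n")
	fmt.Print(string(expandEnv(cfg)))
	// Output:
	// addresses: cassandra:9042
	// keyspace: cortex
}
```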
-
-func (c *deleteChunkCommandOptions) run(_ *kingpin.ParseContext) error {
-    err := c.Schema.Load()
-    if err != nil {
-        return errors.Wrap(err, "unable to load schema")
-    }
-
-    var schemaConfig *chunk.PeriodConfig
-    for i := len(c.Schema.Configs) - 1; i >= 0; i-- {
-        if c.Schema.Configs[i].From.Unix() < c.FilterConfig.From {
-            schemaConfig = c.Schema.Configs[i]
-            break
-        }
-    }
-    if schemaConfig == nil {
-        return fmt.Errorf("no schema found for provided from timestamp")
-    }
-
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    fltr := filter.NewMetricFilter(c.FilterConfig)
-
-    var (
-        scanner chunkTool.Scanner
-        deleter chunkTool.Deleter
-    )
-
-    switch schemaConfig.ObjectType {
-    case "bigtable":
-        logrus.Infof("bigtable object store, project=%v, instance=%v", c.Bigtable.Project, c.Bigtable.Instance)
-        scanner, err = toolGCP.NewBigtableScanner(ctx, c.Bigtable.Project, c.Bigtable.Instance)
-        if err != nil {
-            return errors.Wrap(err, "unable to initialize scanner")
-        }
-    case "gcs":
-        logrus.Infof("gcs object store, bucket=%v", c.GCS.BucketName)
-        scanner, err = toolGCP.NewGcsScanner(ctx, c.GCS)
-        if err != nil {
-            return errors.Wrap(err, "unable to initialize scanner")
-        }
-    default:
-        return fmt.Errorf("object store type %v not supported for deletes", schemaConfig.ObjectType)
-    }
-
-    switch schemaConfig.IndexType {
-    case "bigtable":
-        logrus.Infof("bigtable deleter, project=%v, instance=%v", c.Bigtable.Project, c.Bigtable.Instance)
-        deleter, err = toolGCP.NewStorageIndexDeleter(ctx, c.Bigtable)
-        if err != nil {
-            return errors.Wrap(err, "unable to initialize deleter")
-        }
-    case "bigtable-hashed":
-        logrus.Infof("bigtable deleter, project=%v, instance=%v", c.Bigtable.Project, c.Bigtable.Instance)
-        c.Bigtable.DistributeKeys = true
-        deleter, err = toolGCP.NewStorageIndexDeleter(ctx, c.Bigtable)
-        if err != nil {
-            return errors.Wrap(err, "unable to initialize deleter")
-        }
-    default:
-        return fmt.Errorf("index store type %v not supported for deletes", schemaConfig.IndexType)
-    }
-
-    outChan := make(chan chunk.Chunk, 100)
-
-    wg := &sync.WaitGroup{}
-    wg.Add(1)
-
-    go func() {
-        defer func() {
-            cancel()
-            wg.Done()
-        }()
-
-        baseSchema, err := schemaConfig.CreateSchema()
-        if err != nil {
-            logrus.WithError(err).Errorln("unable to create schema")
-            return
-        }
-
-        schema, ok := baseSchema.(chunk.SeriesStoreSchema)
-        if !ok {
-            logrus.Errorln("unable to cast BaseSchema as SeriesStoreSchema")
-            return
-        }
-
-        for chk := range outChan {
-            logrus.WithFields(logrus.Fields{
-                "chunkID": chk.ExternalKey(),
-                "from":    chk.From.Time().String(),
-                "through": chk.Through.Time().String(),
-                "dryrun":  c.DryRun,
-            }).Infoln("found chunk eligible for deletion")
-            entries, err := schema.GetChunkWriteEntries(chk.From, chk.Through, chk.UserID, chk.Metric.Get(labels.MetricName), chk.Metric, chk.ExternalKey())
-            if err != nil {
-                logrus.WithFields(logrus.Fields{
-                    "chunkID": chk.ExternalKey(),
-                    "from":    chk.From.Time().String(),
-                    "through": chk.Through.Time().String(),
-                    "dryrun":  c.DryRun,
-                }).Errorln(err)
-            }
-
-            _, labelEntries, err := schema.GetCacheKeysAndLabelWriteEntries(chk.From, chk.Through, chk.UserID, chk.Metric.Get(labels.MetricName), chk.Metric, chk.ExternalKey())
-            if err != nil {
-                logrus.WithFields(logrus.Fields{
-                    "chunkID": chk.ExternalKey(),
-                    "from":    chk.From.Time().String(),
-                    "through": chk.Through.Time().String(),
-                    "dryrun":  c.DryRun,
-                    "message": "GetCacheKeysAndLabelWriteEntries",
-                }).Errorln(err)
-            }
-
-            if c.DeleteSeries {
-                expandedLabelEntries := make([]chunk.IndexEntry, 0, len(labelEntries))
-                for _, le := range labelEntries {
-                    expandedLabelEntries = append(expandedLabelEntries, le...)
-                }
-
-                // This makes sure the entries for the index are deleted first, so that in case we error,
-                // we can still get the index entries from the chunk entries.
-                entries = append(expandedLabelEntries, entries...)
-            }
-
-            for _, e := range entries {
-                if !c.DryRun {
-                    err := deleter.DeleteEntry(ctx, e, c.DeleteSeries)
-                    if err != nil {
-                        logrus.Errorln(err)
-                    } else {
-                        chunkRefsDeleted.Inc()
-                    }
-                }
-            }
-
-        }
-    }()
-
-    table := schemaConfig.ChunkTables.TableFor(fltr.From)
-
-    start := time.Now()
-    scanRequest := chunkTool.ScanRequest{
-        Table:    table,
-        User:     fltr.User,
-        Interval: &model.Interval{Start: fltr.From, End: fltr.To},
-    }
-    err = scanner.Scan(ctx, scanRequest, func(c chunk.Chunk) bool {
-        return fltr.Filter(c)
-    }, outChan)
-
-    close(outChan)
-    if err != nil {
-        return errors.Wrap(err, "scan failed")
-    }
-    wg.Wait()
-    deletionDuration.Set(time.Since(start).Seconds())
-    return nil
-}
-
-func (c *deleteSeriesCommandOptions) run(_ *kingpin.ParseContext) error {
-    err := c.Schema.Load()
-    if err != nil {
-        return errors.Wrap(err, "unable to load schema")
-    }
-
-    var schemaConfig *chunk.PeriodConfig
-    for i := len(c.Schema.Configs) - 1; i >= 0; i-- {
-        if c.Schema.Configs[i].From.Unix() < c.FilterConfig.From {
-            schemaConfig = c.Schema.Configs[i]
-            break
-        }
-    }
-    if schemaConfig == nil {
-        return fmt.Errorf("no schema found for provided from timestamp")
-    }
-
-    ctx := context.Background()
-
-    fltr := filter.NewMetricFilter(c.FilterConfig)
-
-    var deleter chunkTool.Deleter
-
-    switch schemaConfig.IndexType {
-    case "bigtable":
-        logrus.Infof("bigtable deleter, project=%v, instance=%v", c.Bigtable.Project, c.Bigtable.Instance)
-        deleter, err = toolGCP.NewStorageIndexDeleter(ctx, c.Bigtable)
-        if err != nil {
-            return errors.Wrap(err, "unable to initialize deleter")
-        }
-    case "bigtable-hashed":
-        logrus.Infof("bigtable deleter, project=%v, instance=%v", c.Bigtable.Project, c.Bigtable.Instance)
-        c.Bigtable.DistributeKeys = true
-        deleter, err = toolGCP.NewStorageIndexDeleter(ctx, c.Bigtable)
-        if err != nil {
-            return errors.Wrap(err, "unable to initialize deleter")
-        }
-    default:
-        return fmt.Errorf("index store type %v not supported for deletes", schemaConfig.IndexType)
-    }
-
-    baseSchema, err := schemaConfig.CreateSchema()
-    if err != nil {
-        logrus.WithError(err).Errorln("unable to create schema")
-        return err
-    }
-
-    schema, ok := baseSchema.(chunk.SeriesStoreSchema)
-    if !ok {
-        logrus.Errorln("unable to cast BaseSchema as SeriesStoreSchema")
-        return errors.New("unable to cast BaseSchema as SeriesStoreSchema")
-    }
-
-    deleteMetricNameRows, err := schema.GetReadQueriesForMetric(fltr.From, fltr.To, fltr.User, fltr.Name)
-    if err != nil {
-        return err
-    }
-
-    start := time.Now()
-
-    for _, query := range deleteMetricNameRows {
-        logrus.WithFields(logrus.Fields{
-            "table":     query.TableName,
-            "hashvalue": query.HashValue,
-            "dryrun":    c.DryRun,
-        }).Debugln("deleting series from index")
-        if !c.DryRun {
-            errs, err := deleter.DeleteSeries(ctx, query)
-            for _, e := range errs {
-                logrus.WithError(e).Errorln("series deletion error")
-            }
-            if err != nil {
-                return err
-            }
-            seriesEntriesDeleted.Inc()
-        }
-    }
-
-    for _, lbl := range fltr.Labels {
-        deleteMetricNameRows, err := schema.GetReadQueriesForMetricLabel(fltr.From, fltr.To, fltr.User, fltr.Name, lbl)
-        if err != nil {
-            logrus.Errorln(err)
-        }
-        for _, query := range deleteMetricNameRows {
-            logrus.WithFields(logrus.Fields{
-                "table":     query.TableName,
-                "hashvalue": query.HashValue,
-                "dryrun":    c.DryRun,
-            }).Debugln("deleting series from index")
-            if !c.DryRun {
-                errs, err := deleter.DeleteSeries(ctx, query)
-                for _, e := range errs {
-                    logrus.WithError(e).Errorln("series deletion error")
-                }
-                if err != nil {
-                    return err
-                }
-                labelEntriesDeleted.Inc()
-            }
-        }
-    }
-
-    deletionDuration.Set(time.Since(start).Seconds())
-
-    return nil
-}
-
-func (v *validateIndexCommandOptions) run(_ *kingpin.ParseContext) error {
-    cortexCfg := &cortex.Config{}
-    flagext.RegisterFlags(cortexCfg)
-    err := LoadConfig(v.CortexConfigFile, true, cortexCfg)
-    if err != nil {
-        return errors.Wrap(err, "failed to parse Cortex config")
-    }
-
-    err = cortexCfg.Schema.Load()
-    if err != nil {
-        return errors.Wrap(err, "failed to load schemas")
-    }
-
-    indexValidator, err := toolCassandra.NewIndexValidator(cortexCfg.Storage.CassandraStorageConfig, cortexCfg.Schema, v.TenantID)
-    if err != nil {
-        return err
-    }
-    defer indexValidator.Stop()
-
-    from := model.TimeFromUnix(v.FromTimestamp)
-    to := model.TimeFromUnix(v.ToTimestamp)
-
-    outputFile, err := os.Create(v.InvalidIndexEntryFile)
-    if err != nil {
-        return err
-    }
-    defer outputFile.Close()
-
-    outChan := make(chan string)
-    go func() {
-        defer close(outChan)
-        err = indexValidator.IndexScan(context.Background(), v.Table, from, to, outChan)
-        if err != nil {
-            logrus.WithError(err).Errorln("index validation scan terminated")
-        }
-    }()
-
-    foundInvalidEntriesTotal := 0
-    for s := range outChan {
-        _, err := outputFile.WriteString(s)
-        if err != nil {
-            logrus.WithField("entry", s).WithError(err).Errorln("unable to write invalid index entry to file")
-        }
-        foundInvalidEntriesTotal++
-    }
-
-    logrus.WithField("invalid_entries_total", foundInvalidEntriesTotal).Infoln("index-validation scan complete")
-
-    return nil
-}
diff --git a/pkg/commands/migrate.go b/pkg/commands/migrate.go
deleted file mode 100644
index b804253d9..000000000
--- a/pkg/commands/migrate.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package commands
-
-import (
-    "os"
-
-    "gopkg.in/alecthomas/kingpin.v2"
-    "gopkg.in/yaml.v3"
-
-    "github.com/cortexproject/cortex-tools/pkg/chunk/migrate"
-    "github.com/cortexproject/cortex-tools/pkg/chunk/migrate/reader"
-)
-
-type migrateChunksCommandOptions struct {
-    ConfigFile string
-    Config     migrate.Config
-    Planner    reader.PlannerConfig
-}
-
-func registerMigrateChunksCommandOptions(cmd *kingpin.CmdClause) {
-    migrateChunksCommandOptions := &migrateChunksCommandOptions{}
-    migrateChunksCommand := cmd.Command("migrate", "Migrates chunks from one storage backend to another, optionally remapping them to a new user").Action(migrateChunksCommandOptions.run)
-    migrateChunksCommand.Flag("config-file", "path to migration job config file").Required().StringVar(&migrateChunksCommandOptions.ConfigFile)
-    migrateChunksCommandOptions.Planner.Register(migrateChunksCommand)
-}
-
-func (c *migrateChunksCommandOptions) run(_ *kingpin.ParseContext) error {
-    f, err := os.Open(c.ConfigFile)
-    if err != nil {
-        return err
-    }
-
-    decoder := yaml.NewDecoder(f)
-    decoder.KnownFields(true)
-
-    if err := decoder.Decode(&c.Config); err != nil {
-        return err
-    }
-
-    migrator, err := migrate.NewMigrator(c.Config, c.Planner)
-    if err != nil {
-        return err
-    }
-
-    migrator.Run()
-    return nil
-}
From fdd7b6ce4a3f783036252b5e601d121836c7570d Mon Sep 17 00:00:00 2001
From: Friedrich Gonzalez
Date: Tue, 30 Apr 2024 20:32:58 +0200
Subject: [PATCH 2/3] After go mod tidy

Signed-off-by: Friedrich 
Gonzalez --- .../Masterminds/squirrel/.gitignore | 1 - .../Masterminds/squirrel/.travis.yml | 22 - .../Masterminds/squirrel/LICENSE.txt | 23 - .../github.com/Masterminds/squirrel/README.md | 118 - .../github.com/Masterminds/squirrel/case.go | 118 - .../github.com/Masterminds/squirrel/delete.go | 152 - .../github.com/Masterminds/squirrel/expr.go | 247 - .../github.com/Masterminds/squirrel/insert.go | 207 - .../github.com/Masterminds/squirrel/part.go | 55 - .../Masterminds/squirrel/placeholder.go | 70 - vendor/github.com/Masterminds/squirrel/row.go | 22 - .../github.com/Masterminds/squirrel/select.go | 313 - .../Masterminds/squirrel/squirrel.go | 166 - .../Masterminds/squirrel/statement.go | 83 - .../Masterminds/squirrel/stmtcacher.go | 90 - .../github.com/Masterminds/squirrel/update.go | 232 - .../github.com/Masterminds/squirrel/where.go | 28 - .../github.com/NYTimes/gziphandler/.gitignore | 1 - .../NYTimes/gziphandler/.travis.yml | 10 - .../NYTimes/gziphandler/CODE_OF_CONDUCT.md | 75 - .../NYTimes/gziphandler/CONTRIBUTING.md | 30 - vendor/github.com/NYTimes/gziphandler/LICENSE | 201 - .../github.com/NYTimes/gziphandler/README.md | 56 - vendor/github.com/NYTimes/gziphandler/gzip.go | 532 - .../NYTimes/gziphandler/gzip_go18.go | 43 - .../github.com/PuerkitoBio/purell/.gitignore | 5 - .../github.com/PuerkitoBio/purell/.travis.yml | 12 - vendor/github.com/PuerkitoBio/purell/LICENSE | 12 - .../github.com/PuerkitoBio/purell/README.md | 188 - .../github.com/PuerkitoBio/purell/purell.go | 379 - .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 - vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 - .../github.com/PuerkitoBio/urlesc/README.md | 16 - .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 - .../asaskevich/govalidator/.gitignore | 15 - .../asaskevich/govalidator/.travis.yml | 12 - .../asaskevich/govalidator/CODE_OF_CONDUCT.md | 43 - .../asaskevich/govalidator/CONTRIBUTING.md | 63 - .../github.com/asaskevich/govalidator/LICENSE | 21 - .../asaskevich/govalidator/README.md | 622 -- .../asaskevich/govalidator/arrays.go | 87 - .../asaskevich/govalidator/converter.go | 81 - .../github.com/asaskevich/govalidator/doc.go | 3 - .../asaskevich/govalidator/error.go | 47 - .../asaskevich/govalidator/numerics.go | 100 - .../asaskevich/govalidator/patterns.go | 113 - .../asaskevich/govalidator/types.go | 656 -- .../asaskevich/govalidator/utils.go | 270 - .../asaskevich/govalidator/validator.go | 1769 --- .../asaskevich/govalidator/wercker.yml | 15 - .../aws/aws-sdk-go/service/sns/api.go | 9456 ----------------- .../aws/aws-sdk-go/service/sns/doc.go | 44 - .../aws/aws-sdk-go/service/sns/errors.go | 216 - .../aws/aws-sdk-go/service/sns/service.go | 103 - .../github.com/cenkalti/backoff/v4/.gitignore | 25 - .../cenkalti/backoff/v4/.travis.yml | 10 - vendor/github.com/cenkalti/backoff/v4/LICENSE | 20 - .../github.com/cenkalti/backoff/v4/README.md | 32 - .../github.com/cenkalti/backoff/v4/backoff.go | 66 - .../github.com/cenkalti/backoff/v4/context.go | 62 - .../cenkalti/backoff/v4/exponential.go | 158 - .../github.com/cenkalti/backoff/v4/retry.go | 112 - .../github.com/cenkalti/backoff/v4/ticker.go | 97 - .../github.com/cenkalti/backoff/v4/timer.go | 35 - .../github.com/cenkalti/backoff/v4/tries.go | 38 - .../cortex/pkg/alertmanager/alertmanager.go | 734 -- .../pkg/alertmanager/alertmanager_client.go | 132 - .../pkg/alertmanager/alertmanager_http.go | 112 - .../pkg/alertmanager/alertmanager_metrics.go | 337 - .../pkg/alertmanager/alertmanager_ring.go | 126 - .../alertmanagerpb/alertmanager.pb.go | 1147 -- 
.../alertmanagerpb/alertmanager.proto | 46 - .../alertstore/bucketclient/bucket_client.go | 209 - .../pkg/alertmanager/alertstore/config.go | 82 - .../alertmanager/alertstore/configdb/store.go | 146 - .../alertmanager/alertstore/local/store.go | 194 - .../alertstore/objectclient/store.go | 163 - .../pkg/alertmanager/alertstore/store.go | 111 - .../cortex/pkg/alertmanager/api.go | 470 - .../cortex/pkg/alertmanager/distributor.go | 340 - .../cortex/pkg/alertmanager/lifecycle.go | 28 - .../cortex/pkg/alertmanager/merger/merger.go | 18 - .../pkg/alertmanager/merger/v1_alerts.go | 72 - .../pkg/alertmanager/merger/v1_silence_id.go | 51 - .../pkg/alertmanager/merger/v1_silences.go | 45 - .../alertmanager/merger/v2_alert_groups.go | 104 - .../pkg/alertmanager/merger/v2_alerts.go | 67 - .../pkg/alertmanager/merger/v2_silence_id.go | 34 - .../pkg/alertmanager/merger/v2_silences.go | 65 - .../cortex/pkg/alertmanager/multitenant.go | 1370 --- .../pkg/alertmanager/rate_limited_notifier.go | 65 - .../pkg/alertmanager/state_persister.go | 130 - .../pkg/alertmanager/state_replication.go | 316 - .../cortexproject/cortex/pkg/api/api.go | 426 - .../cortexproject/cortex/pkg/api/handlers.go | 275 - .../cortex/pkg/api/middlewares.go | 29 - .../pkg/chunk/cassandra/authenticator.go | 43 - .../cortex/pkg/chunk/cassandra/fixtures.go | 76 - .../pkg/chunk/cassandra/instrumentation.go | 38 - .../pkg/chunk/cassandra/storage_client.go | 565 - .../pkg/chunk/cassandra/table_client.go | 81 - .../cortex/pkg/chunk/grpc/grpc.pb.go | 6481 ----------- .../cortex/pkg/chunk/grpc/grpc.proto | 142 - .../cortex/pkg/chunk/grpc/grpc_client.go | 35 - .../cortex/pkg/chunk/grpc/index_client.go | 107 - .../cortex/pkg/chunk/grpc/storage_client.go | 118 - .../cortex/pkg/chunk/grpc/table_client.go | 107 - .../pkg/chunk/local/boltdb_index_client.go | 366 - .../pkg/chunk/local/boltdb_table_client.go | 61 - .../cortex/pkg/chunk/local/fixtures.go | 80 - .../pkg/chunk/local/fs_object_client.go | 211 - .../cortex/pkg/chunk/purger/delete_plan.pb.go | 1353 --- .../cortex/pkg/chunk/purger/delete_plan.proto | 34 - .../pkg/chunk/purger/delete_requests_store.go | 394 - .../cortex/pkg/chunk/purger/purger.go | 828 -- .../pkg/chunk/purger/request_handler.go | 183 - .../pkg/chunk/purger/table_provisioning.go | 30 - .../pkg/chunk/purger/tenant_deletion_api.go | 128 - .../cortex/pkg/chunk/purger/tombstones.go | 450 - .../cortex/pkg/chunk/storage/bytes.go | 39 - .../pkg/chunk/storage/caching_fixtures.go | 48 - .../pkg/chunk/storage/caching_index_client.go | 308 - .../chunk/storage/caching_index_client.pb.go | 843 -- .../chunk/storage/caching_index_client.proto | 25 - .../cortex/pkg/chunk/storage/factory.go | 373 - .../cortex/pkg/chunk/storage/metrics.go | 110 - .../cortex/pkg/compactor/blocks_cleaner.go | 462 - .../cortex/pkg/compactor/compactor.go | 894 -- .../cortex/pkg/compactor/compactor_http.go | 53 - .../cortex/pkg/compactor/compactor_ring.go | 107 - .../pkg/compactor/label_remover_filter.go | 29 - .../pkg/compactor/shuffle_sharding_grouper.go | 458 - .../pkg/compactor/shuffle_sharding_planner.go | 54 - .../cortex/pkg/compactor/syncer_metrics.go | 124 - .../cortex/pkg/configs/api/api.go | 367 - .../cortex/pkg/configs/client/client.go | 184 - .../cortex/pkg/configs/config.go | 19 - .../cortexproject/cortex/pkg/configs/db/db.go | 96 - .../cortex/pkg/configs/db/memory/memory.go | 145 - .../pkg/configs/db/postgres/postgres.go | 371 - .../cortex/pkg/configs/db/timed.go | 128 - .../cortex/pkg/configs/db/traced.go | 75 - 
.../cortex/pkg/configs/legacy_promql/ast.go | 341 - .../pkg/configs/legacy_promql/engine.go | 1747 --- .../pkg/configs/legacy_promql/functions.go | 1269 --- .../cortex/pkg/configs/legacy_promql/fuzz.go | 93 - .../cortex/pkg/configs/legacy_promql/lex.go | 906 -- .../cortex/pkg/configs/legacy_promql/parse.go | 1139 -- .../pkg/configs/legacy_promql/printer.go | 234 - .../pkg/configs/legacy_promql/quantile.go | 183 - .../cortex/pkg/configs/legacy_promql/test.go | 626 -- .../cortex/pkg/configs/legacy_promql/value.go | 216 - .../cortex/pkg/configs/userconfig/config.go | 459 - .../cortexproject/cortex/pkg/cortex/cortex.go | 527 - .../cortex/pkg/cortex/modules.go | 919 -- .../cortex/pkg/cortex/runtime_config.go | 187 - .../cortex/pkg/cortex/server_service.go | 70 - .../cortexproject/cortex/pkg/cortex/status.go | 72 - .../cortex/pkg/cortex/tracing.go | 33 - .../cortex/pkg/distributor/distributor.go | 1259 --- .../pkg/distributor/distributor_ring.go | 99 - .../distributorpb/distributor.pb.go | 127 - .../distributorpb/distributor.proto | 15 - .../cortex/pkg/distributor/ha_tracker.go | 495 - .../cortex/pkg/distributor/ha_tracker.pb.go | 494 - .../cortex/pkg/distributor/ha_tracker.proto | 20 - .../cortex/pkg/distributor/ha_tracker_http.go | 101 - .../cortex/pkg/distributor/http_admin.go | 97 - .../cortex/pkg/distributor/http_server.go | 26 - .../pkg/distributor/ingester_client_pool.go | 42 - .../distributor/ingestion_rate_strategy.go | 74 - .../cortex/pkg/distributor/query.go | 437 - .../cortex/pkg/flusher/flusher.go | 107 - .../cortex/pkg/frontend/config.go | 73 - .../pkg/frontend/downstream_roundtripper.go | 41 - .../cortex/pkg/frontend/transport/handler.go | 259 - .../pkg/frontend/transport/roundtripper.go | 58 - .../cortex/pkg/frontend/v1/frontend.go | 353 - .../frontend/v1/frontendv1pb/frontend.pb.go | 1446 --- .../frontend/v1/frontendv1pb/frontend.proto | 49 - .../cortex/pkg/frontend/v2/frontend.go | 319 - .../frontend/v2/frontend_scheduler_worker.go | 329 - .../frontend/v2/frontendv2pb/frontend.pb.go | 782 -- .../frontend/v2/frontendv2pb/frontend.proto | 28 - .../cortex/pkg/ingester/active_series.go | 244 - .../cortex/pkg/ingester/errors.go | 73 - .../cortex/pkg/ingester/flush.go | 430 - .../cortex/pkg/ingester/index/index.go | 324 - .../cortex/pkg/ingester/ingester.go | 1188 --- .../cortex/pkg/ingester/ingester_v2.go | 2364 ----- .../cortex/pkg/ingester/instance_limits.go | 32 - .../cortex/pkg/ingester/label_pairs.go | 90 - .../cortex/pkg/ingester/limiter.go | 288 - .../cortex/pkg/ingester/locker.go | 58 - .../cortex/pkg/ingester/mapper.go | 155 - .../cortex/pkg/ingester/metrics.go | 657 -- .../cortex/pkg/ingester/series.go | 260 - .../cortex/pkg/ingester/series_map.go | 110 - .../cortex/pkg/ingester/transfer.go | 390 - .../pkg/ingester/user_metrics_metadata.go | 110 - .../cortex/pkg/ingester/user_state.go | 418 - .../cortexproject/cortex/pkg/ingester/wal.go | 1134 -- .../cortex/pkg/ingester/wal.pb.go | 607 -- .../cortex/pkg/ingester/wal.proto | 16 - .../cortex/pkg/querier/batch/batch.go | 133 - .../cortex/pkg/querier/batch/chunk.go | 70 - .../cortex/pkg/querier/batch/merge.go | 187 - .../pkg/querier/batch/non_overlapping.go | 68 - .../cortex/pkg/querier/batch/stream.go | 110 - .../cortexproject/cortex/pkg/querier/block.go | 213 - .../pkg/querier/blocks_consistency_checker.go | 86 - .../pkg/querier/blocks_finder_bucket_index.go | 109 - .../pkg/querier/blocks_finder_bucket_scan.go | 433 - .../pkg/querier/blocks_store_balanced_set.go | 103 - .../pkg/querier/blocks_store_queryable.go | 969 -- 
.../querier/blocks_store_replicated_set.go | 159 - .../pkg/querier/chunk_store_queryable.go | 121 - .../cortex/pkg/querier/chunks_handler.go | 93 - .../pkg/querier/chunkstore/chunkstore.go | 16 - .../pkg/querier/distributor_queryable.go | 324 - .../cortexproject/cortex/pkg/querier/dummy.go | 43 - .../pkg/querier/error_translate_queryable.go | 206 - .../pkg/querier/iterators/chunk_iterator.go | 64 - .../querier/iterators/chunk_merge_iterator.go | 208 - .../cortex/pkg/querier/lazyquery/lazyquery.go | 117 - .../cortex/pkg/querier/matrix.go | 25 - .../cortex/pkg/querier/metadata_handler.go | 51 - .../cortex/pkg/querier/querier.go | 646 -- .../pkg/querier/queryrange/instrumentation.go | 64 - .../cortex/pkg/querier/queryrange/limits.go | 97 - .../pkg/querier/queryrange/query_range.go | 568 - .../pkg/querier/queryrange/queryable.go | 154 - .../pkg/querier/queryrange/queryrange.pb.go | 4208 -------- .../pkg/querier/queryrange/queryrange.proto | 87 - .../pkg/querier/queryrange/querysharding.go | 262 - .../pkg/querier/queryrange/results_cache.go | 715 -- .../cortex/pkg/querier/queryrange/retry.go | 85 - .../pkg/querier/queryrange/roundtrip.go | 308 - .../querier/queryrange/split_by_interval.go | 121 - .../pkg/querier/queryrange/step_align.go | 23 - .../pkg/querier/queryrange/test_utils.go | 185 - .../cortex/pkg/querier/queryrange/util.go | 72 - .../cortex/pkg/querier/queryrange/value.go | 125 - .../cortex/pkg/querier/remote_read.go | 102 - .../cortex/pkg/querier/series/series_set.go | 385 - .../cortex/pkg/querier/series_with_chunks.go | 15 - .../cortex/pkg/querier/stats/stats.go | 103 - .../cortex/pkg/querier/stats/stats.pb.go | 500 - .../cortex/pkg/querier/stats/stats.proto | 20 - .../pkg/querier/stats/time_middleware.go | 30 - .../pkg/querier/store_gateway_client.go | 106 - .../tenantfederation/merge_queryable.go | 474 - .../tenantfederation/tenant_federation.go | 14 - .../cortex/pkg/querier/testutils.go | 79 - .../pkg/querier/timeseries_series_set.go | 103 - .../pkg/querier/worker/frontend_processor.go | 148 - .../pkg/querier/worker/processor_manager.go | 86 - .../pkg/querier/worker/scheduler_processor.go | 228 - .../cortex/pkg/querier/worker/worker.go | 272 - .../cortex/pkg/ring/client/pool.go | 205 - .../pkg/ring/client/ring_service_discovery.go | 25 - .../cortexproject/cortex/pkg/ruler/api.go | 553 - .../cortex/pkg/ruler/client_pool.go | 101 - .../cortexproject/cortex/pkg/ruler/compat.go | 296 - .../cortex/pkg/ruler/lifecycle.go | 28 - .../cortexproject/cortex/pkg/ruler/manager.go | 301 - .../cortex/pkg/ruler/manager_metrics.go | 224 - .../cortexproject/cortex/pkg/ruler/mapper.go | 161 - .../cortex/pkg/ruler/notifier.go | 201 - .../cortexproject/cortex/pkg/ruler/ruler.go | 909 -- .../cortex/pkg/ruler/ruler.pb.go | 2380 ----- .../cortex/pkg/ruler/ruler.proto | 68 - .../cortex/pkg/ruler/ruler_ring.go | 104 - .../cortex/pkg/ruler/rulespb/compat.go | 75 - .../cortex/pkg/ruler/rulespb/custom.go | 21 - .../cortex/pkg/ruler/rulespb/rules.pb.go | 1278 --- .../cortex/pkg/ruler/rulespb/rules.proto | 47 - .../rulestore/bucketclient/bucket_client.go | 319 - .../cortex/pkg/ruler/rulestore/config.go | 37 - .../pkg/ruler/rulestore/configdb/store.go | 136 - .../cortex/pkg/ruler/rulestore/local/local.go | 182 - .../rulestore/objectclient/rule_store.go | 289 - .../cortex/pkg/ruler/rulestore/store.go | 47 - .../cortexproject/cortex/pkg/ruler/storage.go | 145 - .../cortex/pkg/scheduler/queue/queue.go | 224 - .../cortex/pkg/scheduler/queue/user_queues.go | 305 - .../cortex/pkg/scheduler/scheduler.go | 528 - 
.../pkg/scheduler/schedulerpb/scheduler.pb.go | 2323 ---- .../pkg/scheduler/schedulerpb/scheduler.proto | 99 - .../bucket_index_metadata_fetcher.go | 143 - .../bucket_store_inmemory_server.go | 66 - .../pkg/storegateway/bucket_store_metrics.go | 243 - .../cortex/pkg/storegateway/bucket_stores.go | 639 -- .../pkg/storegateway/chunk_bytes_pool.go | 50 - .../cortex/pkg/storegateway/gateway.go | 376 - .../cortex/pkg/storegateway/gateway_http.go | 53 - .../cortex/pkg/storegateway/gateway_ring.go | 149 - .../storegateway/metadata_fetcher_filters.go | 78 - .../storegateway/metadata_fetcher_metrics.go | 79 - .../cortex/pkg/storegateway/partitioner.go | 64 - .../pkg/storegateway/sharding_strategy.go | 244 - .../storegateway/storegatewaypb/gateway.pb.go | 247 - .../storegateway/storegatewaypb/gateway.proto | 23 - .../cortex/pkg/util/chunkcompat/compat.go | 101 - .../cortex/pkg/util/concurrency/buffer.go | 26 - .../cortex/pkg/util/concurrency/runner.go | 106 - .../cortex/pkg/util/fakeauth/fake_auth.go | 78 - .../cortex/pkg/util/httpgrpcutil/carrier.go | 40 - .../cortex/pkg/util/limiter/query_limiter.go | 110 - .../cortex/pkg/util/limiter/rate_limiter.go | 122 - .../cortex/pkg/util/modules/module_service.go | 114 - .../util/modules/module_service_wrapper.go | 31 - .../cortex/pkg/util/modules/modules.go | 237 - .../cortex/pkg/util/net/firewall_dialer.go | 96 - .../cortex/pkg/util/process/collector.go | 132 - .../cortex/pkg/util/push/push.go | 56 - .../cortex/pkg/util/runtimeconfig/manager.go | 210 - .../docker/go-units/CONTRIBUTING.md | 67 - vendor/github.com/docker/go-units/LICENSE | 191 - vendor/github.com/docker/go-units/MAINTAINERS | 46 - vendor/github.com/docker/go-units/README.md | 16 - vendor/github.com/docker/go-units/circle.yml | 11 - vendor/github.com/docker/go-units/duration.go | 35 - vendor/github.com/docker/go-units/size.go | 108 - vendor/github.com/docker/go-units/ulimit.go | 123 - vendor/github.com/felixge/fgprof/LICENSE.txt | 8 - vendor/github.com/felixge/fgprof/README.md | 214 - vendor/github.com/felixge/fgprof/fgprof.go | 97 - vendor/github.com/felixge/fgprof/format.go | 102 - vendor/github.com/felixge/fgprof/handler.go | 32 - vendor/github.com/felixge/fgprof/pprof.go | 56 - vendor/github.com/go-kit/kit/LICENSE | 22 - vendor/github.com/go-kit/kit/log/README.md | 160 - vendor/github.com/go-kit/kit/log/doc.go | 118 - .../github.com/go-kit/kit/log/json_logger.go | 15 - vendor/github.com/go-kit/kit/log/level/doc.go | 25 - .../github.com/go-kit/kit/log/level/level.go | 120 - vendor/github.com/go-kit/kit/log/log.go | 51 - .../go-kit/kit/log/logfmt_logger.go | 15 - .../github.com/go-kit/kit/log/nop_logger.go | 8 - vendor/github.com/go-kit/kit/log/stdlib.go | 54 - vendor/github.com/go-kit/kit/log/sync.go | 37 - vendor/github.com/go-kit/kit/log/value.go | 52 - .../go-openapi/analysis/.codecov.yml | 5 - .../go-openapi/analysis/.gitattributes | 2 - .../github.com/go-openapi/analysis/.gitignore | 5 - .../go-openapi/analysis/.golangci.yml | 53 - .../go-openapi/analysis/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/analysis/LICENSE | 202 - .../github.com/go-openapi/analysis/README.md | 31 - .../go-openapi/analysis/analyzer.go | 1064 -- .../go-openapi/analysis/appveyor.yml | 32 - .../github.com/go-openapi/analysis/debug.go | 23 - vendor/github.com/go-openapi/analysis/doc.go | 43 - .../github.com/go-openapi/analysis/fixer.go | 79 - .../github.com/go-openapi/analysis/flatten.go | 802 -- .../go-openapi/analysis/flatten_name.go | 293 - .../go-openapi/analysis/flatten_options.go | 78 - 
.../analysis/internal/debug/debug.go | 41 - .../internal/flatten/normalize/normalize.go | 87 - .../internal/flatten/operations/operations.go | 90 - .../internal/flatten/replace/replace.go | 434 - .../flatten/schutils/flatten_schema.go | 29 - .../analysis/internal/flatten/sortref/keys.go | 201 - .../internal/flatten/sortref/sort_ref.go | 141 - .../github.com/go-openapi/analysis/mixin.go | 515 - .../github.com/go-openapi/analysis/schema.go | 256 - .../go-openapi/errors/.gitattributes | 1 - .../github.com/go-openapi/errors/.gitignore | 2 - .../go-openapi/errors/.golangci.yml | 46 - .../go-openapi/errors/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/errors/LICENSE | 202 - vendor/github.com/go-openapi/errors/README.md | 11 - vendor/github.com/go-openapi/errors/api.go | 181 - vendor/github.com/go-openapi/errors/auth.go | 22 - vendor/github.com/go-openapi/errors/doc.go | 28 - .../github.com/go-openapi/errors/headers.go | 103 - .../go-openapi/errors/middleware.go | 51 - .../github.com/go-openapi/errors/parsing.go | 78 - vendor/github.com/go-openapi/errors/schema.go | 611 -- .../go-openapi/jsonpointer/.editorconfig | 26 - .../go-openapi/jsonpointer/.gitignore | 1 - .../go-openapi/jsonpointer/.travis.yml | 15 - .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 - .../github.com/go-openapi/jsonpointer/LICENSE | 202 - .../go-openapi/jsonpointer/README.md | 15 - .../go-openapi/jsonpointer/pointer.go | 390 - .../go-openapi/jsonreference/.gitignore | 1 - .../go-openapi/jsonreference/.golangci.yml | 41 - .../go-openapi/jsonreference/.travis.yml | 24 - .../jsonreference/CODE_OF_CONDUCT.md | 74 - .../go-openapi/jsonreference/LICENSE | 202 - .../go-openapi/jsonreference/README.md | 15 - .../go-openapi/jsonreference/reference.go | 156 - .../github.com/go-openapi/loads/.editorconfig | 26 - vendor/github.com/go-openapi/loads/.gitignore | 4 - .../github.com/go-openapi/loads/.golangci.yml | 44 - .../github.com/go-openapi/loads/.travis.yml | 25 - .../go-openapi/loads/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/loads/LICENSE | 202 - vendor/github.com/go-openapi/loads/README.md | 6 - vendor/github.com/go-openapi/loads/doc.go | 21 - vendor/github.com/go-openapi/loads/loaders.go | 134 - vendor/github.com/go-openapi/loads/options.go | 61 - vendor/github.com/go-openapi/loads/spec.go | 266 - .../go-openapi/runtime/.editorconfig | 26 - .../go-openapi/runtime/.gitattributes | 1 - .../github.com/go-openapi/runtime/.gitignore | 5 - .../go-openapi/runtime/.golangci.yml | 44 - .../go-openapi/runtime/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/runtime/LICENSE | 202 - .../github.com/go-openapi/runtime/README.md | 7 - .../go-openapi/runtime/bytestream.go | 169 - .../go-openapi/runtime/client_auth_info.go | 30 - .../go-openapi/runtime/client_operation.go | 41 - .../go-openapi/runtime/client_request.go | 153 - .../go-openapi/runtime/client_response.go | 106 - .../go-openapi/runtime/constants.go | 47 - vendor/github.com/go-openapi/runtime/csv.go | 77 - .../github.com/go-openapi/runtime/discard.go | 9 - vendor/github.com/go-openapi/runtime/file.go | 19 - .../go-openapi/runtime/flagext/byte_size.go | 38 - .../github.com/go-openapi/runtime/headers.go | 45 - .../go-openapi/runtime/interfaces.go | 112 - vendor/github.com/go-openapi/runtime/json.go | 38 - .../go-openapi/runtime/logger/logger.go | 20 - .../go-openapi/runtime/logger/standard.go | 22 - .../go-openapi/runtime/middleware/context.go | 622 -- .../runtime/middleware/denco/LICENSE | 19 - .../runtime/middleware/denco/README.md | 180 - 
.../runtime/middleware/denco/router.go | 460 - .../runtime/middleware/denco/server.go | 106 - .../runtime/middleware/denco/util.go | 12 - .../go-openapi/runtime/middleware/doc.go | 62 - .../go-openapi/runtime/middleware/go18.go | 9 - .../runtime/middleware/header/header.go | 329 - .../runtime/middleware/negotiate.go | 98 - .../runtime/middleware/not_implemented.go | 67 - .../runtime/middleware/operation.go | 30 - .../runtime/middleware/parameter.go | 481 - .../go-openapi/runtime/middleware/pre_go18.go | 9 - .../go-openapi/runtime/middleware/rapidoc.go | 90 - .../go-openapi/runtime/middleware/redoc.go | 103 - .../go-openapi/runtime/middleware/request.go | 104 - .../go-openapi/runtime/middleware/router.go | 488 - .../go-openapi/runtime/middleware/security.go | 39 - .../go-openapi/runtime/middleware/spec.go | 48 - .../runtime/middleware/swaggerui.go | 162 - .../runtime/middleware/untyped/api.go | 286 - .../runtime/middleware/validation.go | 126 - .../github.com/go-openapi/runtime/request.go | 139 - .../runtime/security/authenticator.go | 276 - .../go-openapi/runtime/security/authorizer.go | 27 - .../github.com/go-openapi/runtime/statuses.go | 90 - vendor/github.com/go-openapi/runtime/text.go | 116 - .../github.com/go-openapi/runtime/values.go | 19 - vendor/github.com/go-openapi/runtime/xml.go | 36 - .../github.com/go-openapi/spec/.editorconfig | 26 - vendor/github.com/go-openapi/spec/.gitignore | 2 - .../github.com/go-openapi/spec/.golangci.yml | 42 - vendor/github.com/go-openapi/spec/.travis.yml | 31 - .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/spec/LICENSE | 202 - vendor/github.com/go-openapi/spec/README.md | 34 - .../github.com/go-openapi/spec/appveyor.yml | 32 - vendor/github.com/go-openapi/spec/bindata.go | 297 - vendor/github.com/go-openapi/spec/cache.go | 98 - .../go-openapi/spec/contact_info.go | 57 - vendor/github.com/go-openapi/spec/debug.go | 49 - vendor/github.com/go-openapi/spec/errors.go | 19 - vendor/github.com/go-openapi/spec/expander.go | 594 -- .../go-openapi/spec/external_docs.go | 24 - vendor/github.com/go-openapi/spec/header.go | 203 - vendor/github.com/go-openapi/spec/info.go | 165 - vendor/github.com/go-openapi/spec/items.go | 234 - vendor/github.com/go-openapi/spec/license.go | 56 - .../github.com/go-openapi/spec/normalizer.go | 203 - .../go-openapi/spec/normalizer_nonwindows.go | 43 - .../go-openapi/spec/normalizer_windows.go | 154 - .../github.com/go-openapi/spec/operation.go | 397 - .../github.com/go-openapi/spec/parameter.go | 326 - .../github.com/go-openapi/spec/path_item.go | 87 - vendor/github.com/go-openapi/spec/paths.go | 97 - .../github.com/go-openapi/spec/properties.go | 91 - vendor/github.com/go-openapi/spec/ref.go | 193 - vendor/github.com/go-openapi/spec/resolver.go | 127 - vendor/github.com/go-openapi/spec/response.go | 152 - .../github.com/go-openapi/spec/responses.go | 127 - vendor/github.com/go-openapi/spec/schema.go | 646 -- .../go-openapi/spec/schema_loader.go | 338 - .../go-openapi/spec/security_scheme.go | 170 - vendor/github.com/go-openapi/spec/spec.go | 78 - vendor/github.com/go-openapi/spec/swagger.go | 448 - vendor/github.com/go-openapi/spec/tag.go | 75 - .../github.com/go-openapi/spec/validations.go | 215 - .../github.com/go-openapi/spec/xml_object.go | 68 - .../go-openapi/strfmt/.editorconfig | 26 - .../go-openapi/strfmt/.gitattributes | 2 - .../github.com/go-openapi/strfmt/.gitignore | 2 - .../go-openapi/strfmt/.golangci.yml | 49 - .../go-openapi/strfmt/CODE_OF_CONDUCT.md | 74 - 
vendor/github.com/go-openapi/strfmt/LICENSE | 202 - vendor/github.com/go-openapi/strfmt/README.md | 88 - vendor/github.com/go-openapi/strfmt/bson.go | 165 - vendor/github.com/go-openapi/strfmt/date.go | 187 - .../github.com/go-openapi/strfmt/default.go | 2035 ---- vendor/github.com/go-openapi/strfmt/doc.go | 18 - .../github.com/go-openapi/strfmt/duration.go | 211 - vendor/github.com/go-openapi/strfmt/format.go | 326 - vendor/github.com/go-openapi/strfmt/time.go | 294 - vendor/github.com/go-openapi/strfmt/ulid.go | 225 - .../github.com/go-openapi/swag/.editorconfig | 26 - .../github.com/go-openapi/swag/.gitattributes | 2 - vendor/github.com/go-openapi/swag/.gitignore | 4 - .../github.com/go-openapi/swag/.golangci.yml | 50 - .../go-openapi/swag/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/swag/LICENSE | 202 - vendor/github.com/go-openapi/swag/README.md | 21 - vendor/github.com/go-openapi/swag/convert.go | 208 - .../go-openapi/swag/convert_types.go | 730 -- vendor/github.com/go-openapi/swag/doc.go | 32 - vendor/github.com/go-openapi/swag/file.go | 33 - vendor/github.com/go-openapi/swag/json.go | 312 - vendor/github.com/go-openapi/swag/loading.go | 120 - .../github.com/go-openapi/swag/name_lexem.go | 87 - vendor/github.com/go-openapi/swag/net.go | 38 - vendor/github.com/go-openapi/swag/path.go | 59 - .../github.com/go-openapi/swag/post_go18.go | 24 - .../github.com/go-openapi/swag/post_go19.go | 68 - vendor/github.com/go-openapi/swag/pre_go18.go | 24 - vendor/github.com/go-openapi/swag/pre_go19.go | 70 - vendor/github.com/go-openapi/swag/split.go | 262 - vendor/github.com/go-openapi/swag/util.go | 385 - vendor/github.com/go-openapi/swag/yaml.go | 246 - .../go-openapi/validate/.editorconfig | 26 - .../go-openapi/validate/.gitattributes | 2 - .../github.com/go-openapi/validate/.gitignore | 5 - .../go-openapi/validate/.golangci.yml | 50 - .../go-openapi/validate/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/validate/LICENSE | 202 - .../github.com/go-openapi/validate/README.md | 38 - .../go-openapi/validate/appveyor.yml | 32 - .../github.com/go-openapi/validate/context.go | 56 - .../github.com/go-openapi/validate/debug.go | 47 - .../go-openapi/validate/default_validator.go | 281 - vendor/github.com/go-openapi/validate/doc.go | 85 - .../go-openapi/validate/example_validator.go | 270 - .../github.com/go-openapi/validate/formats.go | 69 - .../github.com/go-openapi/validate/helpers.go | 324 - .../go-openapi/validate/object_validator.go | 279 - .../github.com/go-openapi/validate/options.go | 43 - .../github.com/go-openapi/validate/result.go | 486 - vendor/github.com/go-openapi/validate/rexp.go | 71 - .../github.com/go-openapi/validate/schema.go | 260 - .../go-openapi/validate/schema_messages.go | 78 - .../go-openapi/validate/schema_option.go | 54 - .../go-openapi/validate/schema_props.go | 240 - .../go-openapi/validate/slice_validator.go | 105 - vendor/github.com/go-openapi/validate/spec.go | 795 -- .../go-openapi/validate/spec_messages.go | 360 - vendor/github.com/go-openapi/validate/type.go | 177 - .../go-openapi/validate/update-fixtures.sh | 15 - .../go-openapi/validate/validator.go | 645 -- .../github.com/go-openapi/validate/values.go | 446 - vendor/github.com/go-stack/stack/LICENSE.md | 21 - vendor/github.com/go-stack/stack/README.md | 38 - vendor/github.com/go-stack/stack/stack.go | 400 - vendor/github.com/gocql/gocql/.gitignore | 5 - vendor/github.com/gocql/gocql/.travis.yml | 49 - vendor/github.com/gocql/gocql/AUTHORS | 117 - vendor/github.com/gocql/gocql/CONTRIBUTING.md | 78 -
vendor/github.com/gocql/gocql/LICENSE | 27 - vendor/github.com/gocql/gocql/README.md | 231 - .../gocql/gocql/address_translators.go | 26 - vendor/github.com/gocql/gocql/cluster.go | 233 - vendor/github.com/gocql/gocql/compressor.go | 28 - vendor/github.com/gocql/gocql/conn.go | 1474 --- .../github.com/gocql/gocql/connectionpool.go | 661 -- vendor/github.com/gocql/gocql/control.go | 497 - vendor/github.com/gocql/gocql/cqltypes.go | 11 - vendor/github.com/gocql/gocql/debug_off.go | 5 - vendor/github.com/gocql/gocql/debug_on.go | 5 - vendor/github.com/gocql/gocql/doc.go | 9 - vendor/github.com/gocql/gocql/errors.go | 125 - vendor/github.com/gocql/gocql/events.go | 285 - vendor/github.com/gocql/gocql/filters.go | 81 - vendor/github.com/gocql/gocql/frame.go | 2106 ---- vendor/github.com/gocql/gocql/fuzz.go | 33 - vendor/github.com/gocql/gocql/helpers.go | 437 - vendor/github.com/gocql/gocql/host_source.go | 716 -- .../github.com/gocql/gocql/host_source_gen.go | 45 - .../gocql/gocql/install_test_deps.sh | 16 - vendor/github.com/gocql/gocql/integration.sh | 95 - .../gocql/gocql/internal/lru/lru.go | 127 - .../gocql/gocql/internal/murmur/murmur.go | 135 - .../gocql/internal/murmur/murmur_appengine.go | 11 - .../gocql/internal/murmur/murmur_unsafe.go | 16 - .../gocql/gocql/internal/streams/streams.go | 140 - vendor/github.com/gocql/gocql/marshal.go | 2452 ----- vendor/github.com/gocql/gocql/metadata.go | 1464 --- vendor/github.com/gocql/gocql/policies.go | 967 -- .../github.com/gocql/gocql/prepared_cache.go | 89 - .../github.com/gocql/gocql/query_executor.go | 161 - vendor/github.com/gocql/gocql/ring.go | 152 - vendor/github.com/gocql/gocql/session.go | 2075 ---- vendor/github.com/gocql/gocql/token.go | 222 - vendor/github.com/gocql/gocql/topology.go | 277 - vendor/github.com/gocql/gocql/uuid.go | 315 - vendor/github.com/gofrs/uuid/.gitignore | 15 - vendor/github.com/gofrs/uuid/LICENSE | 20 - vendor/github.com/gofrs/uuid/README.md | 109 - vendor/github.com/gofrs/uuid/codec.go | 212 - vendor/github.com/gofrs/uuid/fuzz.go | 47 - vendor/github.com/gofrs/uuid/generator.go | 573 - vendor/github.com/gofrs/uuid/sql.go | 109 - vendor/github.com/gofrs/uuid/uuid.go | 292 - .../golang-migrate/migrate/v4/.dockerignore | 13 - .../golang-migrate/migrate/v4/.gitignore | 8 - .../golang-migrate/migrate/v4/.golangci.yml | 27 - .../golang-migrate/migrate/v4/.travis.yml | 135 - .../golang-migrate/migrate/v4/CONTRIBUTING.md | 24 - .../golang-migrate/migrate/v4/Dockerfile | 23 - .../golang-migrate/migrate/v4/FAQ.md | 76 - .../migrate/v4/GETTING_STARTED.md | 43 - .../golang-migrate/migrate/v4/LICENSE | 28 - .../golang-migrate/migrate/v4/MIGRATIONS.md | 86 - .../golang-migrate/migrate/v4/Makefile | 105 - .../golang-migrate/migrate/v4/README.md | 181 - .../migrate/v4/database/driver.go | 122 - .../migrate/v4/database/error.go | 27 - .../migrate/v4/database/postgres/README.md | 28 - .../migrate/v4/database/postgres/TUTORIAL.md | 148 - .../migrate/v4/database/postgres/postgres.go | 366 - .../migrate/v4/database/util.go | 19 - .../migrate/v4/docker-deploy.sh | 5 - .../migrate/v4/internal/url/url.go | 25 - .../golang-migrate/migrate/v4/log.go | 12 - .../golang-migrate/migrate/v4/migrate.go | 980 -- .../golang-migrate/migrate/v4/migration.go | 160 - .../migrate/v4/source/driver.go | 118 - .../migrate/v4/source/file/README.md | 4 - .../migrate/v4/source/file/file.go | 127 - .../migrate/v4/source/migration.go | 143 - .../golang-migrate/migrate/v4/source/parse.go | 39 - .../golang-migrate/migrate/v4/util.go | 62 -
vendor/github.com/google/pprof/AUTHORS | 7 - vendor/github.com/google/pprof/CONTRIBUTORS | 16 - vendor/github.com/google/pprof/LICENSE | 202 - .../github.com/google/pprof/profile/encode.go | 576 - .../github.com/google/pprof/profile/filter.go | 270 - .../github.com/google/pprof/profile/index.go | 64 - .../pprof/profile/legacy_java_profile.go | 315 - .../google/pprof/profile/legacy_profile.go | 1225 --- .../github.com/google/pprof/profile/merge.go | 482 - .../google/pprof/profile/profile.go | 814 -- .../github.com/google/pprof/profile/proto.go | 370 - .../github.com/google/pprof/profile/prune.go | 178 - .../hailocab/go-hostpool/.gitignore | 22 - .../hailocab/go-hostpool/.travis.yml | 0 .../github.com/hailocab/go-hostpool/LICENSE | 21 - .../github.com/hailocab/go-hostpool/README.md | 17 - .../hailocab/go-hostpool/epsilon_greedy.go | 220 - .../go-hostpool/epsilon_value_calculators.go | 40 - .../hailocab/go-hostpool/host_entry.go | 62 - .../hailocab/go-hostpool/hostpool.go | 243 - .../hashicorp/golang-lru/.gitignore | 23 - vendor/github.com/hashicorp/golang-lru/2q.go | 223 - .../github.com/hashicorp/golang-lru/README.md | 25 - vendor/github.com/hashicorp/golang-lru/arc.go | 257 - vendor/github.com/hashicorp/golang-lru/doc.go | 21 - vendor/github.com/hashicorp/golang-lru/lru.go | 150 - .../github.com/jessevdk/go-flags/.travis.yml | 39 - vendor/github.com/jessevdk/go-flags/LICENSE | 26 - vendor/github.com/jessevdk/go-flags/README.md | 139 - vendor/github.com/jessevdk/go-flags/arg.go | 27 - .../jessevdk/go-flags/check_crosscompile.sh | 20 - .../github.com/jessevdk/go-flags/closest.go | 59 - .../github.com/jessevdk/go-flags/command.go | 465 - .../jessevdk/go-flags/completion.go | 315 - .../github.com/jessevdk/go-flags/convert.go | 357 - vendor/github.com/jessevdk/go-flags/error.go | 138 - vendor/github.com/jessevdk/go-flags/flags.go | 263 - vendor/github.com/jessevdk/go-flags/group.go | 429 - vendor/github.com/jessevdk/go-flags/help.go | 514 - vendor/github.com/jessevdk/go-flags/ini.go | 615 -- vendor/github.com/jessevdk/go-flags/man.go | 223 - .../github.com/jessevdk/go-flags/multitag.go | 140 - vendor/github.com/jessevdk/go-flags/option.go | 569 - .../jessevdk/go-flags/optstyle_other.go | 67 - .../jessevdk/go-flags/optstyle_windows.go | 108 - vendor/github.com/jessevdk/go-flags/parser.go | 714 -- .../github.com/jessevdk/go-flags/termsize.go | 15 - .../jessevdk/go-flags/termsize_nosysioctl.go | 7 - .../jessevdk/go-flags/termsize_windows.go | 85 - vendor/github.com/josharian/intern/README.md | 5 - vendor/github.com/josharian/intern/intern.go | 44 - vendor/github.com/josharian/intern/license.md | 21 - .../julienschmidt/httprouter/.travis.yml | 18 - .../julienschmidt/httprouter/LICENSE | 29 - .../julienschmidt/httprouter/README.md | 300 - .../julienschmidt/httprouter/path.go | 123 - .../julienschmidt/httprouter/router.go | 452 - .../julienschmidt/httprouter/tree.go | 666 -- vendor/github.com/lann/builder/.gitignore | 2 - vendor/github.com/lann/builder/.travis.yml | 7 - vendor/github.com/lann/builder/LICENSE | 21 - vendor/github.com/lann/builder/README.md | 68 - vendor/github.com/lann/builder/builder.go | 225 - vendor/github.com/lann/builder/reflect.go | 24 - vendor/github.com/lann/builder/registry.go | 59 - vendor/github.com/lann/ps/LICENSE | 7 - vendor/github.com/lann/ps/README.md | 10 - vendor/github.com/lann/ps/list.go | 93 - vendor/github.com/lann/ps/map.go | 311 - vendor/github.com/lann/ps/profile.sh | 3 - vendor/github.com/lib/pq/.gitignore | 4 - vendor/github.com/lib/pq/.travis.sh | 73 - 
vendor/github.com/lib/pq/.travis.yml | 44 - vendor/github.com/lib/pq/CONTRIBUTING.md | 29 - vendor/github.com/lib/pq/LICENSE.md | 8 - vendor/github.com/lib/pq/README.md | 95 - vendor/github.com/lib/pq/TESTS.md | 33 - vendor/github.com/lib/pq/array.go | 756 -- vendor/github.com/lib/pq/buf.go | 91 - vendor/github.com/lib/pq/conn.go | 1923 ---- vendor/github.com/lib/pq/conn_go18.go | 149 - vendor/github.com/lib/pq/connector.go | 110 - vendor/github.com/lib/pq/copy.go | 282 - vendor/github.com/lib/pq/doc.go | 245 - vendor/github.com/lib/pq/encode.go | 602 -- vendor/github.com/lib/pq/error.go | 515 - vendor/github.com/lib/pq/notify.go | 797 -- vendor/github.com/lib/pq/oid/doc.go | 6 - vendor/github.com/lib/pq/oid/types.go | 343 - vendor/github.com/lib/pq/rows.go | 93 - vendor/github.com/lib/pq/scram/scram.go | 264 - vendor/github.com/lib/pq/ssl.go | 175 - vendor/github.com/lib/pq/ssl_permissions.go | 20 - vendor/github.com/lib/pq/ssl_windows.go | 9 - vendor/github.com/lib/pq/url.go | 76 - vendor/github.com/lib/pq/user_posix.go | 24 - vendor/github.com/lib/pq/user_windows.go | 27 - vendor/github.com/lib/pq/uuid.go | 23 - vendor/github.com/mailru/easyjson/LICENSE | 7 - .../github.com/mailru/easyjson/buffer/pool.go | 278 - .../mailru/easyjson/jlexer/bytestostr.go | 24 - .../easyjson/jlexer/bytestostr_nounsafe.go | 13 - .../mailru/easyjson/jlexer/error.go | 15 - .../mailru/easyjson/jlexer/lexer.go | 1244 --- .../mailru/easyjson/jwriter/writer.go | 405 - vendor/github.com/oklog/run/.gitignore | 14 - vendor/github.com/oklog/run/LICENSE | 201 - vendor/github.com/oklog/run/README.md | 75 - vendor/github.com/oklog/run/actors.go | 38 - vendor/github.com/oklog/run/group.go | 62 - .../prometheus/alertmanager/api/api.go | 230 - .../alertmanager/api/metrics/metrics.go | 54 - .../prometheus/alertmanager/api/v1/api.go | 808 -- .../prometheus/alertmanager/api/v2/api.go | 674 -- .../prometheus/alertmanager/api/v2/compat.go | 197 - .../alertmanager/api/v2/models/alert.go | 102 - .../alertmanager/api/v2/models/alert_group.go | 142 - .../api/v2/models/alert_groups.go | 59 - .../api/v2/models/alert_status.go | 152 - .../api/v2/models/alertmanager_config.go | 78 - .../api/v2/models/alertmanager_status.go | 161 - .../api/v2/models/cluster_status.go | 154 - .../api/v2/models/gettable_alert.go | 326 - .../api/v2/models/gettable_alerts.go | 59 - .../api/v2/models/gettable_silence.go | 195 - .../api/v2/models/gettable_silences.go | 59 - .../alertmanager/api/v2/models/label_set.go | 34 - .../alertmanager/api/v2/models/matcher.go | 115 - .../alertmanager/api/v2/models/matchers.go | 66 - .../alertmanager/api/v2/models/peer_status.go | 95 - .../api/v2/models/postable_alert.go | 195 - .../api/v2/models/postable_alerts.go | 59 - .../api/v2/models/postable_silence.go | 116 - .../alertmanager/api/v2/models/receiver.go | 78 - .../alertmanager/api/v2/models/silence.go | 163 - .../api/v2/models/silence_status.go | 118 - .../api/v2/models/version_info.go | 163 - .../alertmanager/api/v2/openapi.yaml | 525 - .../api/v2/restapi/configure_alertmanager.go | 130 - .../alertmanager/api/v2/restapi/doc.go | 33 - .../api/v2/restapi/embedded_spec.go | 1632 --- .../v2/restapi/operations/alert/get_alerts.go | 72 - .../operations/alert/get_alerts_parameters.go | 277 - .../operations/alert/get_alerts_responses.go | 159 - .../operations/alert/get_alerts_urlbuilder.go | 170 - .../restapi/operations/alert/post_alerts.go | 72 - .../alert/post_alerts_parameters.go | 91 - .../operations/alert/post_alerts_responses.go | 134 - 
.../alert/post_alerts_urlbuilder.go | 101 - .../operations/alertgroup/get_alert_groups.go | 72 - .../alertgroup/get_alert_groups_parameters.go | 241 - .../alertgroup/get_alert_groups_responses.go | 159 - .../alertgroup/get_alert_groups_urlbuilder.go | 161 - .../v2/restapi/operations/alertmanager_api.go | 399 - .../restapi/operations/general/get_status.go | 72 - .../general/get_status_parameters.go | 59 - .../general/get_status_responses.go | 72 - .../general/get_status_urlbuilder.go | 101 - .../operations/receiver/get_receivers.go | 72 - .../receiver/get_receivers_parameters.go | 59 - .../receiver/get_receivers_responses.go | 75 - .../receiver/get_receivers_urlbuilder.go | 101 - .../operations/silence/delete_silence.go | 72 - .../silence/delete_silence_parameters.go | 105 - .../silence/delete_silence_responses.go | 92 - .../silence/delete_silence_urlbuilder.go | 115 - .../restapi/operations/silence/get_silence.go | 72 - .../silence/get_silence_parameters.go | 105 - .../silence/get_silence_responses.go | 138 - .../silence/get_silence_urlbuilder.go | 115 - .../operations/silence/get_silences.go | 72 - .../silence/get_silences_parameters.go | 98 - .../silence/get_silences_responses.go | 117 - .../silence/get_silences_urlbuilder.go | 125 - .../operations/silence/post_silences.go | 106 - .../silence/post_silences_parameters.go | 91 - .../silence/post_silences_responses.go | 154 - .../silence/post_silences_urlbuilder.go | 101 - .../alertmanager/api/v2/restapi/server.go | 525 - .../alertmanager/cluster/advertise.go | 86 - .../alertmanager/cluster/channel.go | 149 - .../alertmanager/cluster/cluster.go | 860 -- .../alertmanager/cluster/connection_pool.go | 84 - .../alertmanager/cluster/delegate.go | 283 - .../alertmanager/cluster/tls_config.go | 45 - .../alertmanager/cluster/tls_connection.go | 188 - .../alertmanager/cluster/tls_transport.go | 346 - .../alertmanager/dispatch/dispatch.go | 541 - .../prometheus/alertmanager/dispatch/route.go | 249 - .../alertmanager/inhibit/inhibit.go | 251 - .../prometheus/alertmanager/nflog/nflog.go | 597 -- .../alertmanager/notify/email/email.go | 355 - .../prometheus/alertmanager/notify/notify.go | 878 -- .../alertmanager/notify/opsgenie/opsgenie.go | 296 - .../notify/pagerduty/pagerduty.go | 325 - .../alertmanager/notify/pushover/pushover.go | 134 - .../alertmanager/notify/slack/slack.go | 213 - .../prometheus/alertmanager/notify/sns/sns.go | 210 - .../prometheus/alertmanager/notify/util.go | 212 - .../notify/victorops/victorops.go | 153 - .../alertmanager/notify/webhook/webhook.go | 110 - .../alertmanager/notify/wechat/wechat.go | 197 - .../alertmanager/provider/mem/mem.go | 219 - .../alertmanager/provider/provider.go | 88 - .../alertmanager/silence/silence.go | 971 -- .../prometheus/alertmanager/store/store.go | 137 - .../prometheus/alertmanager/ui/Dockerfile | 13 - .../prometheus/alertmanager/ui/web.go | 92 - .../prometheus/common/route/route.go | 138 - .../prometheus/exporter-toolkit/LICENSE | 201 - .../prometheus/exporter-toolkit/web/README.md | 10 - .../prometheus/exporter-toolkit/web/cache.go | 91 - .../exporter-toolkit/web/handler.go | 137 - .../exporter-toolkit/web/tls_config.go | 361 - .../exporter-toolkit/web/web-config.yml | 6 - .../prometheus/discovery/dns/dns.go | 360 - .../prometheus/discovery/refresh/refresh.go | 122 - .../prometheus/notifier/notifier.go | 766 -- .../prometheus/prometheus/rules/alerting.go | 584 - .../prometheus/prometheus/rules/manager.go | 1165 -- .../prometheus/prometheus/rules/recording.go | 203 - 
.../prometheus/util/httputil/compression.go | 92 - .../prometheus/util/httputil/context.go | 51 - .../prometheus/util/httputil/cors.go | 48 - .../prometheus/prometheus/web/api/v1/api.go | 1722 --- vendor/github.com/rs/cors/LICENSE | 19 - vendor/github.com/rs/cors/README.md | 116 - vendor/github.com/rs/cors/cors.go | 446 - vendor/github.com/rs/cors/utils.go | 71 - vendor/github.com/segmentio/fasthash/LICENSE | 21 - .../segmentio/fasthash/fnv1a/hash.go | 71 - .../segmentio/fasthash/fnv1a/hash32.go | 53 - vendor/github.com/spf13/afero/.gitignore | 2 - vendor/github.com/spf13/afero/.travis.yml | 26 - vendor/github.com/spf13/afero/LICENSE.txt | 174 - vendor/github.com/spf13/afero/README.md | 430 - vendor/github.com/spf13/afero/afero.go | 111 - vendor/github.com/spf13/afero/appveyor.yml | 15 - vendor/github.com/spf13/afero/basepath.go | 211 - .../github.com/spf13/afero/cacheOnReadFs.go | 311 - vendor/github.com/spf13/afero/const_bsds.go | 22 - .../github.com/spf13/afero/const_win_unix.go | 26 - .../github.com/spf13/afero/copyOnWriteFs.go | 326 - vendor/github.com/spf13/afero/httpFs.go | 114 - vendor/github.com/spf13/afero/iofs.go | 288 - vendor/github.com/spf13/afero/ioutil.go | 240 - vendor/github.com/spf13/afero/lstater.go | 27 - vendor/github.com/spf13/afero/match.go | 110 - vendor/github.com/spf13/afero/mem/dir.go | 37 - vendor/github.com/spf13/afero/mem/dirmap.go | 43 - vendor/github.com/spf13/afero/mem/file.go | 338 - vendor/github.com/spf13/afero/memmap.go | 404 - vendor/github.com/spf13/afero/os.go | 113 - vendor/github.com/spf13/afero/path.go | 106 - vendor/github.com/spf13/afero/readonlyfs.go | 96 - vendor/github.com/spf13/afero/regexpfs.go | 224 - vendor/github.com/spf13/afero/symlink.go | 55 - vendor/github.com/spf13/afero/unionFile.go | 317 - vendor/github.com/spf13/afero/util.go | 330 - .../thanos/pkg/compact/blocks_cleaner.go | 60 - .../thanos-io/thanos/pkg/compact/clean.go | 62 - .../thanos-io/thanos/pkg/compact/compact.go | 1439 --- .../thanos-io/thanos/pkg/compact/planner.go | 305 - .../thanos-io/thanos/pkg/compact/retention.go | 49 - .../thanos-io/thanos/pkg/shipper/shipper.go | 512 - vendor/go.etcd.io/bbolt/.gitignore | 7 - vendor/go.etcd.io/bbolt/.travis.yml | 18 - vendor/go.etcd.io/bbolt/LICENSE | 20 - vendor/go.etcd.io/bbolt/Makefile | 36 - vendor/go.etcd.io/bbolt/README.md | 958 -- vendor/go.etcd.io/bbolt/bolt_386.go | 7 - vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm64.go | 9 - vendor/go.etcd.io/bbolt/bolt_linux.go | 10 - vendor/go.etcd.io/bbolt/bolt_mips64x.go | 9 - vendor/go.etcd.io/bbolt/bolt_mipsx.go | 9 - vendor/go.etcd.io/bbolt/bolt_openbsd.go | 27 - vendor/go.etcd.io/bbolt/bolt_ppc.go | 9 - vendor/go.etcd.io/bbolt/bolt_ppc64.go | 9 - vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 9 - vendor/go.etcd.io/bbolt/bolt_riscv64.go | 9 - vendor/go.etcd.io/bbolt/bolt_s390x.go | 9 - vendor/go.etcd.io/bbolt/bolt_unix.go | 86 - vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 90 - vendor/go.etcd.io/bbolt/bolt_unix_solaris.go | 88 - vendor/go.etcd.io/bbolt/bolt_windows.go | 141 - vendor/go.etcd.io/bbolt/boltsync_unix.go | 8 - vendor/go.etcd.io/bbolt/bucket.go | 777 -- vendor/go.etcd.io/bbolt/compact.go | 114 - vendor/go.etcd.io/bbolt/cursor.go | 396 - vendor/go.etcd.io/bbolt/db.go | 1232 --- vendor/go.etcd.io/bbolt/doc.go | 44 - vendor/go.etcd.io/bbolt/errors.go | 71 - vendor/go.etcd.io/bbolt/freelist.go | 404 - vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 - vendor/go.etcd.io/bbolt/mlock_unix.go | 36 - 
vendor/go.etcd.io/bbolt/mlock_windows.go | 11 - vendor/go.etcd.io/bbolt/node.go | 602 -- vendor/go.etcd.io/bbolt/page.go | 204 - vendor/go.etcd.io/bbolt/tx.go | 723 -- vendor/go.etcd.io/bbolt/unsafe.go | 39 - vendor/go.mongodb.org/mongo-driver/LICENSE | 201 - .../go.mongodb.org/mongo-driver/bson/bson.go | 50 - .../bson/bsoncodec/array_codec.go | 50 - .../mongo-driver/bson/bsoncodec/bsoncodec.go | 216 - .../bson/bsoncodec/byte_slice_codec.go | 111 - .../bson/bsoncodec/cond_addr_codec.go | 63 - .../bson/bsoncodec/default_value_decoders.go | 1717 --- .../bson/bsoncodec/default_value_encoders.go | 766 -- .../mongo-driver/bson/bsoncodec/doc.go | 84 - .../bson/bsoncodec/empty_interface_codec.go | 140 - .../mongo-driver/bson/bsoncodec/map_codec.go | 288 - .../mongo-driver/bson/bsoncodec/mode.go | 65 - .../bson/bsoncodec/pointer_codec.go | 109 - .../mongo-driver/bson/bsoncodec/proxy.go | 14 - .../mongo-driver/bson/bsoncodec/registry.go | 468 - .../bson/bsoncodec/slice_codec.go | 199 - .../bson/bsoncodec/string_codec.go | 119 - .../bson/bsoncodec/struct_codec.go | 664 -- .../bson/bsoncodec/struct_tag_parser.go | 139 - .../mongo-driver/bson/bsoncodec/time_codec.go | 127 - .../mongo-driver/bson/bsoncodec/types.go | 57 - .../mongo-driver/bson/bsoncodec/uint_codec.go | 173 - .../bsonoptions/byte_slice_codec_options.go | 38 - .../empty_interface_codec_options.go | 38 - .../bson/bsonoptions/map_codec_options.go | 67 - .../bson/bsonoptions/slice_codec_options.go | 38 - .../bson/bsonoptions/string_codec_options.go | 41 - .../bson/bsonoptions/struct_codec_options.go | 87 - .../bson/bsonoptions/time_codec_options.go | 38 - .../bson/bsonoptions/uint_codec_options.go | 38 - .../mongo-driver/bson/bsonrw/copier.go | 445 - .../mongo-driver/bson/bsonrw/doc.go | 9 - .../bson/bsonrw/extjson_parser.go | 806 -- .../bson/bsonrw/extjson_reader.go | 644 -- .../bson/bsonrw/extjson_tables.go | 223 - .../bson/bsonrw/extjson_wrappers.go | 492 - .../bson/bsonrw/extjson_writer.go | 732 -- .../mongo-driver/bson/bsonrw/json_scanner.go | 528 - .../mongo-driver/bson/bsonrw/mode.go | 108 - .../mongo-driver/bson/bsonrw/reader.go | 63 - .../mongo-driver/bson/bsonrw/value_reader.go | 867 -- .../mongo-driver/bson/bsonrw/value_writer.go | 606 -- .../mongo-driver/bson/bsonrw/writer.go | 78 - .../mongo-driver/bson/bsontype/bsontype.go | 97 - .../mongo-driver/bson/decoder.go | 118 - .../go.mongodb.org/mongo-driver/bson/doc.go | 138 - .../mongo-driver/bson/encoder.go | 99 - .../mongo-driver/bson/marshal.go | 245 - .../mongo-driver/bson/primitive/decimal.go | 424 - .../mongo-driver/bson/primitive/objectid.go | 204 - .../mongo-driver/bson/primitive/primitive.go | 217 - .../mongo-driver/bson/primitive_codecs.go | 92 - .../go.mongodb.org/mongo-driver/bson/raw.go | 85 - .../mongo-driver/bson/raw_element.go | 51 - .../mongo-driver/bson/raw_value.go | 309 - .../mongo-driver/bson/registry.go | 24 - .../go.mongodb.org/mongo-driver/bson/types.go | 36 - .../mongo-driver/bson/unmarshal.go | 101 - .../mongo-driver/x/bsonx/bsoncore/array.go | 164 - .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 - .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 - .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 862 -- .../mongo-driver/x/bsonx/bsoncore/document.go | 410 - .../x/bsonx/bsoncore/document_sequence.go | 183 - .../mongo-driver/x/bsonx/bsoncore/element.go | 152 - .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 - .../mongo-driver/x/bsonx/bsoncore/value.go | 980 -- vendor/golang.org/x/crypto/bcrypt/base64.go | 35 - vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 -
vendor/golang.org/x/crypto/blowfish/block.go | 159 - vendor/golang.org/x/crypto/blowfish/cipher.go | 99 - vendor/golang.org/x/crypto/blowfish/const.go | 199 - .../golang.org/x/sync/semaphore/semaphore.go | 136 - vendor/golang.org/x/text/width/kind_string.go | 28 - .../golang.org/x/text/width/tables10.0.0.go | 1319 --- .../golang.org/x/text/width/tables11.0.0.go | 1331 --- .../golang.org/x/text/width/tables12.0.0.go | 1351 --- .../golang.org/x/text/width/tables13.0.0.go | 1352 --- vendor/golang.org/x/text/width/tables9.0.0.go | 1287 --- vendor/golang.org/x/text/width/transform.go | 239 - vendor/golang.org/x/text/width/trieval.go | 30 - vendor/golang.org/x/text/width/width.go | 206 - vendor/gopkg.in/inf.v0/LICENSE | 28 - vendor/gopkg.in/inf.v0/dec.go | 615 -- vendor/gopkg.in/inf.v0/rounder.go | 145 - vendor/modules.txt | 267 -
1033 files changed, 243085 deletions(-)
delete mode 100644 vendor/github.com/Masterminds/squirrel/.gitignore delete mode 100644 vendor/github.com/Masterminds/squirrel/.travis.yml delete mode 100644 vendor/github.com/Masterminds/squirrel/LICENSE.txt delete mode 100644 vendor/github.com/Masterminds/squirrel/README.md delete mode 100644 vendor/github.com/Masterminds/squirrel/case.go delete mode 100644 vendor/github.com/Masterminds/squirrel/delete.go delete mode 100644 vendor/github.com/Masterminds/squirrel/expr.go delete mode 100644 vendor/github.com/Masterminds/squirrel/insert.go delete mode 100644 vendor/github.com/Masterminds/squirrel/part.go delete mode 100644 vendor/github.com/Masterminds/squirrel/placeholder.go delete mode 100644 vendor/github.com/Masterminds/squirrel/row.go delete mode 100644 vendor/github.com/Masterminds/squirrel/select.go delete mode 100644 vendor/github.com/Masterminds/squirrel/squirrel.go delete mode 100644 vendor/github.com/Masterminds/squirrel/statement.go delete mode 100644 vendor/github.com/Masterminds/squirrel/stmtcacher.go delete mode 100644 vendor/github.com/Masterminds/squirrel/update.go delete mode 100644 vendor/github.com/Masterminds/squirrel/where.go delete mode 100644 vendor/github.com/NYTimes/gziphandler/.gitignore delete mode 100644 vendor/github.com/NYTimes/gziphandler/.travis.yml delete mode 100644 vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md delete mode 100644 vendor/github.com/NYTimes/gziphandler/LICENSE delete mode 100644 vendor/github.com/NYTimes/gziphandler/README.md delete mode 100644 vendor/github.com/NYTimes/gziphandler/gzip.go delete mode 100644 vendor/github.com/NYTimes/gziphandler/gzip_go18.go delete mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore delete mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/purell/README.md delete mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go delete mode 100644 vendor/github.com/asaskevich/govalidator/.gitignore delete mode 100644 vendor/github.com/asaskevich/govalidator/.travis.yml delete mode 100644 vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md delete mode 100644
vendor/github.com/asaskevich/govalidator/LICENSE delete mode 100644 vendor/github.com/asaskevich/govalidator/README.md delete mode 100644 vendor/github.com/asaskevich/govalidator/arrays.go delete mode 100644 vendor/github.com/asaskevich/govalidator/converter.go delete mode 100644 vendor/github.com/asaskevich/govalidator/doc.go delete mode 100644 vendor/github.com/asaskevich/govalidator/error.go delete mode 100644 vendor/github.com/asaskevich/govalidator/numerics.go delete mode 100644 vendor/github.com/asaskevich/govalidator/patterns.go delete mode 100644 vendor/github.com/asaskevich/govalidator/types.go delete mode 100644 vendor/github.com/asaskevich/govalidator/utils.go delete mode 100644 vendor/github.com/asaskevich/govalidator/validator.go delete mode 100644 vendor/github.com/asaskevich/govalidator/wercker.yml delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sns/service.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/.gitignore delete mode 100644 vendor/github.com/cenkalti/backoff/v4/.travis.yml delete mode 100644 vendor/github.com/cenkalti/backoff/v4/LICENSE delete mode 100644 vendor/github.com/cenkalti/backoff/v4/README.md delete mode 100644 vendor/github.com/cenkalti/backoff/v4/backoff.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/ticker.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/timer.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient/bucket_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/config.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb/store.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local/store.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/objectclient/store.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/store.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/merger.go delete mode 100644 
vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_alerts.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silence_id.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silences.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alert_groups.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alerts.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silence_id.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silences.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/rate_limited_notifier.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_persister.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/api/api.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/api/handlers.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/authenticator.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/instrumentation.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/index_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/storage_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/table_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/table_provisioning.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tenant_deletion_api.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go delete mode 100644 
vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/storage/metrics.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/label_remover_filter.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_grouper.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_planner.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/config.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/db/db.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/db/memory/memory.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/db/timed.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/ast.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/engine.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/functions.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/fuzz.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/lex.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/parse.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/printer.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/quantile.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/value.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortex/status.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go delete mode 100644 
vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ingester_client_pool.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/query.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/config.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/active_series.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/instance_limits.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/label_pairs.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/locker.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/series.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go delete mode 100644 
vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/wal.pb.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ingester/wal.proto
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/batch/batch.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/batch/chunk.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/batch/merge.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/batch/non_overlapping.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/batch/stream.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/block.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_scan.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/chunkstore/chunkstore.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/dummy.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/error_translate_queryable.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_iterator.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_merge_iterator.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/matrix.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/metadata_handler.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/instrumentation.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryable.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.pb.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.proto
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/split_by_interval.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/step_align.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/test_utils.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/series_with_chunks.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/testutils.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/timeseries_series_set.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/api.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/client_pool.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.pb.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.proto
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/compat.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/custom.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.pb.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.proto
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/user_queues.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/chunk_bytes_pool.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/partitioner.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/chunkcompat/compat.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/carrier.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/limiter/query_limiter.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service_wrapper.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/net/firewall_dialer.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/push/push.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go
delete mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md
delete mode 100644 vendor/github.com/docker/go-units/LICENSE
delete mode 100644 vendor/github.com/docker/go-units/MAINTAINERS
delete mode 100644 vendor/github.com/docker/go-units/README.md
delete mode 100644 vendor/github.com/docker/go-units/circle.yml
delete mode 100644 vendor/github.com/docker/go-units/duration.go
delete mode 100644 vendor/github.com/docker/go-units/size.go
delete mode 100644 vendor/github.com/docker/go-units/ulimit.go
delete mode 100644 vendor/github.com/felixge/fgprof/LICENSE.txt
delete mode 100644 vendor/github.com/felixge/fgprof/README.md
delete mode 100644 vendor/github.com/felixge/fgprof/fgprof.go
delete mode 100644 vendor/github.com/felixge/fgprof/format.go
delete mode 100644 vendor/github.com/felixge/fgprof/handler.go
delete mode 100644 vendor/github.com/felixge/fgprof/pprof.go
delete mode 100644 vendor/github.com/go-kit/kit/LICENSE
delete mode 100644 vendor/github.com/go-kit/kit/log/README.md
delete mode 100644 vendor/github.com/go-kit/kit/log/doc.go
delete mode 100644 vendor/github.com/go-kit/kit/log/json_logger.go
delete mode 100644 vendor/github.com/go-kit/kit/log/level/doc.go
delete mode 100644 vendor/github.com/go-kit/kit/log/level/level.go
delete mode 100644 vendor/github.com/go-kit/kit/log/log.go
delete mode 100644 vendor/github.com/go-kit/kit/log/logfmt_logger.go
delete mode 100644 vendor/github.com/go-kit/kit/log/nop_logger.go
delete mode 100644 vendor/github.com/go-kit/kit/log/stdlib.go
delete mode 100644 vendor/github.com/go-kit/kit/log/sync.go
delete mode 100644 vendor/github.com/go-kit/kit/log/value.go
delete mode 100644 vendor/github.com/go-openapi/analysis/.codecov.yml
delete mode 100644 vendor/github.com/go-openapi/analysis/.gitattributes
delete mode 100644 vendor/github.com/go-openapi/analysis/.gitignore
delete mode 100644 vendor/github.com/go-openapi/analysis/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/analysis/LICENSE
delete mode 100644 vendor/github.com/go-openapi/analysis/README.md
delete mode 100644 vendor/github.com/go-openapi/analysis/analyzer.go
delete mode 100644 vendor/github.com/go-openapi/analysis/appveyor.yml
delete mode 100644 vendor/github.com/go-openapi/analysis/debug.go
delete mode 100644 vendor/github.com/go-openapi/analysis/doc.go
delete mode 100644 vendor/github.com/go-openapi/analysis/fixer.go
delete mode 100644 vendor/github.com/go-openapi/analysis/flatten.go
delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_name.go
delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_options.go
delete mode 100644 vendor/github.com/go-openapi/analysis/internal/debug/debug.go
delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
delete mode 100644 vendor/github.com/go-openapi/analysis/mixin.go
delete mode 100644 vendor/github.com/go-openapi/analysis/schema.go
delete mode 100644 vendor/github.com/go-openapi/errors/.gitattributes
delete mode 100644 vendor/github.com/go-openapi/errors/.gitignore
delete mode 100644 vendor/github.com/go-openapi/errors/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/errors/LICENSE
delete mode 100644 vendor/github.com/go-openapi/errors/README.md
delete mode 100644 vendor/github.com/go-openapi/errors/api.go
delete mode 100644 vendor/github.com/go-openapi/errors/auth.go
delete mode 100644 vendor/github.com/go-openapi/errors/doc.go
delete mode 100644 vendor/github.com/go-openapi/errors/headers.go
delete mode 100644 vendor/github.com/go-openapi/errors/middleware.go
delete mode 100644 vendor/github.com/go-openapi/errors/parsing.go
delete mode 100644 vendor/github.com/go-openapi/errors/schema.go
delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.editorconfig
delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore
delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml
delete mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE
delete mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md
delete mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go
delete mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore
delete mode 100644 vendor/github.com/go-openapi/jsonreference/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml
delete mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE
delete mode 100644 vendor/github.com/go-openapi/jsonreference/README.md
delete mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go
delete mode 100644 vendor/github.com/go-openapi/loads/.editorconfig
delete mode 100644 vendor/github.com/go-openapi/loads/.gitignore
delete mode 100644 vendor/github.com/go-openapi/loads/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/loads/.travis.yml
delete mode 100644 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/loads/LICENSE
delete mode 100644 vendor/github.com/go-openapi/loads/README.md
delete mode 100644 vendor/github.com/go-openapi/loads/doc.go
delete mode 100644 vendor/github.com/go-openapi/loads/loaders.go
delete mode 100644 vendor/github.com/go-openapi/loads/options.go
delete mode 100644 vendor/github.com/go-openapi/loads/spec.go
delete mode 100644 vendor/github.com/go-openapi/runtime/.editorconfig
delete mode 100644 vendor/github.com/go-openapi/runtime/.gitattributes
delete mode 100644 vendor/github.com/go-openapi/runtime/.gitignore
delete mode 100644 vendor/github.com/go-openapi/runtime/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/runtime/LICENSE
delete mode 100644 vendor/github.com/go-openapi/runtime/README.md
delete mode 100644 vendor/github.com/go-openapi/runtime/bytestream.go
delete mode 100644 vendor/github.com/go-openapi/runtime/client_auth_info.go
delete mode 100644 vendor/github.com/go-openapi/runtime/client_operation.go
delete mode 100644 vendor/github.com/go-openapi/runtime/client_request.go
delete mode 100644 vendor/github.com/go-openapi/runtime/client_response.go
delete mode 100644 vendor/github.com/go-openapi/runtime/constants.go
delete mode 100644 vendor/github.com/go-openapi/runtime/csv.go
delete mode 100644 vendor/github.com/go-openapi/runtime/discard.go
delete mode 100644 vendor/github.com/go-openapi/runtime/file.go
delete mode 100644 vendor/github.com/go-openapi/runtime/flagext/byte_size.go
delete mode 100644 vendor/github.com/go-openapi/runtime/headers.go
delete mode 100644 vendor/github.com/go-openapi/runtime/interfaces.go
delete mode 100644 vendor/github.com/go-openapi/runtime/json.go
delete mode 100644 vendor/github.com/go-openapi/runtime/logger/logger.go
delete mode 100644 vendor/github.com/go-openapi/runtime/logger/standard.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/context.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/denco/README.md
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/denco/router.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/denco/server.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/denco/util.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/doc.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/go18.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/header/header.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/negotiate.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/operation.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/parameter.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/pre_go18.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/redoc.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/request.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/router.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/security.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/spec.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/validation.go
delete mode 100644 vendor/github.com/go-openapi/runtime/request.go
delete mode 100644 vendor/github.com/go-openapi/runtime/security/authenticator.go
delete mode 100644 vendor/github.com/go-openapi/runtime/security/authorizer.go
delete mode 100644 vendor/github.com/go-openapi/runtime/statuses.go
delete mode 100644 vendor/github.com/go-openapi/runtime/text.go
delete mode 100644 vendor/github.com/go-openapi/runtime/values.go
delete mode 100644 vendor/github.com/go-openapi/runtime/xml.go
delete mode 100644 vendor/github.com/go-openapi/spec/.editorconfig
delete mode 100644 vendor/github.com/go-openapi/spec/.gitignore
delete mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/spec/.travis.yml
delete mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/spec/LICENSE
delete mode 100644 vendor/github.com/go-openapi/spec/README.md
delete mode 100644 vendor/github.com/go-openapi/spec/appveyor.yml
delete mode 100644 vendor/github.com/go-openapi/spec/bindata.go
delete mode 100644 vendor/github.com/go-openapi/spec/cache.go
delete mode 100644 vendor/github.com/go-openapi/spec/contact_info.go
delete mode 100644 vendor/github.com/go-openapi/spec/debug.go
delete mode 100644 vendor/github.com/go-openapi/spec/errors.go
delete mode 100644 vendor/github.com/go-openapi/spec/expander.go
delete mode 100644 vendor/github.com/go-openapi/spec/external_docs.go
delete mode 100644 vendor/github.com/go-openapi/spec/header.go
delete mode 100644 vendor/github.com/go-openapi/spec/info.go
delete mode 100644 vendor/github.com/go-openapi/spec/items.go
delete mode 100644 vendor/github.com/go-openapi/spec/license.go
delete mode 100644 vendor/github.com/go-openapi/spec/normalizer.go
delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_windows.go
delete mode 100644 vendor/github.com/go-openapi/spec/operation.go
delete mode 100644 vendor/github.com/go-openapi/spec/parameter.go
delete mode 100644 vendor/github.com/go-openapi/spec/path_item.go
delete mode 100644 vendor/github.com/go-openapi/spec/paths.go
delete mode 100644 vendor/github.com/go-openapi/spec/properties.go
delete mode 100644 vendor/github.com/go-openapi/spec/ref.go
delete mode 100644 vendor/github.com/go-openapi/spec/resolver.go
delete mode 100644 vendor/github.com/go-openapi/spec/response.go
delete mode 100644 vendor/github.com/go-openapi/spec/responses.go
delete mode 100644 vendor/github.com/go-openapi/spec/schema.go
delete mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go
delete mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go
delete mode 100644 vendor/github.com/go-openapi/spec/spec.go
delete mode 100644 vendor/github.com/go-openapi/spec/swagger.go
delete mode 100644 vendor/github.com/go-openapi/spec/tag.go
delete mode 100644 vendor/github.com/go-openapi/spec/validations.go
delete mode 100644 vendor/github.com/go-openapi/spec/xml_object.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/.editorconfig
delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitattributes
delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitignore
delete mode 100644 vendor/github.com/go-openapi/strfmt/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/strfmt/LICENSE
delete mode 100644 vendor/github.com/go-openapi/strfmt/README.md
delete mode 100644 vendor/github.com/go-openapi/strfmt/bson.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/date.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/default.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/doc.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/duration.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/format.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/time.go
delete mode 100644 vendor/github.com/go-openapi/strfmt/ulid.go
delete mode 100644 vendor/github.com/go-openapi/swag/.editorconfig
delete mode 100644 vendor/github.com/go-openapi/swag/.gitattributes
delete mode 100644 vendor/github.com/go-openapi/swag/.gitignore
delete mode 100644 vendor/github.com/go-openapi/swag/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/swag/LICENSE
delete mode 100644 vendor/github.com/go-openapi/swag/README.md
delete mode 100644 vendor/github.com/go-openapi/swag/convert.go
delete mode 100644 vendor/github.com/go-openapi/swag/convert_types.go
delete mode 100644 vendor/github.com/go-openapi/swag/doc.go
delete mode 100644 vendor/github.com/go-openapi/swag/file.go
delete mode 100644 vendor/github.com/go-openapi/swag/json.go
delete mode 100644 vendor/github.com/go-openapi/swag/loading.go
delete mode 100644 vendor/github.com/go-openapi/swag/name_lexem.go
delete mode 100644 vendor/github.com/go-openapi/swag/net.go
delete mode 100644 vendor/github.com/go-openapi/swag/path.go
delete mode 100644 vendor/github.com/go-openapi/swag/post_go18.go
delete mode 100644 vendor/github.com/go-openapi/swag/post_go19.go
delete mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go
delete mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go
delete mode 100644 vendor/github.com/go-openapi/swag/split.go
delete mode 100644 vendor/github.com/go-openapi/swag/util.go
delete mode 100644 vendor/github.com/go-openapi/swag/yaml.go
delete mode 100644 vendor/github.com/go-openapi/validate/.editorconfig
delete mode 100644 vendor/github.com/go-openapi/validate/.gitattributes
delete mode 100644 vendor/github.com/go-openapi/validate/.gitignore
delete mode 100644 vendor/github.com/go-openapi/validate/.golangci.yml
delete mode 100644 vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md
delete mode 100644 vendor/github.com/go-openapi/validate/LICENSE
delete mode 100644 vendor/github.com/go-openapi/validate/README.md
delete mode 100644 vendor/github.com/go-openapi/validate/appveyor.yml
delete mode 100644 vendor/github.com/go-openapi/validate/context.go
delete mode 100644 vendor/github.com/go-openapi/validate/debug.go
delete mode 100644 vendor/github.com/go-openapi/validate/default_validator.go
delete mode 100644 vendor/github.com/go-openapi/validate/doc.go
delete mode 100644 vendor/github.com/go-openapi/validate/example_validator.go
delete mode 100644 vendor/github.com/go-openapi/validate/formats.go
delete mode 100644 vendor/github.com/go-openapi/validate/helpers.go
delete mode 100644 vendor/github.com/go-openapi/validate/object_validator.go
delete mode 100644 vendor/github.com/go-openapi/validate/options.go
delete mode 100644 vendor/github.com/go-openapi/validate/result.go
delete mode 100644 vendor/github.com/go-openapi/validate/rexp.go
delete mode 100644 vendor/github.com/go-openapi/validate/schema.go
delete mode 100644 vendor/github.com/go-openapi/validate/schema_messages.go
delete mode 100644 vendor/github.com/go-openapi/validate/schema_option.go
delete mode 100644 vendor/github.com/go-openapi/validate/schema_props.go
delete mode 100644 vendor/github.com/go-openapi/validate/slice_validator.go
delete mode 100644 vendor/github.com/go-openapi/validate/spec.go
delete mode 100644 vendor/github.com/go-openapi/validate/spec_messages.go
delete mode 100644 vendor/github.com/go-openapi/validate/type.go
delete mode 100644 vendor/github.com/go-openapi/validate/update-fixtures.sh
delete mode 100644 vendor/github.com/go-openapi/validate/validator.go
delete mode 100644 vendor/github.com/go-openapi/validate/values.go
delete mode 100644 vendor/github.com/go-stack/stack/LICENSE.md
delete mode 100644 vendor/github.com/go-stack/stack/README.md
delete mode 100644 vendor/github.com/go-stack/stack/stack.go
delete mode 100644 vendor/github.com/gocql/gocql/.gitignore
delete mode 100644 vendor/github.com/gocql/gocql/.travis.yml
delete mode 100644 vendor/github.com/gocql/gocql/AUTHORS
delete mode 100644 vendor/github.com/gocql/gocql/CONTRIBUTING.md
delete mode 100644 vendor/github.com/gocql/gocql/LICENSE
delete mode 100644 vendor/github.com/gocql/gocql/README.md
delete mode 100644 vendor/github.com/gocql/gocql/address_translators.go
delete mode 100644 vendor/github.com/gocql/gocql/cluster.go
delete mode 100644 vendor/github.com/gocql/gocql/compressor.go
delete mode 100644 vendor/github.com/gocql/gocql/conn.go
delete mode 100644 vendor/github.com/gocql/gocql/connectionpool.go
delete mode 100644 vendor/github.com/gocql/gocql/control.go
delete mode 100644 vendor/github.com/gocql/gocql/cqltypes.go
delete mode 100644 vendor/github.com/gocql/gocql/debug_off.go
delete mode 100644 vendor/github.com/gocql/gocql/debug_on.go
delete mode 100644 vendor/github.com/gocql/gocql/doc.go
delete mode 100644 vendor/github.com/gocql/gocql/errors.go
delete mode 100644 vendor/github.com/gocql/gocql/events.go
delete mode 100644 vendor/github.com/gocql/gocql/filters.go
delete mode 100644 vendor/github.com/gocql/gocql/frame.go
delete mode 100644 vendor/github.com/gocql/gocql/fuzz.go
delete mode 100644 vendor/github.com/gocql/gocql/helpers.go
delete mode 100644 vendor/github.com/gocql/gocql/host_source.go
delete mode 100644 vendor/github.com/gocql/gocql/host_source_gen.go
delete mode 100644 vendor/github.com/gocql/gocql/install_test_deps.sh
delete mode 100644 vendor/github.com/gocql/gocql/integration.sh
delete mode 100644 vendor/github.com/gocql/gocql/internal/lru/lru.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go
delete mode 100644 vendor/github.com/gocql/gocql/internal/streams/streams.go
delete mode 100644 vendor/github.com/gocql/gocql/marshal.go
delete mode 100644 vendor/github.com/gocql/gocql/metadata.go
delete mode 100644 vendor/github.com/gocql/gocql/policies.go
delete mode 100644 vendor/github.com/gocql/gocql/prepared_cache.go
delete mode 100644 vendor/github.com/gocql/gocql/query_executor.go
delete mode 100644 vendor/github.com/gocql/gocql/ring.go
delete mode 100644 vendor/github.com/gocql/gocql/session.go
delete mode 100644 vendor/github.com/gocql/gocql/token.go
delete mode 100644 vendor/github.com/gocql/gocql/topology.go
delete mode 100644 vendor/github.com/gocql/gocql/uuid.go
delete mode 100644 vendor/github.com/gofrs/uuid/.gitignore
delete mode 100644 vendor/github.com/gofrs/uuid/LICENSE
delete mode 100644 vendor/github.com/gofrs/uuid/README.md
delete mode 100644 vendor/github.com/gofrs/uuid/codec.go
delete mode 100644 vendor/github.com/gofrs/uuid/fuzz.go
delete mode 100644 vendor/github.com/gofrs/uuid/generator.go
delete mode 100644 vendor/github.com/gofrs/uuid/sql.go
delete mode 100644 vendor/github.com/gofrs/uuid/uuid.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/.dockerignore
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/.gitignore
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/.golangci.yml
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/.travis.yml
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/CONTRIBUTING.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/Dockerfile
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/FAQ.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/LICENSE
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/MIGRATIONS.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/Makefile
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/README.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/database/driver.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/database/error.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/database/postgres/README.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/database/postgres/TUTORIAL.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/database/postgres/postgres.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/database/util.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/docker-deploy.sh
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/internal/url/url.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/log.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/migrate.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/migration.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/driver.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/file/README.md
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/file/file.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/migration.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/source/parse.go
delete mode 100644 vendor/github.com/golang-migrate/migrate/v4/util.go
delete mode 100644 vendor/github.com/google/pprof/AUTHORS
delete mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS
delete mode 100644 vendor/github.com/google/pprof/LICENSE
delete mode 100644 vendor/github.com/google/pprof/profile/encode.go
delete mode 100644 vendor/github.com/google/pprof/profile/filter.go
delete mode 100644 vendor/github.com/google/pprof/profile/index.go
delete mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go
delete mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go
delete mode 100644 vendor/github.com/google/pprof/profile/merge.go
delete mode 100644 vendor/github.com/google/pprof/profile/profile.go
delete mode 100644 vendor/github.com/google/pprof/profile/proto.go
delete mode 100644 vendor/github.com/google/pprof/profile/prune.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/.gitignore
delete mode 100644 vendor/github.com/hailocab/go-hostpool/.travis.yml
delete mode 100644 vendor/github.com/hailocab/go-hostpool/LICENSE
delete mode 100644 vendor/github.com/hailocab/go-hostpool/README.md
delete mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/host_entry.go
delete mode 100644 vendor/github.com/hailocab/go-hostpool/hostpool.go
delete mode 100644 vendor/github.com/hashicorp/golang-lru/.gitignore
delete mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go
delete mode 100644 vendor/github.com/hashicorp/golang-lru/README.md
delete mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go
delete mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go
delete mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/.travis.yml
delete mode 100644 vendor/github.com/jessevdk/go-flags/LICENSE
delete mode 100644 vendor/github.com/jessevdk/go-flags/README.md
delete mode 100644 vendor/github.com/jessevdk/go-flags/arg.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
delete mode 100644 vendor/github.com/jessevdk/go-flags/closest.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/command.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/completion.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/convert.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/error.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/flags.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/group.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/help.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/ini.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/man.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/multitag.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/option.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/optstyle_other.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/optstyle_windows.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/parser.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/termsize.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
delete mode 100644 vendor/github.com/jessevdk/go-flags/termsize_windows.go
delete mode 100644 vendor/github.com/josharian/intern/README.md
delete mode 100644 vendor/github.com/josharian/intern/intern.go
delete mode 100644 vendor/github.com/josharian/intern/license.md
delete mode 100644 vendor/github.com/julienschmidt/httprouter/.travis.yml
delete mode 100644 vendor/github.com/julienschmidt/httprouter/LICENSE
delete mode 100644 vendor/github.com/julienschmidt/httprouter/README.md
delete mode 100644 vendor/github.com/julienschmidt/httprouter/path.go
delete mode 100644 vendor/github.com/julienschmidt/httprouter/router.go
delete mode 100644 vendor/github.com/julienschmidt/httprouter/tree.go
delete mode 100644 vendor/github.com/lann/builder/.gitignore
delete mode 100644 vendor/github.com/lann/builder/.travis.yml
delete mode 100644 vendor/github.com/lann/builder/LICENSE
delete mode 100644 vendor/github.com/lann/builder/README.md
delete mode 100644 vendor/github.com/lann/builder/builder.go
delete mode 100644 vendor/github.com/lann/builder/reflect.go
delete mode 100644 vendor/github.com/lann/builder/registry.go
delete mode 100644 vendor/github.com/lann/ps/LICENSE
delete mode 100644 vendor/github.com/lann/ps/README.md
delete mode 100644 vendor/github.com/lann/ps/list.go
delete mode 100644 vendor/github.com/lann/ps/map.go
delete mode 100644 vendor/github.com/lann/ps/profile.sh
delete mode 100644 vendor/github.com/lib/pq/.gitignore
delete mode 100644 vendor/github.com/lib/pq/.travis.sh
delete mode 100644 vendor/github.com/lib/pq/.travis.yml
delete mode 100644 vendor/github.com/lib/pq/CONTRIBUTING.md
delete mode 100644 vendor/github.com/lib/pq/LICENSE.md
delete mode 100644 vendor/github.com/lib/pq/README.md
delete mode 100644 vendor/github.com/lib/pq/TESTS.md
delete mode 100644 vendor/github.com/lib/pq/array.go
delete mode 100644 vendor/github.com/lib/pq/buf.go
delete mode 100644 vendor/github.com/lib/pq/conn.go
delete mode 100644 vendor/github.com/lib/pq/conn_go18.go
delete mode 100644 vendor/github.com/lib/pq/connector.go
delete mode 100644 vendor/github.com/lib/pq/copy.go
delete mode 100644 vendor/github.com/lib/pq/doc.go
delete mode 100644 vendor/github.com/lib/pq/encode.go
delete mode 100644 vendor/github.com/lib/pq/error.go
delete mode 100644 vendor/github.com/lib/pq/notify.go
delete mode 100644 vendor/github.com/lib/pq/oid/doc.go
delete mode 100644 vendor/github.com/lib/pq/oid/types.go
delete mode 100644 vendor/github.com/lib/pq/rows.go
delete mode 100644 vendor/github.com/lib/pq/scram/scram.go
delete mode 100644 vendor/github.com/lib/pq/ssl.go
delete mode 100644 vendor/github.com/lib/pq/ssl_permissions.go
delete mode 100644 vendor/github.com/lib/pq/ssl_windows.go
delete mode 100644 vendor/github.com/lib/pq/url.go
delete mode 100644 vendor/github.com/lib/pq/user_posix.go
delete mode 100644 vendor/github.com/lib/pq/user_windows.go
delete mode 100644 vendor/github.com/lib/pq/uuid.go
delete mode 100644 vendor/github.com/mailru/easyjson/LICENSE
delete mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go
delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go
delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go
delete mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go
delete mode 100644 vendor/github.com/oklog/run/.gitignore
delete mode 100644 vendor/github.com/oklog/run/LICENSE
delete mode 100644 vendor/github.com/oklog/run/README.md
delete mode 100644 vendor/github.com/oklog/run/actors.go
delete mode 100644 vendor/github.com/oklog/run/group.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/api.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/metrics/metrics.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v1/api.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/api.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/compat.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/alert.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/alert_group.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/alert_groups.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_config.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/alertmanager_status.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alert.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_alerts.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silence.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/gettable_silences.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/label_set.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/matcher.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/matchers.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/peer_status.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alert.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/postable_alerts.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/postable_silence.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/receiver.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/silence.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/silence_status.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/models/version_info.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/openapi.yaml
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/configure_alertmanager.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/doc.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/embedded_spec.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/get_alerts_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alert/post_alerts_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup/get_alert_groups_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/alertmanager_api.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/general/get_status_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver/get_receivers_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/delete_silence_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silence_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/get_silences_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_parameters.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_responses.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/operations/silence/post_silences_urlbuilder.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/api/v2/restapi/server.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/advertise.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/channel.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/cluster.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/connection_pool.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/delegate.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/tls_config.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/tls_connection.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/cluster/tls_transport.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/dispatch/route.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/inhibit/inhibit.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/nflog/nflog.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/email/email.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/notify.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/opsgenie/opsgenie.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/pushover/pushover.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/slack/slack.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/sns/sns.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/util.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/victorops/victorops.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/notify/wechat/wechat.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/provider/mem/mem.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/provider/provider.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/silence/silence.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/store/store.go
delete mode 100644 vendor/github.com/prometheus/alertmanager/ui/Dockerfile
delete mode 100644 vendor/github.com/prometheus/alertmanager/ui/web.go
delete mode 100644 vendor/github.com/prometheus/common/route/route.go
delete mode 100644 vendor/github.com/prometheus/exporter-toolkit/LICENSE
delete mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/README.md
delete mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/cache.go
delete mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/handler.go
delete mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
delete mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml
delete mode 100644 vendor/github.com/prometheus/prometheus/discovery/dns/dns.go
delete mode 100644 vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go
delete mode 100644 vendor/github.com/prometheus/prometheus/notifier/notifier.go
delete mode 100644 vendor/github.com/prometheus/prometheus/rules/alerting.go
delete mode 100644 vendor/github.com/prometheus/prometheus/rules/manager.go
delete mode 100644 vendor/github.com/prometheus/prometheus/rules/recording.go
delete mode 100644 vendor/github.com/prometheus/prometheus/util/httputil/compression.go
delete mode 100644 vendor/github.com/prometheus/prometheus/util/httputil/context.go
delete mode 100644 vendor/github.com/prometheus/prometheus/util/httputil/cors.go
delete mode 100644 vendor/github.com/prometheus/prometheus/web/api/v1/api.go
delete mode 100644 vendor/github.com/rs/cors/LICENSE
delete mode 100644 vendor/github.com/rs/cors/README.md
delete mode 100644 vendor/github.com/rs/cors/cors.go
delete mode 100644 vendor/github.com/rs/cors/utils.go
delete mode 100644 vendor/github.com/segmentio/fasthash/LICENSE
delete mode 100644 vendor/github.com/segmentio/fasthash/fnv1a/hash.go
delete mode 100644 vendor/github.com/segmentio/fasthash/fnv1a/hash32.go
delete mode 100644 vendor/github.com/spf13/afero/.gitignore
delete mode 100644 vendor/github.com/spf13/afero/.travis.yml
delete mode 100644 vendor/github.com/spf13/afero/LICENSE.txt
delete mode 100644 vendor/github.com/spf13/afero/README.md
delete mode 100644 vendor/github.com/spf13/afero/afero.go
delete mode 100644 vendor/github.com/spf13/afero/appveyor.yml
delete mode 100644 vendor/github.com/spf13/afero/basepath.go
delete mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go
delete mode 100644 vendor/github.com/spf13/afero/const_bsds.go
delete mode 100644 vendor/github.com/spf13/afero/const_win_unix.go
delete mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go
delete mode 100644 vendor/github.com/spf13/afero/httpFs.go
delete mode 100644 vendor/github.com/spf13/afero/iofs.go
delete mode 100644 vendor/github.com/spf13/afero/ioutil.go
delete mode 100644 vendor/github.com/spf13/afero/lstater.go
delete mode 100644 vendor/github.com/spf13/afero/match.go
delete mode 100644 vendor/github.com/spf13/afero/mem/dir.go
delete mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go
delete mode 100644 vendor/github.com/spf13/afero/mem/file.go
delete mode 100644 vendor/github.com/spf13/afero/memmap.go
delete mode 100644 vendor/github.com/spf13/afero/os.go
delete mode 100644 vendor/github.com/spf13/afero/path.go
delete mode 100644 vendor/github.com/spf13/afero/readonlyfs.go
delete mode 100644 vendor/github.com/spf13/afero/regexpfs.go
delete mode 100644 vendor/github.com/spf13/afero/symlink.go
delete mode 100644 vendor/github.com/spf13/afero/unionFile.go
delete mode 100644 vendor/github.com/spf13/afero/util.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/compact/blocks_cleaner.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/compact/clean.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/compact/planner.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/compact/retention.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go
delete mode 100644 vendor/go.etcd.io/bbolt/.gitignore
delete mode 100644 vendor/go.etcd.io/bbolt/.travis.yml
delete mode 100644 vendor/go.etcd.io/bbolt/LICENSE
delete mode 100644 vendor/go.etcd.io/bbolt/Makefile
delete mode 100644 vendor/go.etcd.io/bbolt/README.md
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_386.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_amd64.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm64.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_linux.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mips64x.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mipsx.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_openbsd.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64le.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_riscv64.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_s390x.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
delete mode 100644 vendor/go.etcd.io/bbolt/bolt_windows.go
delete mode 100644 vendor/go.etcd.io/bbolt/boltsync_unix.go
delete mode 100644 vendor/go.etcd.io/bbolt/bucket.go
delete mode 100644 vendor/go.etcd.io/bbolt/compact.go
delete mode 100644 vendor/go.etcd.io/bbolt/cursor.go
delete mode 100644 vendor/go.etcd.io/bbolt/db.go
delete mode 100644 vendor/go.etcd.io/bbolt/doc.go
delete mode 100644 vendor/go.etcd.io/bbolt/errors.go
delete mode 100644 vendor/go.etcd.io/bbolt/freelist.go
delete mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go
delete mode 100644 vendor/go.etcd.io/bbolt/mlock_unix.go
delete mode 100644 vendor/go.etcd.io/bbolt/mlock_windows.go
delete mode 100644 vendor/go.etcd.io/bbolt/node.go
delete mode 100644 vendor/go.etcd.io/bbolt/page.go
delete mode 100644 vendor/go.etcd.io/bbolt/tx.go
delete mode 100644 vendor/go.etcd.io/bbolt/unsafe.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/LICENSE
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/decoder.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/doc.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/encoder.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/marshal.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_element.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_value.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/registry.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/types.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go
delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
delete mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go
delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/block.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
delete mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go
delete mode 100644 vendor/golang.org/x/text/width/kind_string.go
delete mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go
delete mode 100644 vendor/golang.org/x/text/width/tables11.0.0.go
delete mode 100644 vendor/golang.org/x/text/width/tables12.0.0.go
delete mode 100644 vendor/golang.org/x/text/width/tables13.0.0.go
delete mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go
delete mode 100644 vendor/golang.org/x/text/width/transform.go
delete mode 100644 vendor/golang.org/x/text/width/trieval.go
delete mode 100644 vendor/golang.org/x/text/width/width.go
delete mode 100644 vendor/gopkg.in/inf.v0/LICENSE
delete mode 100644 vendor/gopkg.in/inf.v0/dec.go
delete mode 100644 vendor/gopkg.in/inf.v0/rounder.go
diff --git a/vendor/github.com/Masterminds/squirrel/.gitignore b/vendor/github.com/Masterminds/squirrel/.gitignore
deleted file mode 100644
index 4a0699f0b..000000000
--- a/vendor/github.com/Masterminds/squirrel/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-squirrel.test
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/squirrel/.travis.yml b/vendor/github.com/Masterminds/squirrel/.travis.yml
deleted file mode 100644
index bc6be0f81..000000000
--- a/vendor/github.com/Masterminds/squirrel/.travis.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-language: go
-
-go:
-  - 1.1
-  - 1.2
-  - 1.3
-  - 1.4
-  - 1.5
-  - tip
-
-# Setting sudo access to false will let Travis CI use containers rather than
-# VMs to run the tests. For more details see:
-# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
-# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
-sudo: false
-
-install:
-  - go get
-  - go get github.com/stretchr/testify/assert
-
-notifications:
-  irc: "irc.freenode.net#masterminds"
diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE.txt b/vendor/github.com/Masterminds/squirrel/LICENSE.txt
deleted file mode 100644
index 74c20a2b9..000000000
--- a/vendor/github.com/Masterminds/squirrel/LICENSE.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Squirrel
-The Masterminds
-Copyright (C) 2014-2015, Lann Martin
-Copyright (C) 2015-2016, Google
-Copyright (C) 2015, Matt Farina and Matt Butcher
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/squirrel/README.md b/vendor/github.com/Masterminds/squirrel/README.md
deleted file mode 100644
index e0c4394c9..000000000
--- a/vendor/github.com/Masterminds/squirrel/README.md
+++ /dev/null
@@ -1,118 +0,0 @@
-# Squirrel - fluent SQL generator for Go
-
-```go
-import "gopkg.in/Masterminds/squirrel.v1"
-```
-or if you prefer using `master` (which may be arbitrarily ahead of or behind `v1`):
-
-**NOTE:** as of Go 1.6, `go get` correctly clones the Github default branch (which is `v1` in this repo).
-```go
-import "github.com/Masterminds/squirrel"
-```
-
-[![GoDoc](https://godoc.org/github.com/Masterminds/squirrel?status.png)](https://godoc.org/github.com/Masterminds/squirrel)
-[![Build Status](https://travis-ci.org/Masterminds/squirrel.svg?branch=v1)](https://travis-ci.org/Masterminds/squirrel)
-
-_**Note:** This project has moved from `github.com/lann/squirrel` to
-`github.com/Masterminds/squirrel`. Lann remains the architect of the
-project, but we're helping him curate.
-
-**Squirrel is not an ORM.** For an application of Squirrel, check out
-[structable, a table-struct mapper](https://github.com/technosophos/structable)
-
-
-Squirrel helps you build SQL queries from composable parts:
-
-```go
-import sq "github.com/Masterminds/squirrel"
-
-users := sq.Select("*").From("users").Join("emails USING (email_id)")
-
-active := users.Where(sq.Eq{"deleted_at": nil})
-
-sql, args, err := active.ToSql()
-
-sql == "SELECT * FROM users JOIN emails USING (email_id) WHERE deleted_at IS NULL"
-```
-
-```go
-sql, args, err := sq.
-    Insert("users").Columns("name", "age").
-    Values("moe", 13).Values("larry", sq.Expr("? + 5", 12)).
-    ToSql()
-
-sql == "INSERT INTO users (name,age) VALUES (?,?),(?,? + 5)"
-```
-
-Squirrel can also execute queries directly:
-
-```go
-stooges := users.Where(sq.Eq{"username": []string{"moe", "larry", "curly", "shemp"}})
-three_stooges := stooges.Limit(3)
-rows, err := three_stooges.RunWith(db).Query()
-
-// Behaves like:
-rows, err := db.Query("SELECT * FROM users WHERE username IN (?,?,?,?) LIMIT 3",
-    "moe", "larry", "curly", "shemp")
-```
-
-Squirrel makes conditional query building a breeze:
-
-```go
-if len(q) > 0 {
-    users = users.Where("name LIKE ?", fmt.Sprint("%", q, "%"))
-}
-```
-
-Squirrel wants to make your life easier:
-
-```go
-// StmtCache caches Prepared Stmts for you
-dbCache := sq.NewStmtCacher(db)
-
-// StatementBuilder keeps your syntax neat
-mydb := sq.StatementBuilder.RunWith(dbCache)
-select_users := mydb.Select("*").From("users")
-```
-
-Squirrel loves PostgreSQL:
-
-```go
-psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar)
-
-// You use question marks for placeholders...
-sql, _, _ := psql.Select("*").From("elephants").Where("name IN (?,?)", "Dumbo", "Verna")
-
-/// ...squirrel replaces them using PlaceholderFormat.
-sql == "SELECT * FROM elephants WHERE name IN ($1,$2)"
-
-
-/// You can retrieve id ...
-query := sq.Insert("nodes").
-    Columns("uuid", "type", "data").
-    Values(node.Uuid, node.Type, node.Data).
-    Suffix("RETURNING \"id\"").
-    RunWith(m.db).
-    PlaceholderFormat(sq.Dollar)
-
-query.QueryRow().Scan(&node.id)
-```
-
-You can escape question mask by inserting two question marks:
-
-```sql
-SELECT * FROM nodes WHERE meta->'format' ??| array[?,?]
-```
-
-will generate with the Dollar Placeholder:
-
-```sql
-SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2]
-```
-
-
-
-## License
-
-Squirrel is released under the
-[MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/Masterminds/squirrel/case.go b/vendor/github.com/Masterminds/squirrel/case.go
deleted file mode 100644
index 2eb69dd5c..000000000
--- a/vendor/github.com/Masterminds/squirrel/case.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package squirrel
-
-import (
-	"bytes"
-	"errors"
-
-	"github.com/lann/builder"
-)
-
-func init() {
-	builder.Register(CaseBuilder{}, caseData{})
-}
-
-// sqlizerBuffer is a helper that allows to write many Sqlizers one by one
-// without constant checks for errors that may come from Sqlizer
-type sqlizerBuffer struct {
-	bytes.Buffer
-	args []interface{}
-	err  error
-}
-
-// WriteSql converts Sqlizer to SQL strings and writes it to buffer
-func (b *sqlizerBuffer) WriteSql(item Sqlizer) {
-	if b.err != nil {
-		return
-	}
-
-	var str string
-	var args []interface{}
-	str, args, b.err = item.ToSql()
-
-	if b.err != nil {
-		return
-	}
-
-	b.WriteString(str)
-	b.WriteByte(' ')
-	b.args = append(b.args, args...)
-}
-
-func (b *sqlizerBuffer) ToSql() (string, []interface{}, error) {
-	return b.String(), b.args, b.err
-}
-
-// whenPart is a helper structure to describe SQLs "WHEN ... THEN ..." expression
-type whenPart struct {
-	when Sqlizer
-	then Sqlizer
-}
-
-func newWhenPart(when interface{}, then interface{}) whenPart {
-	return whenPart{newPart(when), newPart(then)}
-}
-
-// caseData holds all the data required to build a CASE SQL construct
-type caseData struct {
-	What      Sqlizer
-	WhenParts []whenPart
-	Else      Sqlizer
-}
-
-// ToSql implements Sqlizer
-func (d *caseData) ToSql() (sqlStr string, args []interface{}, err error) {
-	if len(d.WhenParts) == 0 {
-		err = errors.New("case expression must contain at lease one WHEN clause")
-
-		return
-	}
-
-	sql := sqlizerBuffer{}
-
-	sql.WriteString("CASE ")
-	if d.What != nil {
-		sql.WriteSql(d.What)
-	}
-
-	for _, p := range d.WhenParts {
-		sql.WriteString("WHEN ")
-		sql.WriteSql(p.when)
-		sql.WriteString("THEN ")
-		sql.WriteSql(p.then)
-	}
-
-	if d.Else != nil {
-		sql.WriteString("ELSE ")
-		sql.WriteSql(d.Else)
-	}
-
-	sql.WriteString("END")
-
-	return sql.ToSql()
-}
-
-// CaseBuilder builds SQL CASE construct which could be used as parts of queries.
-type CaseBuilder builder.Builder
-
-// ToSql builds the query into a SQL string and bound args.
-func (b CaseBuilder) ToSql() (string, []interface{}, error) {
-	data := builder.GetStruct(b).(caseData)
-	return data.ToSql()
-}
-
-// what sets optional value for CASE construct "CASE [value] ..."
-func (b CaseBuilder) what(expr interface{}) CaseBuilder {
-	return builder.Set(b, "What", newPart(expr)).(CaseBuilder)
-}
-
-// When adds "WHEN ... THEN ..." part to CASE construct
-func (b CaseBuilder) When(when interface{}, then interface{}) CaseBuilder {
-	// TODO: performance hint: replace slice of WhenPart with just slice of parts
-	// where even indices of the slice belong to "when"s and odd indices belong to "then"s
-	return builder.Append(b, "WhenParts", newWhenPart(when, then)).(CaseBuilder)
-}
-
-// What sets optional "ELSE ..."
part for CASE construct -func (b CaseBuilder) Else(expr interface{}) CaseBuilder { - return builder.Set(b, "Else", newPart(expr)).(CaseBuilder) -} diff --git a/vendor/github.com/Masterminds/squirrel/delete.go b/vendor/github.com/Masterminds/squirrel/delete.go deleted file mode 100644 index 8aa4f1e66..000000000 --- a/vendor/github.com/Masterminds/squirrel/delete.go +++ /dev/null @@ -1,152 +0,0 @@ -package squirrel - -import ( - "bytes" - "database/sql" - "fmt" - "github.com/lann/builder" - "strings" -) - -type deleteData struct { - PlaceholderFormat PlaceholderFormat - RunWith BaseRunner - Prefixes exprs - From string - WhereParts []Sqlizer - OrderBys []string - Limit string - Offset string - Suffixes exprs -} - -func (d *deleteData) Exec() (sql.Result, error) { - if d.RunWith == nil { - return nil, RunnerNotSet - } - return ExecWith(d.RunWith, d) -} - -func (d *deleteData) ToSql() (sqlStr string, args []interface{}, err error) { - if len(d.From) == 0 { - err = fmt.Errorf("delete statements must specify a From table") - return - } - - sql := &bytes.Buffer{} - - if len(d.Prefixes) > 0 { - args, _ = d.Prefixes.AppendToSql(sql, " ", args) - sql.WriteString(" ") - } - - sql.WriteString("DELETE FROM ") - sql.WriteString(d.From) - - if len(d.WhereParts) > 0 { - sql.WriteString(" WHERE ") - args, err = appendToSql(d.WhereParts, sql, " AND ", args) - if err != nil { - return - } - } - - if len(d.OrderBys) > 0 { - sql.WriteString(" ORDER BY ") - sql.WriteString(strings.Join(d.OrderBys, ", ")) - } - - if len(d.Limit) > 0 { - sql.WriteString(" LIMIT ") - sql.WriteString(d.Limit) - } - - if len(d.Offset) > 0 { - sql.WriteString(" OFFSET ") - sql.WriteString(d.Offset) - } - - if len(d.Suffixes) > 0 { - sql.WriteString(" ") - args, _ = d.Suffixes.AppendToSql(sql, " ", args) - } - - sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) - return -} - - -// Builder - -// DeleteBuilder builds SQL DELETE statements. -type DeleteBuilder builder.Builder - -func init() { - builder.Register(DeleteBuilder{}, deleteData{}) -} - -// Format methods - -// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the -// query. -func (b DeleteBuilder) PlaceholderFormat(f PlaceholderFormat) DeleteBuilder { - return builder.Set(b, "PlaceholderFormat", f).(DeleteBuilder) -} - -// Runner methods - -// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. -func (b DeleteBuilder) RunWith(runner BaseRunner) DeleteBuilder { - return setRunWith(b, runner).(DeleteBuilder) -} - -// Exec builds and Execs the query with the Runner set by RunWith. -func (b DeleteBuilder) Exec() (sql.Result, error) { - data := builder.GetStruct(b).(deleteData) - return data.Exec() -} - -// SQL methods - -// ToSql builds the query into a SQL string and bound args. -func (b DeleteBuilder) ToSql() (string, []interface{}, error) { - data := builder.GetStruct(b).(deleteData) - return data.ToSql() -} - -// Prefix adds an expression to the beginning of the query -func (b DeleteBuilder) Prefix(sql string, args ...interface{}) DeleteBuilder { - return builder.Append(b, "Prefixes", Expr(sql, args...)).(DeleteBuilder) -} - -// From sets the table to be deleted from. -func (b DeleteBuilder) From(from string) DeleteBuilder { - return builder.Set(b, "From", from).(DeleteBuilder) -} - -// Where adds WHERE expressions to the query. -// -// See SelectBuilder.Where for more information. 
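For reviewers tracing what this vendored dependency provided: the DeleteBuilder in this file assembled DELETE statements clause by clause (FROM, WHERE, ORDER BY, LIMIT, OFFSET, suffixes) before placeholder replacement. A minimal sketch of that builder flow, with illustrative table and column names (not from this repo):

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Clauses are emitted in the fixed order used by deleteData.ToSql.
	sqlStr, args, err := sq.Delete("chunks").
		Where(sq.Eq{"user_id": "tenant-1"}). // renders as "user_id = ?"
		Where(sq.Lt{"created": 1714500000}). // ANDed with the previous WHERE part
		OrderBy("created").
		Limit(100).
		ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sqlStr) // DELETE FROM chunks WHERE user_id = ? AND created < ? ORDER BY created LIMIT 100
	fmt.Println(args)   // [tenant-1 1714500000]
}
```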
-func (b DeleteBuilder) Where(pred interface{}, args ...interface{}) DeleteBuilder { - return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(DeleteBuilder) -} - -// OrderBy adds ORDER BY expressions to the query. -func (b DeleteBuilder) OrderBy(orderBys ...string) DeleteBuilder { - return builder.Extend(b, "OrderBys", orderBys).(DeleteBuilder) -} - -// Limit sets a LIMIT clause on the query. -func (b DeleteBuilder) Limit(limit uint64) DeleteBuilder { - return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(DeleteBuilder) -} - -// Offset sets a OFFSET clause on the query. -func (b DeleteBuilder) Offset(offset uint64) DeleteBuilder { - return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(DeleteBuilder) -} - -// Suffix adds an expression to the end of the query -func (b DeleteBuilder) Suffix(sql string, args ...interface{}) DeleteBuilder { - return builder.Append(b, "Suffixes", Expr(sql, args...)).(DeleteBuilder) -} diff --git a/vendor/github.com/Masterminds/squirrel/expr.go b/vendor/github.com/Masterminds/squirrel/expr.go deleted file mode 100644 index a8749f10d..000000000 --- a/vendor/github.com/Masterminds/squirrel/expr.go +++ /dev/null @@ -1,247 +0,0 @@ -package squirrel - -import ( - "database/sql/driver" - "fmt" - "io" - "reflect" - "strings" -) - -type expr struct { - sql string - args []interface{} -} - -// Expr builds value expressions for InsertBuilder and UpdateBuilder. -// -// Ex: -// .Values(Expr("FROM_UNIXTIME(?)", t)) -func Expr(sql string, args ...interface{}) expr { - return expr{sql: sql, args: args} -} - -func (e expr) ToSql() (sql string, args []interface{}, err error) { - return e.sql, e.args, nil -} - -type exprs []expr - -func (es exprs) AppendToSql(w io.Writer, sep string, args []interface{}) ([]interface{}, error) { - for i, e := range es { - if i > 0 { - _, err := io.WriteString(w, sep) - if err != nil { - return nil, err - } - } - _, err := io.WriteString(w, e.sql) - if err != nil { - return nil, err - } - args = append(args, e.args...) - } - return args, nil -} - -// aliasExpr helps to alias part of SQL query generated with underlying "expr" -type aliasExpr struct { - expr Sqlizer - alias string -} - -// Alias allows to define alias for column in SelectBuilder. Useful when column is -// defined as complex expression like IF or CASE -// Ex: -// .Column(Alias(caseStmt, "case_column")) -func Alias(expr Sqlizer, alias string) aliasExpr { - return aliasExpr{expr, alias} -} - -func (e aliasExpr) ToSql() (sql string, args []interface{}, err error) { - sql, args, err = e.expr.ToSql() - if err == nil { - sql = fmt.Sprintf("(%s) AS %s", sql, e.alias) - } - return -} - -// Eq is syntactic sugar for use with Where/Having/Set methods. 
-// Ex: -// .Where(Eq{"id": 1}) -type Eq map[string]interface{} - -func (eq Eq) toSql(useNotOpr bool) (sql string, args []interface{}, err error) { - var ( - exprs []string - equalOpr string = "=" - inOpr string = "IN" - nullOpr string = "IS" - ) - - if useNotOpr { - equalOpr = "<>" - inOpr = "NOT IN" - nullOpr = "IS NOT" - } - - for key, val := range eq { - expr := "" - - switch v := val.(type) { - case driver.Valuer: - if val, err = v.Value(); err != nil { - return - } - } - - if val == nil { - expr = fmt.Sprintf("%s %s NULL", key, nullOpr) - } else { - valVal := reflect.ValueOf(val) - if valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice { - if valVal.Len() == 0 { - expr = fmt.Sprintf("%s %s (NULL)", key, inOpr) - if args == nil { - args = []interface{}{} - } - } else { - for i := 0; i < valVal.Len(); i++ { - args = append(args, valVal.Index(i).Interface()) - } - expr = fmt.Sprintf("%s %s (%s)", key, inOpr, Placeholders(valVal.Len())) - } - } else { - expr = fmt.Sprintf("%s %s ?", key, equalOpr) - args = append(args, val) - } - } - exprs = append(exprs, expr) - } - sql = strings.Join(exprs, " AND ") - return -} - -func (eq Eq) ToSql() (sql string, args []interface{}, err error) { - return eq.toSql(false) -} - -// NotEq is syntactic sugar for use with Where/Having/Set methods. -// Ex: -// .Where(NotEq{"id": 1}) == "id <> 1" -type NotEq Eq - -func (neq NotEq) ToSql() (sql string, args []interface{}, err error) { - return Eq(neq).toSql(true) -} - -// Lt is syntactic sugar for use with Where/Having/Set methods. -// Ex: -// .Where(Lt{"id": 1}) -type Lt map[string]interface{} - -func (lt Lt) toSql(opposite, orEq bool) (sql string, args []interface{}, err error) { - var ( - exprs []string - opr string = "<" - ) - - if opposite { - opr = ">" - } - - if orEq { - opr = fmt.Sprintf("%s%s", opr, "=") - } - - for key, val := range lt { - expr := "" - - switch v := val.(type) { - case driver.Valuer: - if val, err = v.Value(); err != nil { - return - } - } - - if val == nil { - err = fmt.Errorf("cannot use null with less than or greater than operators") - return - } else { - valVal := reflect.ValueOf(val) - if valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice { - err = fmt.Errorf("cannot use array or slice with less than or greater than operators") - return - } else { - expr = fmt.Sprintf("%s %s ?", key, opr) - args = append(args, val) - } - } - exprs = append(exprs, expr) - } - sql = strings.Join(exprs, " AND ") - return -} - -func (lt Lt) ToSql() (sql string, args []interface{}, err error) { - return lt.toSql(false, false) -} - -// LtOrEq is syntactic sugar for use with Where/Having/Set methods. -// Ex: -// .Where(LtOrEq{"id": 1}) == "id <= 1" -type LtOrEq Lt - -func (ltOrEq LtOrEq) ToSql() (sql string, args []interface{}, err error) { - return Lt(ltOrEq).toSql(false, true) -} - -// Gt is syntactic sugar for use with Where/Having/Set methods. -// Ex: -// .Where(Gt{"id": 1}) == "id > 1" -type Gt Lt - -func (gt Gt) ToSql() (sql string, args []interface{}, err error) { - return Lt(gt).toSql(true, false) -} - -// GtOrEq is syntactic sugar for use with Where/Having/Set methods. 
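A note on the Eq family above, since its behavior is easy to miss when reviewing the removal: a nil value renders as IS NULL, a slice renders as IN with one placeholder per element, and an empty slice renders as IN (NULL) so the clause matches nothing. A short illustration (column names are illustrative); separate Where calls are used to keep clause order deterministic:

```go
import sq "github.com/Masterminds/squirrel"

sql, args, _ := sq.Select("*").From("users").
	Where(sq.Eq{"deleted_at": nil}).                    // deleted_at IS NULL
	Where(sq.Eq{"username": []string{"moe", "larry"}}). // username IN (?,?)
	Where(sq.NotEq{"role": nil}).                       // role IS NOT NULL
	ToSql()
// SELECT * FROM users WHERE deleted_at IS NULL AND username IN (?,?) AND role IS NOT NULL
// args: [moe larry]
```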
-// Ex: -// .Where(GtOrEq{"id": 1}) == "id >= 1" -type GtOrEq Lt - -func (gtOrEq GtOrEq) ToSql() (sql string, args []interface{}, err error) { - return Lt(gtOrEq).toSql(true, true) -} - -type conj []Sqlizer - -func (c conj) join(sep string) (sql string, args []interface{}, err error) { - var sqlParts []string - for _, sqlizer := range c { - partSql, partArgs, err := sqlizer.ToSql() - if err != nil { - return "", nil, err - } - if partSql != "" { - sqlParts = append(sqlParts, partSql) - args = append(args, partArgs...) - } - } - if len(sqlParts) > 0 { - sql = fmt.Sprintf("(%s)", strings.Join(sqlParts, sep)) - } - return -} - -type And conj - -func (a And) ToSql() (string, []interface{}, error) { - return conj(a).join(" AND ") -} - -type Or conj - -func (o Or) ToSql() (string, []interface{}, error) { - return conj(o).join(" OR ") -} diff --git a/vendor/github.com/Masterminds/squirrel/insert.go b/vendor/github.com/Masterminds/squirrel/insert.go deleted file mode 100644 index f08025f50..000000000 --- a/vendor/github.com/Masterminds/squirrel/insert.go +++ /dev/null @@ -1,207 +0,0 @@ -package squirrel - -import ( - "bytes" - "database/sql" - "fmt" - "github.com/lann/builder" - "strings" -) - -type insertData struct { - PlaceholderFormat PlaceholderFormat - RunWith BaseRunner - Prefixes exprs - Options []string - Into string - Columns []string - Values [][]interface{} - Suffixes exprs -} - -func (d *insertData) Exec() (sql.Result, error) { - if d.RunWith == nil { - return nil, RunnerNotSet - } - return ExecWith(d.RunWith, d) -} - -func (d *insertData) Query() (*sql.Rows, error) { - if d.RunWith == nil { - return nil, RunnerNotSet - } - return QueryWith(d.RunWith, d) -} - -func (d *insertData) QueryRow() RowScanner { - if d.RunWith == nil { - return &Row{err: RunnerNotSet} - } - queryRower, ok := d.RunWith.(QueryRower) - if !ok { - return &Row{err: RunnerNotQueryRunner} - } - return QueryRowWith(queryRower, d) -} - -func (d *insertData) ToSql() (sqlStr string, args []interface{}, err error) { - if len(d.Into) == 0 { - err = fmt.Errorf("insert statements must specify a table") - return - } - if len(d.Values) == 0 { - err = fmt.Errorf("insert statements must have at least one set of values") - return - } - - sql := &bytes.Buffer{} - - if len(d.Prefixes) > 0 { - args, _ = d.Prefixes.AppendToSql(sql, " ", args) - sql.WriteString(" ") - } - - sql.WriteString("INSERT ") - - if len(d.Options) > 0 { - sql.WriteString(strings.Join(d.Options, " ")) - sql.WriteString(" ") - } - - sql.WriteString("INTO ") - sql.WriteString(d.Into) - sql.WriteString(" ") - - if len(d.Columns) > 0 { - sql.WriteString("(") - sql.WriteString(strings.Join(d.Columns, ",")) - sql.WriteString(") ") - } - - sql.WriteString("VALUES ") - - valuesStrings := make([]string, len(d.Values)) - for r, row := range d.Values { - valueStrings := make([]string, len(row)) - for v, val := range row { - e, isExpr := val.(expr) - if isExpr { - valueStrings[v] = e.sql - args = append(args, e.args...) - } else { - valueStrings[v] = "?" - args = append(args, val) - } - } - valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ",")) - } - sql.WriteString(strings.Join(valuesStrings, ",")) - - if len(d.Suffixes) > 0 { - sql.WriteString(" ") - args, _ = d.Suffixes.AppendToSql(sql, " ", args) - } - - sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) - return -} - -// Builder - -// InsertBuilder builds SQL INSERT statements. 
-type InsertBuilder builder.Builder - -func init() { - builder.Register(InsertBuilder{}, insertData{}) -} - -// Format methods - -// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the -// query. -func (b InsertBuilder) PlaceholderFormat(f PlaceholderFormat) InsertBuilder { - return builder.Set(b, "PlaceholderFormat", f).(InsertBuilder) -} - -// Runner methods - -// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. -func (b InsertBuilder) RunWith(runner BaseRunner) InsertBuilder { - return setRunWith(b, runner).(InsertBuilder) -} - -// Exec builds and Execs the query with the Runner set by RunWith. -func (b InsertBuilder) Exec() (sql.Result, error) { - data := builder.GetStruct(b).(insertData) - return data.Exec() -} - -// Query builds and Querys the query with the Runner set by RunWith. -func (b InsertBuilder) Query() (*sql.Rows, error) { - data := builder.GetStruct(b).(insertData) - return data.Query() -} - -// QueryRow builds and QueryRows the query with the Runner set by RunWith. -func (b InsertBuilder) QueryRow() RowScanner { - data := builder.GetStruct(b).(insertData) - return data.QueryRow() -} - -// Scan is a shortcut for QueryRow().Scan. -func (b InsertBuilder) Scan(dest ...interface{}) error { - return b.QueryRow().Scan(dest...) -} - -// SQL methods - -// ToSql builds the query into a SQL string and bound args. -func (b InsertBuilder) ToSql() (string, []interface{}, error) { - data := builder.GetStruct(b).(insertData) - return data.ToSql() -} - -// Prefix adds an expression to the beginning of the query -func (b InsertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder { - return builder.Append(b, "Prefixes", Expr(sql, args...)).(InsertBuilder) -} - -// Options adds keyword options before the INTO clause of the query. -func (b InsertBuilder) Options(options ...string) InsertBuilder { - return builder.Extend(b, "Options", options).(InsertBuilder) -} - -// Into sets the INTO clause of the query. -func (b InsertBuilder) Into(from string) InsertBuilder { - return builder.Set(b, "Into", from).(InsertBuilder) -} - -// Columns adds insert columns to the query. -func (b InsertBuilder) Columns(columns ...string) InsertBuilder { - return builder.Extend(b, "Columns", columns).(InsertBuilder) -} - -// Values adds a single row's values to the query. 
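Besides Values, the removed builder offered SetMap (just below), which replaces any previously set columns and values with the contents of a map. In this vendored copy the column order follows Go map iteration, so the generated SQL is not byte-for-byte stable across runs. A sketch with illustrative names:

```go
import sq "github.com/Masterminds/squirrel"

row := map[string]interface{}{"name": "curly", "age": 27}
sql, args, _ := sq.Insert("users").SetMap(row).ToSql()
// e.g. INSERT INTO users (name,age) VALUES (?,?)
// column order here follows map iteration in this vendored version
```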
-func (b InsertBuilder) Values(values ...interface{}) InsertBuilder { - return builder.Append(b, "Values", values).(InsertBuilder) -} - -// Suffix adds an expression to the end of the query -func (b InsertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder { - return builder.Append(b, "Suffixes", Expr(sql, args...)).(InsertBuilder) -} - -// SetMap set columns and values for insert builder from a map of column name and value -// note that it will reset all previous columns and values was set if any -func (b InsertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder { - cols := make([]string, 0, len(clauses)) - vals := make([]interface{}, 0, len(clauses)) - for col, val := range clauses { - cols = append(cols, col) - vals = append(vals, val) - } - - b = builder.Set(b, "Columns", cols).(InsertBuilder) - b = builder.Set(b, "Values", [][]interface{}{vals}).(InsertBuilder) - return b -} diff --git a/vendor/github.com/Masterminds/squirrel/part.go b/vendor/github.com/Masterminds/squirrel/part.go deleted file mode 100644 index 2926d0315..000000000 --- a/vendor/github.com/Masterminds/squirrel/part.go +++ /dev/null @@ -1,55 +0,0 @@ -package squirrel - -import ( - "fmt" - "io" -) - -type part struct { - pred interface{} - args []interface{} -} - -func newPart(pred interface{}, args ...interface{}) Sqlizer { - return &part{pred, args} -} - -func (p part) ToSql() (sql string, args []interface{}, err error) { - switch pred := p.pred.(type) { - case nil: - // no-op - case Sqlizer: - sql, args, err = pred.ToSql() - case string: - sql = pred - args = p.args - default: - err = fmt.Errorf("expected string or Sqlizer, not %T", pred) - } - return -} - -func appendToSql(parts []Sqlizer, w io.Writer, sep string, args []interface{}) ([]interface{}, error) { - for i, p := range parts { - partSql, partArgs, err := p.ToSql() - if err != nil { - return nil, err - } else if len(partSql) == 0 { - continue - } - - if i > 0 { - _, err := io.WriteString(w, sep) - if err != nil { - return nil, err - } - } - - _, err = io.WriteString(w, partSql) - if err != nil { - return nil, err - } - args = append(args, partArgs...) - } - return args, nil -} diff --git a/vendor/github.com/Masterminds/squirrel/placeholder.go b/vendor/github.com/Masterminds/squirrel/placeholder.go deleted file mode 100644 index d377788b9..000000000 --- a/vendor/github.com/Masterminds/squirrel/placeholder.go +++ /dev/null @@ -1,70 +0,0 @@ -package squirrel - -import ( - "bytes" - "fmt" - "strings" -) - -// PlaceholderFormat is the interface that wraps the ReplacePlaceholders method. -// -// ReplacePlaceholders takes a SQL statement and replaces each question mark -// placeholder with a (possibly different) SQL placeholder. -type PlaceholderFormat interface { - ReplacePlaceholders(sql string) (string, error) -} - -var ( - // Question is a PlaceholderFormat instance that leaves placeholders as - // question marks. - Question = questionFormat{} - - // Dollar is a PlaceholderFormat instance that replaces placeholders with - // dollar-prefixed positional placeholders (e.g. $1, $2, $3). - Dollar = dollarFormat{} -) - -type questionFormat struct{} - -func (_ questionFormat) ReplacePlaceholders(sql string) (string, error) { - return sql, nil -} - -type dollarFormat struct{} - -func (_ dollarFormat) ReplacePlaceholders(sql string) (string, error) { - buf := &bytes.Buffer{} - i := 0 - for { - p := strings.Index(sql, "?") - if p == -1 { - break - } - - if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? 
- buf.WriteString(sql[:p]) - buf.WriteString("?") - if len(sql[p:]) == 1 { - break - } - sql = sql[p+2:] - } else { - i++ - buf.WriteString(sql[:p]) - fmt.Fprintf(buf, "$%d", i) - sql = sql[p+1:] - } - } - - buf.WriteString(sql) - return buf.String(), nil -} - -// Placeholders returns a string with count ? placeholders joined with commas. -func Placeholders(count int) string { - if count < 1 { - return "" - } - - return strings.Repeat(",?", count)[1:] -} diff --git a/vendor/github.com/Masterminds/squirrel/row.go b/vendor/github.com/Masterminds/squirrel/row.go deleted file mode 100644 index 74ffda92b..000000000 --- a/vendor/github.com/Masterminds/squirrel/row.go +++ /dev/null @@ -1,22 +0,0 @@ -package squirrel - -// RowScanner is the interface that wraps the Scan method. -// -// Scan behaves like database/sql.Row.Scan. -type RowScanner interface { - Scan(...interface{}) error -} - -// Row wraps database/sql.Row to let squirrel return new errors on Scan. -type Row struct { - RowScanner - err error -} - -// Scan returns Row.err or calls RowScanner.Scan. -func (r *Row) Scan(dest ...interface{}) error { - if r.err != nil { - return r.err - } - return r.RowScanner.Scan(dest...) -} diff --git a/vendor/github.com/Masterminds/squirrel/select.go b/vendor/github.com/Masterminds/squirrel/select.go deleted file mode 100644 index 7dc09bc5a..000000000 --- a/vendor/github.com/Masterminds/squirrel/select.go +++ /dev/null @@ -1,313 +0,0 @@ -package squirrel - -import ( - "bytes" - "database/sql" - "fmt" - "strings" - - "github.com/lann/builder" -) - -type selectData struct { - PlaceholderFormat PlaceholderFormat - RunWith BaseRunner - Prefixes exprs - Options []string - Columns []Sqlizer - From Sqlizer - Joins []Sqlizer - WhereParts []Sqlizer - GroupBys []string - HavingParts []Sqlizer - OrderBys []string - Limit string - Offset string - Suffixes exprs -} - -func (d *selectData) Exec() (sql.Result, error) { - if d.RunWith == nil { - return nil, RunnerNotSet - } - return ExecWith(d.RunWith, d) -} - -func (d *selectData) Query() (*sql.Rows, error) { - if d.RunWith == nil { - return nil, RunnerNotSet - } - return QueryWith(d.RunWith, d) -} - -func (d *selectData) QueryRow() RowScanner { - if d.RunWith == nil { - return &Row{err: RunnerNotSet} - } - queryRower, ok := d.RunWith.(QueryRower) - if !ok { - return &Row{err: RunnerNotQueryRunner} - } - return QueryRowWith(queryRower, d) -} - -func (d *selectData) ToSql() (sqlStr string, args []interface{}, err error) { - if len(d.Columns) == 0 { - err = fmt.Errorf("select statements must have at least one result column") - return - } - - sql := &bytes.Buffer{} - - if len(d.Prefixes) > 0 { - args, _ = d.Prefixes.AppendToSql(sql, " ", args) - sql.WriteString(" ") - } - - sql.WriteString("SELECT ") - - if len(d.Options) > 0 { - sql.WriteString(strings.Join(d.Options, " ")) - sql.WriteString(" ") - } - - if len(d.Columns) > 0 { - args, err = appendToSql(d.Columns, sql, ", ", args) - if err != nil { - return - } - } - - if d.From != nil { - sql.WriteString(" FROM ") - args, err = appendToSql([]Sqlizer{d.From}, sql, "", args) - if err != nil { - return - } - } - - if len(d.Joins) > 0 { - sql.WriteString(" ") - args, err = appendToSql(d.Joins, sql, " ", args) - if err != nil { - return - } - } - - if len(d.WhereParts) > 0 { - sql.WriteString(" WHERE ") - args, err = appendToSql(d.WhereParts, sql, " AND ", args) - if err != nil { - return - } - } - - if len(d.GroupBys) > 0 { - sql.WriteString(" GROUP BY ") - sql.WriteString(strings.Join(d.GroupBys, ", ")) - } - - if 
len(d.HavingParts) > 0 { - sql.WriteString(" HAVING ") - args, err = appendToSql(d.HavingParts, sql, " AND ", args) - if err != nil { - return - } - } - - if len(d.OrderBys) > 0 { - sql.WriteString(" ORDER BY ") - sql.WriteString(strings.Join(d.OrderBys, ", ")) - } - - if len(d.Limit) > 0 { - sql.WriteString(" LIMIT ") - sql.WriteString(d.Limit) - } - - if len(d.Offset) > 0 { - sql.WriteString(" OFFSET ") - sql.WriteString(d.Offset) - } - - if len(d.Suffixes) > 0 { - sql.WriteString(" ") - args, _ = d.Suffixes.AppendToSql(sql, " ", args) - } - - sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) - return -} - -// Builder - -// SelectBuilder builds SQL SELECT statements. -type SelectBuilder builder.Builder - -func init() { - builder.Register(SelectBuilder{}, selectData{}) -} - -// Format methods - -// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the -// query. -func (b SelectBuilder) PlaceholderFormat(f PlaceholderFormat) SelectBuilder { - return builder.Set(b, "PlaceholderFormat", f).(SelectBuilder) -} - -// Runner methods - -// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. -func (b SelectBuilder) RunWith(runner BaseRunner) SelectBuilder { - return setRunWith(b, runner).(SelectBuilder) -} - -// Exec builds and Execs the query with the Runner set by RunWith. -func (b SelectBuilder) Exec() (sql.Result, error) { - data := builder.GetStruct(b).(selectData) - return data.Exec() -} - -// Query builds and Querys the query with the Runner set by RunWith. -func (b SelectBuilder) Query() (*sql.Rows, error) { - data := builder.GetStruct(b).(selectData) - return data.Query() -} - -// QueryRow builds and QueryRows the query with the Runner set by RunWith. -func (b SelectBuilder) QueryRow() RowScanner { - data := builder.GetStruct(b).(selectData) - return data.QueryRow() -} - -// Scan is a shortcut for QueryRow().Scan. -func (b SelectBuilder) Scan(dest ...interface{}) error { - return b.QueryRow().Scan(dest...) -} - -// SQL methods - -// ToSql builds the query into a SQL string and bound args. -func (b SelectBuilder) ToSql() (string, []interface{}, error) { - data := builder.GetStruct(b).(selectData) - return data.ToSql() -} - -// Prefix adds an expression to the beginning of the query -func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder { - return builder.Append(b, "Prefixes", Expr(sql, args...)).(SelectBuilder) -} - -// Distinct adds a DISTINCT clause to the query. -func (b SelectBuilder) Distinct() SelectBuilder { - return b.Options("DISTINCT") -} - -// Options adds select option to the query -func (b SelectBuilder) Options(options ...string) SelectBuilder { - return builder.Extend(b, "Options", options).(SelectBuilder) -} - -// Columns adds result columns to the query. -func (b SelectBuilder) Columns(columns ...string) SelectBuilder { - var parts []interface{} - for _, str := range columns { - parts = append(parts, newPart(str)) - } - return builder.Extend(b, "Columns", parts).(SelectBuilder) -} - -// Column adds a result column to the query. -// Unlike Columns, Column accepts args which will be bound to placeholders in -// the columns string, for example: -// Column("IF(col IN ("+squirrel.Placeholders(3)+"), 1, 0) as col", 1, 2, 3) -func (b SelectBuilder) Column(column interface{}, args ...interface{}) SelectBuilder { - return builder.Append(b, "Columns", newPart(column, args...)).(SelectBuilder) -} - -// From sets the FROM clause of the query. 
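selectData.ToSql above concatenates clauses in a fixed order (prefixes, SELECT, options, columns, FROM, JOINs, WHERE, GROUP BY, HAVING, ORDER BY, LIMIT, OFFSET, suffixes) and only then rewrites placeholders. A sketch exercising most of that pipeline with the Dollar format; the schema is illustrative:

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar)
	sqlStr, args, err := psql.
		Select("region", "COUNT(*) AS n").
		From("nodes").
		LeftJoin("meta ON meta.node_id = nodes.id").
		Where(sq.Gt{"nodes.created": 0}).
		GroupBy("region").
		Having("COUNT(*) > ?", 1).
		OrderBy("n DESC").
		Limit(10).
		ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sqlStr)
	// SELECT region, COUNT(*) AS n FROM nodes
	//   LEFT JOIN meta ON meta.node_id = nodes.id
	//   WHERE nodes.created > $1 GROUP BY region
	//   HAVING COUNT(*) > $2 ORDER BY n DESC LIMIT 10
	fmt.Println(args) // [0 1]
}
```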
-func (b SelectBuilder) From(from string) SelectBuilder { - return builder.Set(b, "From", newPart(from)).(SelectBuilder) -} - -// FromSelect sets a subquery into the FROM clause of the query. -func (b SelectBuilder) FromSelect(from SelectBuilder, alias string) SelectBuilder { - return builder.Set(b, "From", Alias(from, alias)).(SelectBuilder) -} - -// JoinClause adds a join clause to the query. -func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder { - return builder.Append(b, "Joins", newPart(pred, args...)).(SelectBuilder) -} - -// Join adds a JOIN clause to the query. -func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder { - return b.JoinClause("JOIN "+join, rest...) -} - -// LeftJoin adds a LEFT JOIN clause to the query. -func (b SelectBuilder) LeftJoin(join string, rest ...interface{}) SelectBuilder { - return b.JoinClause("LEFT JOIN "+join, rest...) -} - -// RightJoin adds a RIGHT JOIN clause to the query. -func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder { - return b.JoinClause("RIGHT JOIN "+join, rest...) -} - -// Where adds an expression to the WHERE clause of the query. -// -// Expressions are ANDed together in the generated SQL. -// -// Where accepts several types for its pred argument: -// -// nil OR "" - ignored. -// -// string - SQL expression. -// If the expression has SQL placeholders then a set of arguments must be passed -// as well, one for each placeholder. -// -// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is -// transformed into an expression like " = ?", with the corresponding value -// bound to the placeholder. If the value is nil, the expression will be " -// IS NULL". If the value is an array or slice, the expression will be " IN -// (?,?,...)", with one placeholder for each item in the value. These expressions -// are ANDed together. -// -// Where will panic if pred isn't any of the above types. -func (b SelectBuilder) Where(pred interface{}, args ...interface{}) SelectBuilder { - return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(SelectBuilder) -} - -// GroupBy adds GROUP BY expressions to the query. -func (b SelectBuilder) GroupBy(groupBys ...string) SelectBuilder { - return builder.Extend(b, "GroupBys", groupBys).(SelectBuilder) -} - -// Having adds an expression to the HAVING clause of the query. -// -// See Where. -func (b SelectBuilder) Having(pred interface{}, rest ...interface{}) SelectBuilder { - return builder.Append(b, "HavingParts", newWherePart(pred, rest...)).(SelectBuilder) -} - -// OrderBy adds ORDER BY expressions to the query. -func (b SelectBuilder) OrderBy(orderBys ...string) SelectBuilder { - return builder.Extend(b, "OrderBys", orderBys).(SelectBuilder) -} - -// Limit sets a LIMIT clause on the query. -func (b SelectBuilder) Limit(limit uint64) SelectBuilder { - return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(SelectBuilder) -} - -// Offset sets a OFFSET clause on the query. 
-func (b SelectBuilder) Offset(offset uint64) SelectBuilder { - return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(SelectBuilder) -} - -// Suffix adds an expression to the end of the query -func (b SelectBuilder) Suffix(sql string, args ...interface{}) SelectBuilder { - return builder.Append(b, "Suffixes", Expr(sql, args...)).(SelectBuilder) -} diff --git a/vendor/github.com/Masterminds/squirrel/squirrel.go b/vendor/github.com/Masterminds/squirrel/squirrel.go deleted file mode 100644 index 89aaf3dcf..000000000 --- a/vendor/github.com/Masterminds/squirrel/squirrel.go +++ /dev/null @@ -1,166 +0,0 @@ -// Package squirrel provides a fluent SQL generator. -// -// See https://github.com/lann/squirrel for examples. -package squirrel - -import ( - "bytes" - "database/sql" - "fmt" - "strings" - - "github.com/lann/builder" -) - -// Sqlizer is the interface that wraps the ToSql method. -// -// ToSql returns a SQL representation of the Sqlizer, along with a slice of args -// as passed to e.g. database/sql.Exec. It can also return an error. -type Sqlizer interface { - ToSql() (string, []interface{}, error) -} - -// Execer is the interface that wraps the Exec method. -// -// Exec executes the given query as implemented by database/sql.Exec. -type Execer interface { - Exec(query string, args ...interface{}) (sql.Result, error) -} - -// Queryer is the interface that wraps the Query method. -// -// Query executes the given query as implemented by database/sql.Query. -type Queryer interface { - Query(query string, args ...interface{}) (*sql.Rows, error) -} - -// QueryRower is the interface that wraps the QueryRow method. -// -// QueryRow executes the given query as implemented by database/sql.QueryRow. -type QueryRower interface { - QueryRow(query string, args ...interface{}) RowScanner -} - -// BaseRunner groups the Execer and Queryer interfaces. -type BaseRunner interface { - Execer - Queryer -} - -// Runner groups the Execer, Queryer, and QueryRower interfaces. -type Runner interface { - Execer - Queryer - QueryRower -} - -// DBRunner wraps sql.DB to implement Runner. -type dbRunner struct { - *sql.DB -} - -func (r *dbRunner) QueryRow(query string, args ...interface{}) RowScanner { - return r.DB.QueryRow(query, args...) -} - -type txRunner struct { - *sql.Tx -} - -func (r *txRunner) QueryRow(query string, args ...interface{}) RowScanner { - return r.Tx.QueryRow(query, args...) -} - -func setRunWith(b interface{}, baseRunner BaseRunner) interface{} { - var runner Runner - switch r := baseRunner.(type) { - case Runner: - runner = r - case *sql.DB: - runner = &dbRunner{r} - case *sql.Tx: - runner = &txRunner{r} - } - return builder.Set(b, "RunWith", runner) -} - -// RunnerNotSet is returned by methods that need a Runner if it isn't set. -var RunnerNotSet = fmt.Errorf("cannot run; no Runner set (RunWith)") - -// RunnerNotQueryRunner is returned by QueryRow if the RunWith value doesn't implement QueryRower. -var RunnerNotQueryRunner = fmt.Errorf("cannot QueryRow; Runner is not a QueryRower") - -// ExecWith Execs the SQL returned by s with db. -func ExecWith(db Execer, s Sqlizer) (res sql.Result, err error) { - query, args, err := s.ToSql() - if err != nil { - return - } - return db.Exec(query, args...) -} - -// QueryWith Querys the SQL returned by s with db. -func QueryWith(db Queryer, s Sqlizer) (rows *sql.Rows, err error) { - query, args, err := s.ToSql() - if err != nil { - return - } - return db.Query(query, args...) -} - -// QueryRowWith QueryRows the SQL returned by s with db. 
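The runner plumbing above is what let builders execute themselves: RunWith stores a BaseRunner (a *sql.DB or *sql.Tx gets wrapped so QueryRow is available), and Query/Exec delegate through QueryWith/ExecWith. A minimal sketch, assuming an already-open *sql.DB and an illustrative users table:

```go
package example

import (
	"database/sql"

	sq "github.com/Masterminds/squirrel"
)

// listActive shows the wiring only; db is assumed to be an open handle.
// Query() here behaves like db.Query(sqlStr, args...) for the built statement.
func listActive(db *sql.DB) (*sql.Rows, error) {
	return sq.Select("id", "name").
		From("users").
		Where(sq.Eq{"active": true}).
		RunWith(db).
		Query()
}
```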
-func QueryRowWith(db QueryRower, s Sqlizer) RowScanner { - query, args, err := s.ToSql() - return &Row{RowScanner: db.QueryRow(query, args...), err: err} -} - -// DebugSqlizer calls ToSql on s and shows the approximate SQL to be executed -// -// If ToSql returns an error, the result of this method will look like: -// "[ToSql error: %s]" or "[DebugSqlizer error: %s]" -// -// IMPORTANT: As its name suggests, this function should only be used for -// debugging. While the string result *might* be valid SQL, this function does -// not try very hard to ensure it. Additionally, executing the output of this -// function with any untrusted user input is certainly insecure. -func DebugSqlizer(s Sqlizer) string { - sql, args, err := s.ToSql() - if err != nil { - return fmt.Sprintf("[ToSql error: %s]", err) - } - - // TODO: dedupe this with placeholder.go - buf := &bytes.Buffer{} - i := 0 - for { - p := strings.Index(sql, "?") - if p == -1 { - break - } - if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? - buf.WriteString(sql[:p]) - buf.WriteString("?") - if len(sql[p:]) == 1 { - break - } - sql = sql[p+2:] - } else { - if i+1 > len(args) { - return fmt.Sprintf( - "[DebugSqlizer error: too many placeholders in %#v for %d args]", - sql, len(args)) - } - buf.WriteString(sql[:p]) - fmt.Fprintf(buf, "'%v'", args[i]) - sql = sql[p+1:] - i++ - } - } - if i < len(args) { - return fmt.Sprintf( - "[DebugSqlizer error: not enough placeholders in %#v for %d args]", - sql, len(args)) - } - buf.WriteString(sql) - return buf.String() -} diff --git a/vendor/github.com/Masterminds/squirrel/statement.go b/vendor/github.com/Masterminds/squirrel/statement.go deleted file mode 100644 index 275388f63..000000000 --- a/vendor/github.com/Masterminds/squirrel/statement.go +++ /dev/null @@ -1,83 +0,0 @@ -package squirrel - -import "github.com/lann/builder" - -// StatementBuilderType is the type of StatementBuilder. -type StatementBuilderType builder.Builder - -// Select returns a SelectBuilder for this StatementBuilderType. -func (b StatementBuilderType) Select(columns ...string) SelectBuilder { - return SelectBuilder(b).Columns(columns...) -} - -// Insert returns a InsertBuilder for this StatementBuilderType. -func (b StatementBuilderType) Insert(into string) InsertBuilder { - return InsertBuilder(b).Into(into) -} - -// Update returns a UpdateBuilder for this StatementBuilderType. -func (b StatementBuilderType) Update(table string) UpdateBuilder { - return UpdateBuilder(b).Table(table) -} - -// Delete returns a DeleteBuilder for this StatementBuilderType. -func (b StatementBuilderType) Delete(from string) DeleteBuilder { - return DeleteBuilder(b).From(from) -} - -// PlaceholderFormat sets the PlaceholderFormat field for any child builders. -func (b StatementBuilderType) PlaceholderFormat(f PlaceholderFormat) StatementBuilderType { - return builder.Set(b, "PlaceholderFormat", f).(StatementBuilderType) -} - -// RunWith sets the RunWith field for any child builders. -func (b StatementBuilderType) RunWith(runner BaseRunner) StatementBuilderType { - return setRunWith(b, runner).(StatementBuilderType) -} - -// StatementBuilder is a parent builder for other builders, e.g. SelectBuilder. -var StatementBuilder = StatementBuilderType(builder.EmptyBuilder).PlaceholderFormat(Question) - -// Select returns a new SelectBuilder, optionally setting some result columns. -// -// See SelectBuilder.Columns. -func Select(columns ...string) SelectBuilder { - return StatementBuilder.Select(columns...) 
-} - -// Insert returns a new InsertBuilder with the given table name. -// -// See InsertBuilder.Into. -func Insert(into string) InsertBuilder { - return StatementBuilder.Insert(into) -} - -// Update returns a new UpdateBuilder with the given table name. -// -// See UpdateBuilder.Table. -func Update(table string) UpdateBuilder { - return StatementBuilder.Update(table) -} - -// Delete returns a new DeleteBuilder with the given table name. -// -// See DeleteBuilder.Table. -func Delete(from string) DeleteBuilder { - return StatementBuilder.Delete(from) -} - -// Case returns a new CaseBuilder -// "what" represents case value -func Case(what ...interface{}) CaseBuilder { - b := CaseBuilder(builder.EmptyBuilder) - - switch len(what) { - case 0: - case 1: - b = b.what(what[0]) - default: - b = b.what(newPart(what[0], what[1:]...)) - - } - return b -} diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher.go b/vendor/github.com/Masterminds/squirrel/stmtcacher.go deleted file mode 100644 index c2dc22088..000000000 --- a/vendor/github.com/Masterminds/squirrel/stmtcacher.go +++ /dev/null @@ -1,90 +0,0 @@ -package squirrel - -import ( - "database/sql" - "sync" -) - -// Prepareer is the interface that wraps the Prepare method. -// -// Prepare executes the given query as implemented by database/sql.Prepare. -type Preparer interface { - Prepare(query string) (*sql.Stmt, error) -} - -// DBProxy groups the Execer, Queryer, QueryRower, and Preparer interfaces. -type DBProxy interface { - Execer - Queryer - QueryRower - Preparer -} - -type stmtCacher struct { - prep Preparer - cache map[string]*sql.Stmt - mu sync.Mutex -} - -// NewStmtCacher returns a DBProxy wrapping prep that caches Prepared Stmts. -// -// Stmts are cached based on the string value of their queries. -func NewStmtCacher(prep Preparer) DBProxy { - return &stmtCacher{prep: prep, cache: make(map[string]*sql.Stmt)} -} - -func (sc *stmtCacher) Prepare(query string) (*sql.Stmt, error) { - sc.mu.Lock() - defer sc.mu.Unlock() - stmt, ok := sc.cache[query] - if ok { - return stmt, nil - } - stmt, err := sc.prep.Prepare(query) - if err == nil { - sc.cache[query] = stmt - } - return stmt, err -} - -func (sc *stmtCacher) Exec(query string, args ...interface{}) (res sql.Result, err error) { - stmt, err := sc.Prepare(query) - if err != nil { - return - } - return stmt.Exec(args...) -} - -func (sc *stmtCacher) Query(query string, args ...interface{}) (rows *sql.Rows, err error) { - stmt, err := sc.Prepare(query) - if err != nil { - return - } - return stmt.Query(args...) -} - -func (sc *stmtCacher) QueryRow(query string, args ...interface{}) RowScanner { - stmt, err := sc.Prepare(query) - if err != nil { - return &Row{err: err} - } - return stmt.QueryRow(args...) 
-} - -type DBProxyBeginner interface { - DBProxy - Begin() (*sql.Tx, error) -} - -type stmtCacheProxy struct { - DBProxy - db *sql.DB -} - -func NewStmtCacheProxy(db *sql.DB) DBProxyBeginner { - return &stmtCacheProxy{DBProxy: NewStmtCacher(db), db: db} -} - -func (sp *stmtCacheProxy) Begin() (*sql.Tx, error) { - return sp.db.Begin() -} diff --git a/vendor/github.com/Masterminds/squirrel/update.go b/vendor/github.com/Masterminds/squirrel/update.go deleted file mode 100644 index 682906bc0..000000000 --- a/vendor/github.com/Masterminds/squirrel/update.go +++ /dev/null @@ -1,232 +0,0 @@ -package squirrel - -import ( - "bytes" - "database/sql" - "fmt" - "sort" - "strings" - - "github.com/lann/builder" -) - -type updateData struct { - PlaceholderFormat PlaceholderFormat - RunWith BaseRunner - Prefixes exprs - Table string - SetClauses []setClause - WhereParts []Sqlizer - OrderBys []string - Limit string - Offset string - Suffixes exprs -} - -type setClause struct { - column string - value interface{} -} - -func (d *updateData) Exec() (sql.Result, error) { - if d.RunWith == nil { - return nil, RunnerNotSet - } - return ExecWith(d.RunWith, d) -} - -func (d *updateData) Query() (*sql.Rows, error) { - if d.RunWith == nil { - return nil, RunnerNotSet - } - return QueryWith(d.RunWith, d) -} - -func (d *updateData) QueryRow() RowScanner { - if d.RunWith == nil { - return &Row{err: RunnerNotSet} - } - queryRower, ok := d.RunWith.(QueryRower) - if !ok { - return &Row{err: RunnerNotQueryRunner} - } - return QueryRowWith(queryRower, d) -} - -func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) { - if len(d.Table) == 0 { - err = fmt.Errorf("update statements must specify a table") - return - } - if len(d.SetClauses) == 0 { - err = fmt.Errorf("update statements must have at least one Set clause") - return - } - - sql := &bytes.Buffer{} - - if len(d.Prefixes) > 0 { - args, _ = d.Prefixes.AppendToSql(sql, " ", args) - sql.WriteString(" ") - } - - sql.WriteString("UPDATE ") - sql.WriteString(d.Table) - - sql.WriteString(" SET ") - setSqls := make([]string, len(d.SetClauses)) - for i, setClause := range d.SetClauses { - var valSql string - e, isExpr := setClause.value.(expr) - if isExpr { - valSql = e.sql - args = append(args, e.args...) - } else { - valSql = "?" - args = append(args, setClause.value) - } - setSqls[i] = fmt.Sprintf("%s = %s", setClause.column, valSql) - } - sql.WriteString(strings.Join(setSqls, ", ")) - - if len(d.WhereParts) > 0 { - sql.WriteString(" WHERE ") - args, err = appendToSql(d.WhereParts, sql, " AND ", args) - if err != nil { - return - } - } - - if len(d.OrderBys) > 0 { - sql.WriteString(" ORDER BY ") - sql.WriteString(strings.Join(d.OrderBys, ", ")) - } - - if len(d.Limit) > 0 { - sql.WriteString(" LIMIT ") - sql.WriteString(d.Limit) - } - - if len(d.Offset) > 0 { - sql.WriteString(" OFFSET ") - sql.WriteString(d.Offset) - } - - if len(d.Suffixes) > 0 { - sql.WriteString(" ") - args, _ = d.Suffixes.AppendToSql(sql, " ", args) - } - - sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) - return -} - -// Builder - -// UpdateBuilder builds SQL UPDATE statements. -type UpdateBuilder builder.Builder - -func init() { - builder.Register(UpdateBuilder{}, updateData{}) -} - -// Format methods - -// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the -// query. 
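stmtcacher.go tied into the same interfaces: NewStmtCacher wraps any Preparer and memoizes *sql.Stmt values keyed by query text, and because the wrapper satisfies Runner it can be handed straight to StatementBuilder.RunWith. A sketch, again assuming an open *sql.DB and illustrative schema:

```go
package example

import (
	"database/sql"

	sq "github.com/Masterminds/squirrel"
)

func lookupID(db *sql.DB, name string) (int64, error) {
	// In real use the cacher would be created once and reused,
	// so repeated queries hit the prepared-statement cache.
	dbCache := sq.NewStmtCacher(db)
	mydb := sq.StatementBuilder.RunWith(dbCache)

	var id int64
	err := mydb.Select("id").
		From("users").
		Where(sq.Eq{"name": name}).
		QueryRow().
		Scan(&id)
	return id, err
}
```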
-func (b UpdateBuilder) PlaceholderFormat(f PlaceholderFormat) UpdateBuilder { - return builder.Set(b, "PlaceholderFormat", f).(UpdateBuilder) -} - -// Runner methods - -// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. -func (b UpdateBuilder) RunWith(runner BaseRunner) UpdateBuilder { - return setRunWith(b, runner).(UpdateBuilder) -} - -// Exec builds and Execs the query with the Runner set by RunWith. -func (b UpdateBuilder) Exec() (sql.Result, error) { - data := builder.GetStruct(b).(updateData) - return data.Exec() -} - -func (b UpdateBuilder) Query() (*sql.Rows, error) { - data := builder.GetStruct(b).(updateData) - return data.Query() -} - -func (b UpdateBuilder) QueryRow() RowScanner { - data := builder.GetStruct(b).(updateData) - return data.QueryRow() -} - -func (b UpdateBuilder) Scan(dest ...interface{}) error { - return b.QueryRow().Scan(dest...) -} - -// SQL methods - -// ToSql builds the query into a SQL string and bound args. -func (b UpdateBuilder) ToSql() (string, []interface{}, error) { - data := builder.GetStruct(b).(updateData) - return data.ToSql() -} - -// Prefix adds an expression to the beginning of the query -func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateBuilder { - return builder.Append(b, "Prefixes", Expr(sql, args...)).(UpdateBuilder) -} - -// Table sets the table to be updated. -func (b UpdateBuilder) Table(table string) UpdateBuilder { - return builder.Set(b, "Table", table).(UpdateBuilder) -} - -// Set adds SET clauses to the query. -func (b UpdateBuilder) Set(column string, value interface{}) UpdateBuilder { - return builder.Append(b, "SetClauses", setClause{column: column, value: value}).(UpdateBuilder) -} - -// SetMap is a convenience method which calls .Set for each key/value pair in clauses. -func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder { - keys := make([]string, len(clauses)) - i := 0 - for key := range clauses { - keys[i] = key - i++ - } - sort.Strings(keys) - for _, key := range keys { - val, _ := clauses[key] - b = b.Set(key, val) - } - return b -} - -// Where adds WHERE expressions to the query. -// -// See SelectBuilder.Where for more information. -func (b UpdateBuilder) Where(pred interface{}, args ...interface{}) UpdateBuilder { - return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(UpdateBuilder) -} - -// OrderBy adds ORDER BY expressions to the query. -func (b UpdateBuilder) OrderBy(orderBys ...string) UpdateBuilder { - return builder.Extend(b, "OrderBys", orderBys).(UpdateBuilder) -} - -// Limit sets a LIMIT clause on the query. -func (b UpdateBuilder) Limit(limit uint64) UpdateBuilder { - return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(UpdateBuilder) -} - -// Offset sets a OFFSET clause on the query. 
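Unlike the insert-side SetMap, UpdateBuilder.SetMap above sorts the column names, so the generated SET list is deterministic. For example (names illustrative):

```go
import sq "github.com/Masterminds/squirrel"

sql, args, _ := sq.Update("users").
	SetMap(map[string]interface{}{"name": "shemp", "age": 40}).
	Where(sq.Eq{"id": 7}).
	ToSql()
// UPDATE users SET age = ?, name = ? WHERE id = ?  (columns sorted: age before name)
// args: [40 shemp 7]
```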
-func (b UpdateBuilder) Offset(offset uint64) UpdateBuilder { - return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(UpdateBuilder) -} - -// Suffix adds an expression to the end of the query -func (b UpdateBuilder) Suffix(sql string, args ...interface{}) UpdateBuilder { - return builder.Append(b, "Suffixes", Expr(sql, args...)).(UpdateBuilder) -} diff --git a/vendor/github.com/Masterminds/squirrel/where.go b/vendor/github.com/Masterminds/squirrel/where.go deleted file mode 100644 index 3a2d7b709..000000000 --- a/vendor/github.com/Masterminds/squirrel/where.go +++ /dev/null @@ -1,28 +0,0 @@ -package squirrel - -import ( - "fmt" -) - -type wherePart part - -func newWherePart(pred interface{}, args ...interface{}) Sqlizer { - return &wherePart{pred: pred, args: args} -} - -func (p wherePart) ToSql() (sql string, args []interface{}, err error) { - switch pred := p.pred.(type) { - case nil: - // no-op - case Sqlizer: - return pred.ToSql() - case map[string]interface{}: - return Eq(pred).ToSql() - case string: - sql = pred - args = p.args - default: - err = fmt.Errorf("expected string-keyed map or string, not %T", pred) - } - return -} diff --git a/vendor/github.com/NYTimes/gziphandler/.gitignore b/vendor/github.com/NYTimes/gziphandler/.gitignore deleted file mode 100644 index 1377554eb..000000000 --- a/vendor/github.com/NYTimes/gziphandler/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.swp diff --git a/vendor/github.com/NYTimes/gziphandler/.travis.yml b/vendor/github.com/NYTimes/gziphandler/.travis.yml deleted file mode 100644 index 94dfae362..000000000 --- a/vendor/github.com/NYTimes/gziphandler/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.x - - tip -env: - - GO111MODULE=on -install: - - go mod download -script: - - go test -race -v diff --git a/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md deleted file mode 100644 index cdbca194c..000000000 --- a/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -layout: code-of-conduct -version: v1.0 ---- - -This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community. - -Our open source community strives to: - -* **Be friendly and patient.** -* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability. -* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language. -* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. 
We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one. -* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable. -* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes. - -## Definitions - -Harassment includes, but is not limited to: - -- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation -- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment -- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle -- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop -- Threats of violence, both physical and psychological -- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm -- Deliberate intimidation -- Stalking or following -- Harassing photography or recording, including logging online activity for harassment purposes -- Sustained disruption of discussion -- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour -- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others -- Continued one-on-one communication after requests to cease -- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse -- Publication of non-harassing private communication - -Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding: - -- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’ -- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you” -- Refusal to explain or debate social justice concepts -- Communicating in a ‘tone’ you don’t find congenial -- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions - - -### Diversity Statement - -We encourage everyone to participate and are committed to building a community for all. 
Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong. - -Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected -characteristics above, including participants with disabilities. - -### Reporting Issues - -If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include: - -- Your contact information. -- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please -include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link. -- Any additional information that may be helpful. - -After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse. - -### Attribution & Acknowledgements - -We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration: - -* [Django](https://www.djangoproject.com/conduct/reporting/) -* [Python](https://www.python.org/community/diversity/) -* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct) -* [Contributor Covenant](http://contributor-covenant.org/) -* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/) -* [Citizen Code of Conduct](http://citizencodeofconduct.org/) - -This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct diff --git a/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md deleted file mode 100644 index b89a9eb4f..000000000 --- a/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md +++ /dev/null @@ -1,30 +0,0 @@ -# Contributing to NYTimes/gziphandler - -This is an open source project started by handful of developers at The New York Times and open to the entire Go community. - -We really appreciate your help! - -## Filing issues - -When filing an issue, make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -## Contributing code - -Before submitting changes, please follow these guidelines: - -1. Check the open issues and pull requests for existing discussions. -2. 
Open an issue to discuss a new feature. -3. Write tests. -4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments). -5. Make sure your changes pass `go test`. -6. Make sure the entire test suite passes locally and on Travis CI. -7. Open a Pull Request. -8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). - -Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file. diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE b/vendor/github.com/NYTimes/gziphandler/LICENSE deleted file mode 100644 index df6192d36..000000000 --- a/vendor/github.com/NYTimes/gziphandler/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md deleted file mode 100644 index 6259acaca..000000000 --- a/vendor/github.com/NYTimes/gziphandler/README.md +++ /dev/null @@ -1,56 +0,0 @@ -Gzip Handler -============ - -This is a tiny Go package which wraps HTTP handlers to transparently gzip the -response body, for clients which support it. Although it's usually simpler to -leave that to a reverse proxy (like nginx or Varnish), this package is useful -when that's undesirable. - -## Install -```bash -go get -u github.com/NYTimes/gziphandler -``` - -## Usage - -Call `GzipHandler` with any handler (an object which implements the -`http.Handler` interface), and it'll return a new handler which gzips the -response. For example: - -```go -package main - -import ( - "io" - "net/http" - "github.com/NYTimes/gziphandler" -) - -func main() { - withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "Hello, World") - }) - - withGz := gziphandler.GzipHandler(withoutGz) - - http.Handle("/", withGz) - http.ListenAndServe("0.0.0.0:8000", nil) -} -``` - - -## Documentation - -The docs can be found at [godoc.org][docs], as usual. - - -## License - -[Apache 2.0][license]. - - - - -[docs]: https://godoc.org/github.com/NYTimes/gziphandler -[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go deleted file mode 100644 index c112bbdf8..000000000 --- a/vendor/github.com/NYTimes/gziphandler/gzip.go +++ /dev/null @@ -1,532 +0,0 @@ -package gziphandler // import "github.com/NYTimes/gziphandler" - -import ( - "bufio" - "compress/gzip" - "fmt" - "io" - "mime" - "net" - "net/http" - "strconv" - "strings" - "sync" -) - -const ( - vary = "Vary" - acceptEncoding = "Accept-Encoding" - contentEncoding = "Content-Encoding" - contentType = "Content-Type" - contentLength = "Content-Length" -) - -type codings map[string]float64 - -const ( - // DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set. - // This is actually kind of ambiguous in RFC 2616, so hopefully it's correct. - // The examples seem to indicate that it is. 
- DefaultQValue = 1.0 - - // DefaultMinSize is the default minimum size until we enable gzip compression. - // 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer. - // If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing. - // That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value. - DefaultMinSize = 1400 -) - -// gzipWriterPools stores a sync.Pool for each compression level for reuse of -// gzip.Writers. Use poolIndex to covert a compression level to an index into -// gzipWriterPools. -var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool - -func init() { - for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ { - addLevelPool(i) - } - addLevelPool(gzip.DefaultCompression) -} - -// poolIndex maps a compression level to its index into gzipWriterPools. It -// assumes that level is a valid gzip compression level. -func poolIndex(level int) int { - // gzip.DefaultCompression == -1, so we need to treat it special. - if level == gzip.DefaultCompression { - return gzip.BestCompression - gzip.BestSpeed + 1 - } - return level - gzip.BestSpeed -} - -func addLevelPool(level int) { - gzipWriterPools[poolIndex(level)] = &sync.Pool{ - New: func() interface{} { - // NewWriterLevel only returns error on a bad level, we are guaranteeing - // that this will be a valid level so it is okay to ignore the returned - // error. - w, _ := gzip.NewWriterLevel(nil, level) - return w - }, - } -} - -// GzipResponseWriter provides an http.ResponseWriter interface, which gzips -// bytes before writing them to the underlying response. This doesn't close the -// writers, so don't forget to do that. -// It can be configured to skip response smaller than minSize. -type GzipResponseWriter struct { - http.ResponseWriter - index int // Index for gzipWriterPools. - gw *gzip.Writer - - code int // Saves the WriteHeader value. - - minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed. - buf []byte // Holds the first part of the write before reaching the minSize or the end of the write. - ignore bool // If true, then we immediately passthru writes to the underlying ResponseWriter. - - contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty. -} - -type GzipResponseWriterWithCloseNotify struct { - *GzipResponseWriter -} - -func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool { - return w.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Write appends data to the gzip writer. -func (w *GzipResponseWriter) Write(b []byte) (int, error) { - // GZIP responseWriter is initialized. Use the GZIP responseWriter. - if w.gw != nil { - return w.gw.Write(b) - } - - // If we have already decided not to use GZIP, immediately passthrough. - if w.ignore { - return w.ResponseWriter.Write(b) - } - - // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter. - // On the first write, w.buf changes from nil to a valid slice - w.buf = append(w.buf, b...) 
- - var ( - cl, _ = strconv.Atoi(w.Header().Get(contentLength)) - ct = w.Header().Get(contentType) - ce = w.Header().Get(contentEncoding) - ) - // Only continue if they didn't already choose an encoding or a known unhandled content length or type. - if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) { - // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data. - if len(w.buf) < w.minSize && cl == 0 { - return len(b), nil - } - // If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue. - if cl >= w.minSize || len(w.buf) >= w.minSize { - // If a Content-Type wasn't specified, infer it from the current buffer. - if ct == "" { - ct = http.DetectContentType(w.buf) - w.Header().Set(contentType, ct) - } - // If the Content-Type is acceptable to GZIP, initialize the GZIP writer. - if handleContentType(w.contentTypes, ct) { - if err := w.startGzip(); err != nil { - return 0, err - } - return len(b), nil - } - } - } - // If we got here, we should not GZIP this response. - if err := w.startPlain(); err != nil { - return 0, err - } - return len(b), nil -} - -// startGzip initializes a GZIP writer and writes the buffer. -func (w *GzipResponseWriter) startGzip() error { - // Set the GZIP header. - w.Header().Set(contentEncoding, "gzip") - - // if the Content-Length is already set, then calls to Write on gzip - // will fail to set the Content-Length header since its already set - // See: https://github.com/golang/go/issues/14975. - w.Header().Del(contentLength) - - // Write the header to gzip response. - if w.code != 0 { - w.ResponseWriter.WriteHeader(w.code) - // Ensure that no other WriteHeader's happen - w.code = 0 - } - - // Initialize and flush the buffer into the gzip response if there are any bytes. - // If there aren't any, we shouldn't initialize it yet because on Close it will - // write the gzip header even if nothing was ever written. - if len(w.buf) > 0 { - // Initialize the GZIP response. - w.init() - n, err := w.gw.Write(w.buf) - - // This should never happen (per io.Writer docs), but if the write didn't - // accept the entire buffer but returned no specific error, we have no clue - // what's going on, so abort just to be safe. - if err == nil && n < len(w.buf) { - err = io.ErrShortWrite - } - return err - } - return nil -} - -// startPlain writes to sent bytes and buffer the underlying ResponseWriter without gzip. -func (w *GzipResponseWriter) startPlain() error { - if w.code != 0 { - w.ResponseWriter.WriteHeader(w.code) - // Ensure that no other WriteHeader's happen - w.code = 0 - } - w.ignore = true - // If Write was never called then don't call Write on the underlying ResponseWriter. - if w.buf == nil { - return nil - } - n, err := w.ResponseWriter.Write(w.buf) - w.buf = nil - // This should never happen (per io.Writer docs), but if the write didn't - // accept the entire buffer but returned no specific error, we have no clue - // what's going on, so abort just to be safe. - if err == nil && n < len(w.buf) { - err = io.ErrShortWrite - } - return err -} - -// WriteHeader just saves the response code until close or GZIP effective writes. -func (w *GzipResponseWriter) WriteHeader(code int) { - if w.code == 0 { - w.code = code - } -} - -// init graps a new gzip writer from the gzipWriterPool and writes the correct -// content encoding header. 
-func (w *GzipResponseWriter) init() { - // Bytes written during ServeHTTP are redirected to this gzip writer - // before being written to the underlying response. - gzw := gzipWriterPools[w.index].Get().(*gzip.Writer) - gzw.Reset(w.ResponseWriter) - w.gw = gzw -} - -// Close will close the gzip.Writer and will put it back in the gzipWriterPool. -func (w *GzipResponseWriter) Close() error { - if w.ignore { - return nil - } - - if w.gw == nil { - // GZIP not triggered yet, write out regular response. - err := w.startPlain() - // Returns the error if any at write. - if err != nil { - err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error()) - } - return err - } - - err := w.gw.Close() - gzipWriterPools[w.index].Put(w.gw) - w.gw = nil - return err -} - -// Flush flushes the underlying *gzip.Writer and then the underlying -// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter -// an http.Flusher. -func (w *GzipResponseWriter) Flush() { - if w.gw == nil && !w.ignore { - // Only flush once startGzip or startPlain has been called. - // - // Flush is thus a no-op until we're certain whether a plain - // or gzipped response will be served. - return - } - - if w.gw != nil { - w.gw.Flush() - } - - if fw, ok := w.ResponseWriter.(http.Flusher); ok { - fw.Flush() - } -} - -// Hijack implements http.Hijacker. If the underlying ResponseWriter is a -// Hijacker, its Hijack method is returned. Otherwise an error is returned. -func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if hj, ok := w.ResponseWriter.(http.Hijacker); ok { - return hj.Hijack() - } - return nil, nil, fmt.Errorf("http.Hijacker interface is not supported") -} - -// verify Hijacker interface implementation -var _ http.Hijacker = &GzipResponseWriter{} - -// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in -// an error case it panics rather than returning an error. -func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler { - wrap, err := NewGzipLevelHandler(level) - if err != nil { - panic(err) - } - return wrap -} - -// NewGzipLevelHandler returns a wrapper function (often known as middleware) -// which can be used to wrap an HTTP handler to transparently gzip the response -// body if the client supports it (via the Accept-Encoding header). Responses will -// be encoded at the given gzip compression level. An error will be returned only -// if an invalid gzip compression level is given, so if one can ensure the level -// is valid, the returned error can be safely ignored. -func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) { - return NewGzipLevelAndMinSize(level, DefaultMinSize) -} - -// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller -// specify the minimum size before compression. 
-func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) { - return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize)) -} - -func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) { - c := &config{ - level: gzip.DefaultCompression, - minSize: DefaultMinSize, - } - - for _, o := range opts { - o(c) - } - - if err := c.validate(); err != nil { - return nil, err - } - - return func(h http.Handler) http.Handler { - index := poolIndex(c.level) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add(vary, acceptEncoding) - if acceptsGzip(r) { - gw := &GzipResponseWriter{ - ResponseWriter: w, - index: index, - minSize: c.minSize, - contentTypes: c.contentTypes, - } - defer gw.Close() - - if _, ok := w.(http.CloseNotifier); ok { - gwcn := GzipResponseWriterWithCloseNotify{gw} - h.ServeHTTP(gwcn, r) - } else { - h.ServeHTTP(gw, r) - } - - } else { - h.ServeHTTP(w, r) - } - }) - }, nil -} - -// Parsed representation of one of the inputs to ContentTypes. -// See https://golang.org/pkg/mime/#ParseMediaType -type parsedContentType struct { - mediaType string - params map[string]string -} - -// equals returns whether this content type matches another content type. -func (pct parsedContentType) equals(mediaType string, params map[string]string) bool { - if pct.mediaType != mediaType { - return false - } - // if pct has no params, don't care about other's params - if len(pct.params) == 0 { - return true - } - - // if pct has any params, they must be identical to other's. - if len(pct.params) != len(params) { - return false - } - for k, v := range pct.params { - if w, ok := params[k]; !ok || v != w { - return false - } - } - return true -} - -// Used for functional configuration. -type config struct { - minSize int - level int - contentTypes []parsedContentType -} - -func (c *config) validate() error { - if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) { - return fmt.Errorf("invalid compression level requested: %d", c.level) - } - - if c.minSize < 0 { - return fmt.Errorf("minimum size must be more than zero") - } - - return nil -} - -type option func(c *config) - -func MinSize(size int) option { - return func(c *config) { - c.minSize = size - } -} - -func CompressionLevel(level int) option { - return func(c *config) { - c.level = level - } -} - -// ContentTypes specifies a list of content types to compare -// the Content-Type header to before compressing. If none -// match, the response will be returned as-is. -// -// Content types are compared in a case-insensitive, whitespace-ignored -// manner. -// -// A MIME type without any other directive will match a content type -// that has the same MIME type, regardless of that content type's other -// directives. I.e., "text/html" will match both "text/html" and -// "text/html; charset=utf-8". -// -// A MIME type with any other directive will only match a content type -// that has the same MIME type and other directives. I.e., -// "text/html; charset=utf-8" will only match "text/html; charset=utf-8". -// -// By default, responses are gzipped regardless of -// Content-Type. 
-func ContentTypes(types []string) option { - return func(c *config) { - c.contentTypes = []parsedContentType{} - for _, v := range types { - mediaType, params, err := mime.ParseMediaType(v) - if err == nil { - c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params}) - } - } - } -} - -// GzipHandler wraps an HTTP handler, to transparently gzip the response body if -// the client supports it (via the Accept-Encoding header). This will compress at -// the default compression level. -func GzipHandler(h http.Handler) http.Handler { - wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression) - return wrapper(h) -} - -// acceptsGzip returns true if the given HTTP request indicates that it will -// accept a gzipped response. -func acceptsGzip(r *http.Request) bool { - acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding)) - return acceptedEncodings["gzip"] > 0.0 -} - -// returns true if we've been configured to compress the specific content type. -func handleContentType(contentTypes []parsedContentType, ct string) bool { - // If contentTypes is empty we handle all content types. - if len(contentTypes) == 0 { - return true - } - - mediaType, params, err := mime.ParseMediaType(ct) - if err != nil { - return false - } - - for _, c := range contentTypes { - if c.equals(mediaType, params) { - return true - } - } - - return false -} - -// parseEncodings attempts to parse a list of codings, per RFC 2616, as might -// appear in an Accept-Encoding header. It returns a map of content-codings to -// quality values, and an error containing the errors encountered. It's probably -// safe to ignore those, because silently ignoring errors is how the internet -// works. -// -// See: http://tools.ietf.org/html/rfc2616#section-14.3. -func parseEncodings(s string) (codings, error) { - c := make(codings) - var e []string - - for _, ss := range strings.Split(s, ",") { - coding, qvalue, err := parseCoding(ss) - - if err != nil { - e = append(e, err.Error()) - } else { - c[coding] = qvalue - } - } - - // TODO (adammck): Use a proper multi-error struct, so the individual errors - // can be extracted if anyone cares. - if len(e) > 0 { - return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", ")) - } - - return c, nil -} - -// parseCoding parses a single conding (content-coding with an optional qvalue), -// as might appear in an Accept-Encoding header. It attempts to forgive minor -// formatting errors. -func parseCoding(s string) (coding string, qvalue float64, err error) { - for n, part := range strings.Split(s, ";") { - part = strings.TrimSpace(part) - qvalue = DefaultQValue - - if n == 0 { - coding = strings.ToLower(part) - } else if strings.HasPrefix(part, "q=") { - qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64) - - if qvalue < 0.0 { - qvalue = 0.0 - } else if qvalue > 1.0 { - qvalue = 1.0 - } - } - } - - if coding == "" { - err = fmt.Errorf("empty content-coding") - } - - return -} diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go deleted file mode 100644 index fa9665b7e..000000000 --- a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build go1.8 - -package gziphandler - -import "net/http" - -// Push initiates an HTTP/2 server push. -// Push returns ErrNotSupported if the client has disabled push or if push -// is not supported on the underlying connection. 
-func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error { - pusher, ok := w.ResponseWriter.(http.Pusher) - if ok && pusher != nil { - return pusher.Push(target, setAcceptEncodingForPushOptions(opts)) - } - return http.ErrNotSupported -} - -// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers. -func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions { - - if opts == nil { - opts = &http.PushOptions{ - Header: http.Header{ - acceptEncoding: []string{"gzip"}, - }, - } - return opts - } - - if opts.Header == nil { - opts.Header = http.Header{ - acceptEncoding: []string{"gzip"}, - } - return opts - } - - if encoding := opts.Header.Get(acceptEncoding); encoding == "" { - opts.Header.Add(acceptEncoding, "gzip") - return opts - } - - return opts -} diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c807..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af6..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986dea..000000000 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c498..000000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. 
It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. - -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
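For illustration, a minimal sketch of composing a custom flag set with the `|` and `&^` operators described above (the function signature and flag names come from this README; the expected output is inferred from the flag descriptions, not taken from a documented example):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from the "usually safe, greedy" convenience set, drop the
	// trailing-slash removal, and add query sorting.
	flags := (purell.FlagsUsuallySafeGreedy &^ purell.FlagRemoveTrailingSlash) | purell.FlagSortQuery

	normalized, err := purell.NormalizeURLString(
		"HTTP://www.Example.com:80/a/b/../c/?z=3&a=1", flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(normalized)
	// Expected, per the flag descriptions: http://www.example.com/a/c/?a=1&z=3
}
```

Because the flag sets are plain bit masks, the same `|`/`&^` composition works with any of the individual flags or convenience sets listed above.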
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc190a..000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f9..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ 
-1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a53..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b8462459..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. 
-// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. -func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. 
-// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore deleted file mode 100644 index 8d69a9418..000000000 --- a/vendor/github.com/asaskevich/govalidator/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -bin/ -.idea/ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml deleted file mode 100644 index bb83c6670..000000000 --- a/vendor/github.com/asaskevich/govalidator/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -dist: xenial -go: - - '1.10' - - '1.11' - - '1.12' - - '1.13' - - 'tip' - -script: - - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md deleted file mode 100644 index 4b462b0d8..000000000 --- a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor Code of Conduct - -This project adheres to [The Code Manifesto](http://codemanifesto.com) -as its guidelines for contributor interactions. - -## The Code Manifesto - -We want to work in an ecosystem that empowers developers to reach their -potential — one that encourages growth and effective collaboration. A space -that is safe for all. - -A space such as this benefits everyone that participates in it. It encourages -new developers to enter our field. It is through discussion and collaboration -that we grow, and through growth that we improve. - -In the effort to create such a place, we hold to these values: - -1. **Discrimination limits us.** This includes discrimination on the basis of - race, gender, sexual orientation, gender identity, age, nationality, - technology and any other arbitrary exclusion of a group of people. -2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort - levels. Remember that, and if brought to your attention, heed it. -3. **We are our biggest assets.** None of us were born masters of our trade. - Each of us has been helped along the way. Return that favor, when and where - you can. -4. **We are resources for the future.** As an extension of #3, share what you - know. Make yourself a resource to help those that come after you. -5. 
**Respect defines us.** Treat others as you wish to be treated. Make your - discussions, criticisms and debates from a position of respectfulness. Ask - yourself, is it true? Is it necessary? Is it constructive? Anything less is - unacceptable. -6. **Reactions require grace.** Angry responses are valid, but abusive language - and vindictive actions are toxic. When something happens that offends you, - handle it assertively, but be respectful. Escalate reasonably, and try to - allow the offender an opportunity to explain themselves, and possibly - correct the issue. -7. **Opinions are just that: opinions.** Each and every one of us, due to our - background and upbringing, have varying opinions. That is perfectly - acceptable. Remember this: if you respect your own opinions, you should - respect the opinions of others. -8. **To err is human.** You might not intend it, but mistakes do happen and - contribute to build experience. Tolerate honest mistakes, and don't - hesitate to apologize if you make one yourself. diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md deleted file mode 100644 index 7ed268a1e..000000000 --- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. - -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Financial contributions - -We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). -Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. - - -## Credits - - -### Contributors - -Thank you to all the people who have already contributed to govalidator! - - - -### Backers - -Thank you to all our backers! 
[[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE deleted file mode 100644 index cacba9102..000000000 --- a/vendor/github.com/asaskevich/govalidator/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2020 Alex Saskevich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md deleted file mode 100644 index 2c3fc35eb..000000000 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ /dev/null @@ -1,622 +0,0 @@ -govalidator -=========== -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) -[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) - -A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). - -#### Installation -Make sure that Go is installed on your computer. 
-Type the following command in your terminal: - - go get github.com/asaskevich/govalidator - -or you can get specified release of the package with `gopkg.in`: - - go get gopkg.in/asaskevich/govalidator.v10 - -After it the package is ready to use. - - -#### Import package in your project -Add following line in your `*.go` file: -```go -import "github.com/asaskevich/govalidator" -``` -If you are unhappy to use long `govalidator`, you can do something like this: -```go -import ( - valid "github.com/asaskevich/govalidator" -) -``` - -#### Activate behavior to require all fields have a validation tag by default -`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. - -`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. - -```go -import "github.com/asaskevich/govalidator" - -func init() { - govalidator.SetFieldsRequiredByDefault(true) -} -``` - -Here's some code to explain it: -```go -// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -type exampleStruct struct { - Name string `` - Email string `valid:"email"` -} - -// this, however, will only fail when Email is empty or an invalid email address: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email"` -} - -// lastly, this will only fail when Email is an invalid email address but not when it's empty: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email,optional"` -} -``` - -#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) -##### Custom validator function signature -A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. -```go -import "github.com/asaskevich/govalidator" - -// old signature -func(i interface{}) bool - -// new signature -func(i interface{}, o interface{}) bool -``` - -##### Adding a custom validator -This was changed to prevent data races when accessing custom validators. -```go -import "github.com/asaskevich/govalidator" - -// before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { - // ... -} - -// after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { - // ... 
-}) -``` - -#### List of functions: -```go -func Abs(value float64) float64 -func BlackList(str, chars string) string -func ByteLength(str string, params ...string) bool -func CamelCaseToUnderscore(str string) string -func Contains(str, substring string) bool -func Count(array []interface{}, iterator ConditionIterator) int -func Each(array []interface{}, iterator Iterator) -func ErrorByField(e error, field string) string -func ErrorsByField(e error) map[string]string -func Filter(array []interface{}, iterator ConditionIterator) []interface{} -func Find(array []interface{}, iterator ConditionIterator) interface{} -func GetLine(s string, index int) (string, error) -func GetLines(s string) []string -func HasLowerCase(str string) bool -func HasUpperCase(str string) bool -func HasWhitespace(str string) bool -func HasWhitespaceOnly(str string) bool -func InRange(value interface{}, left interface{}, right interface{}) bool -func InRangeFloat32(value, left, right float32) bool -func InRangeFloat64(value, left, right float64) bool -func InRangeInt(value, left, right interface{}) bool -func IsASCII(str string) bool -func IsAlpha(str string) bool -func IsAlphanumeric(str string) bool -func IsBase64(str string) bool -func IsByteLength(str string, min, max int) bool -func IsCIDR(str string) bool -func IsCRC32(str string) bool -func IsCRC32b(str string) bool -func IsCreditCard(str string) bool -func IsDNSName(str string) bool -func IsDataURI(str string) bool -func IsDialString(str string) bool -func IsDivisibleBy(str, num string) bool -func IsEmail(str string) bool -func IsExistingEmail(email string) bool -func IsFilePath(str string) (bool, int) -func IsFloat(str string) bool -func IsFullWidth(str string) bool -func IsHalfWidth(str string) bool -func IsHash(str string, algorithm string) bool -func IsHexadecimal(str string) bool -func IsHexcolor(str string) bool -func IsHost(str string) bool -func IsIP(str string) bool -func IsIPv4(str string) bool -func IsIPv6(str string) bool -func IsISBN(str string, version int) bool -func IsISBN10(str string) bool -func IsISBN13(str string) bool -func IsISO3166Alpha2(str string) bool -func IsISO3166Alpha3(str string) bool -func IsISO4217(str string) bool -func IsISO693Alpha2(str string) bool -func IsISO693Alpha3b(str string) bool -func IsIn(str string, params ...string) bool -func IsInRaw(str string, params ...string) bool -func IsInt(str string) bool -func IsJSON(str string) bool -func IsLatitude(str string) bool -func IsLongitude(str string) bool -func IsLowerCase(str string) bool -func IsMAC(str string) bool -func IsMD4(str string) bool -func IsMD5(str string) bool -func IsMagnetURI(str string) bool -func IsMongoID(str string) bool -func IsMultibyte(str string) bool -func IsNatural(value float64) bool -func IsNegative(value float64) bool -func IsNonNegative(value float64) bool -func IsNonPositive(value float64) bool -func IsNotNull(str string) bool -func IsNull(str string) bool -func IsNumeric(str string) bool -func IsPort(str string) bool -func IsPositive(value float64) bool -func IsPrintableASCII(str string) bool -func IsRFC3339(str string) bool -func IsRFC3339WithoutZone(str string) bool -func IsRGBcolor(str string) bool -func IsRegex(str string) bool -func IsRequestURI(rawurl string) bool -func IsRequestURL(rawurl string) bool -func IsRipeMD128(str string) bool -func IsRipeMD160(str string) bool -func IsRsaPub(str string, params ...string) bool -func IsRsaPublicKey(str string, keylen int) bool -func IsSHA1(str string) bool -func IsSHA256(str string) bool -func 
IsSHA384(str string) bool -func IsSHA512(str string) bool -func IsSSN(str string) bool -func IsSemver(str string) bool -func IsTiger128(str string) bool -func IsTiger160(str string) bool -func IsTiger192(str string) bool -func IsTime(str string, format string) bool -func IsType(v interface{}, params ...string) bool -func IsURL(str string) bool -func IsUTFDigit(str string) bool -func IsUTFLetter(str string) bool -func IsUTFLetterNumeric(str string) bool -func IsUTFNumeric(str string) bool -func IsUUID(str string) bool -func IsUUIDv3(str string) bool -func IsUUIDv4(str string) bool -func IsUUIDv5(str string) bool -func IsULID(str string) bool -func IsUnixTime(str string) bool -func IsUpperCase(str string) bool -func IsVariableWidth(str string) bool -func IsWhole(value float64) bool -func LeftTrim(str, chars string) string -func Map(array []interface{}, iterator ResultIterator) []interface{} -func Matches(str, pattern string) bool -func MaxStringLength(str string, params ...string) bool -func MinStringLength(str string, params ...string) bool -func NormalizeEmail(str string) (string, error) -func PadBoth(str string, padStr string, padLen int) string -func PadLeft(str string, padStr string, padLen int) string -func PadRight(str string, padStr string, padLen int) string -func PrependPathToErrors(err error, path string) error -func Range(str string, params ...string) bool -func RemoveTags(s string) string -func ReplacePattern(str, pattern, replace string) string -func Reverse(s string) string -func RightTrim(str, chars string) string -func RuneLength(str string, params ...string) bool -func SafeFileName(str string) string -func SetFieldsRequiredByDefault(value bool) -func SetNilPtrAllowedByRequired(value bool) -func Sign(value float64) float64 -func StringLength(str string, params ...string) bool -func StringMatches(s string, params ...string) bool -func StripLow(str string, keepNewLines bool) string -func ToBoolean(str string) (bool, error) -func ToFloat(str string) (float64, error) -func ToInt(value interface{}) (res int64, err error) -func ToJSON(obj interface{}) (string, error) -func ToString(obj interface{}) string -func Trim(str, chars string) string -func Truncate(str string, length int, ending string) string -func TruncatingErrorf(str string, args ...interface{}) error -func UnderscoreToCamelCase(s string) string -func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) -func ValidateStruct(s interface{}) (bool, error) -func WhiteList(str, chars string) string -type ConditionIterator -type CustomTypeValidator -type Error -func (e Error) Error() string -type Errors -func (es Errors) Error() string -func (es Errors) Errors() []error -type ISO3166Entry -type ISO693Entry -type InterfaceParamValidator -type Iterator -type ParamValidator -type ResultIterator -type UnsupportedTypeError -func (e *UnsupportedTypeError) Error() string -type Validator -``` - -#### Examples -###### IsURL -```go -println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) -``` -###### IsType -```go -println(govalidator.IsType("Bob", "string")) -println(govalidator.IsType(1, "int")) -i := 1 -println(govalidator.IsType(&i, "*int")) -``` - -IsType can be used through the tag `type` which is essential for map validation: -```go -type User struct { - Name string `valid:"type(string)"` - Age int `valid:"type(int)"` - Meta interface{} `valid:"type(string)"` -} -result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) -if err != nil { - println("error: " + 
err.Error()) -} -println(result) -``` -###### ToString -```go -type User struct { - FirstName string - LastName string -} - -str := govalidator.ToString(&User{"John", "Juan"}) -println(str) -``` -###### Each, Map, Filter, Count for slices -Each iterates over the slice/array and calls Iterator for every item -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.Iterator = func(value interface{}, index int) { - println(value.(int)) -} -govalidator.Each(data, fn) -``` -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { - return value.(int) * 3 -} -_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} -``` -```go -data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} -var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { - return value.(int)%2 == 0 -} -_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} -_ = govalidator.Count(data, fn) // result = 5 -``` -###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) -If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: -```go -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) -``` -For completely custom validators (interface-based), see below. - -Here is a list of available validators for struct fields (validator - used function): -```go -"email": IsEmail, -"url": IsURL, -"dialstring": IsDialString, -"requrl": IsRequestURL, -"requri": IsRequestURI, -"alpha": IsAlpha, -"utfletter": IsUTFLetter, -"alphanum": IsAlphanumeric, -"utfletternum": IsUTFLetterNumeric, -"numeric": IsNumeric, -"utfnumeric": IsUTFNumeric, -"utfdigit": IsUTFDigit, -"hexadecimal": IsHexadecimal, -"hexcolor": IsHexcolor, -"rgbcolor": IsRGBcolor, -"lowercase": IsLowerCase, -"uppercase": IsUpperCase, -"int": IsInt, -"float": IsFloat, -"null": IsNull, -"uuid": IsUUID, -"uuidv3": IsUUIDv3, -"uuidv4": IsUUIDv4, -"uuidv5": IsUUIDv5, -"creditcard": IsCreditCard, -"isbn10": IsISBN10, -"isbn13": IsISBN13, -"json": IsJSON, -"multibyte": IsMultibyte, -"ascii": IsASCII, -"printableascii": IsPrintableASCII, -"fullwidth": IsFullWidth, -"halfwidth": IsHalfWidth, -"variablewidth": IsVariableWidth, -"base64": IsBase64, -"datauri": IsDataURI, -"ip": IsIP, -"port": IsPort, -"ipv4": IsIPv4, -"ipv6": IsIPv6, -"dns": IsDNSName, -"host": IsHost, -"mac": IsMAC, -"latitude": IsLatitude, -"longitude": IsLongitude, -"ssn": IsSSN, -"semver": IsSemver, -"rfc3339": IsRFC3339, -"rfc3339WithoutZone": IsRFC3339WithoutZone, -"ISO3166Alpha2": IsISO3166Alpha2, -"ISO3166Alpha3": IsISO3166Alpha3, -"ulid": IsULID, -``` -Validators with parameters - -```go -"range(min|max)": Range, -"length(min|max)": ByteLength, -"runelength(min|max)": RuneLength, -"stringlength(min|max)": StringLength, -"matches(pattern)": StringMatches, -"in(string1|string2|...|stringN)": IsIn, -"rsapub(keylength)" : IsRsaPub, -"minstringlength(int): MinStringLength, -"maxstringlength(int): MaxStringLength, -``` -Validators with parameters for any type - -```go -"type(type)": IsType, -``` - -And here is small example of usage: -```go -type Post struct { - Title string `valid:"alphanum,required"` - Message string `valid:"duck,ascii"` - Message2 string `valid:"animal(dog)"` - AuthorIP 
string `valid:"ipv4"` - Date string `valid:"-"` -} -post := &Post{ - Title: "My Example Post", - Message: "duck", - Message2: "dog", - AuthorIP: "123.234.54.3", -} - -// Add your own struct validation tags -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) - -// Add your own struct validation tags with parameter -govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { - species := params[0] - return str == species -}) -govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") - -result, err := govalidator.ValidateStruct(post) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` -###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) -If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` - -So here is small example of usage: -```go -var mapTemplate = map[string]interface{}{ - "name":"required,alpha", - "family":"required,alpha", - "email":"required,email", - "cell-phone":"numeric", - "address":map[string]interface{}{ - "line1":"required,alphanum", - "line2":"alphanum", - "postal-code":"numeric", - }, -} - -var inputMap = map[string]interface{}{ - "name":"Bob", - "family":"Smith", - "email":"foo@bar.baz", - "address":map[string]interface{}{ - "line1":"", - "line2":"", - "postal-code":"", - }, -} - -result, err := govalidator.ValidateMap(inputMap, mapTemplate) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` - -###### WhiteList -```go -// Remove all characters from string ignoring characters between "a" and "z" -println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") -``` - -###### Custom validation functions -Custom validation using your own domain specific validators is also available - here's an example of how to use it: -```go -import "github.com/asaskevich/govalidator" - -type CustomByteArray [6]byte // custom types are supported and can be validated - -type StructWithCustomByteArray struct { - ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence - Email string `valid:"email"` - CustomMinLength int `valid:"-"` -} - -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // you can type switch on the context interface being validated - case StructWithCustomByteArray: - // you can check and validate against some other field in the context, - // return early or not validate against the context at all – your choice - case SomeOtherType: - // ... - default: - // expecting some other type? Throw/panic here or continue - } - - switch v := i.(type) { // type switch on the struct field being validated - case CustomByteArray: - for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes - if e != 0 { - return true - } - } - } - return false -}) -govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // this validates a field against the value in another field, i.e. 
dependent validation - case StructWithCustomByteArray: - return len(v.ID) >= v.CustomMinLength - } - return false -}) -``` - -###### Loop over Error() -By default .Error() returns all errors in a single String. To access each error you can do this: -```go - if err != nil { - errs := err.(govalidator.Errors).Errors() - for _, e := range errs { - fmt.Println(e.Error()) - } - } -``` - -###### Custom error messages -Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: -```go -type Ticket struct { - Id int64 `json:"id"` - FirstName string `json:"firstname" valid:"required~First name is blank"` -} -``` - -#### Notes -Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). -Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). - -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. - -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Credits -### Contributors - -This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. - -#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) -* [Daniel Lohse](https://github.com/annismckenzie) -* [Attila Oláh](https://github.com/attilaolah) -* [Daniel Korner](https://github.com/Dadie) -* [Steven Wilkin](https://github.com/stevenwilkin) -* [Deiwin Sarjas](https://github.com/deiwin) -* [Noah Shibley](https://github.com/slugmobile) -* [Nathan Davies](https://github.com/nathj07) -* [Matt Sanford](https://github.com/mzsanford) -* [Simon ccl1115](https://github.com/ccl1115) - - - - -### Backers - -Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Support this project by becoming a sponsor. Your logo will show up here with a link to your website. 
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)] - - - - - - - - - - - - - - - -## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go deleted file mode 100644 index 3e1da7cb4..000000000 --- a/vendor/github.com/asaskevich/govalidator/arrays.go +++ /dev/null @@ -1,87 +0,0 @@ -package govalidator - -// Iterator is the function that accepts element of slice/array and its index -type Iterator func(interface{}, int) - -// ResultIterator is the function that accepts element of slice/array and its index and returns any result -type ResultIterator func(interface{}, int) interface{} - -// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean -type ConditionIterator func(interface{}, int) bool - -// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values -type ReduceIterator func(interface{}, interface{}) interface{} - -// Some validates that any item of array corresponds to ConditionIterator. Returns boolean. -func Some(array []interface{}, iterator ConditionIterator) bool { - res := false - for index, data := range array { - res = res || iterator(data, index) - } - return res -} - -// Every validates that every item of array corresponds to ConditionIterator. Returns boolean. -func Every(array []interface{}, iterator ConditionIterator) bool { - res := true - for index, data := range array { - res = res && iterator(data, index) - } - return res -} - -// Reduce boils down a list of values into a single value by ReduceIterator -func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} { - for _, data := range array { - initialValue = iterator(initialValue, data) - } - return initialValue -} - -// Each iterates over the slice and apply Iterator to every item -func Each(array []interface{}, iterator Iterator) { - for index, data := range array { - iterator(data, index) - } -} - -// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. -func Map(array []interface{}, iterator ResultIterator) []interface{} { - var result = make([]interface{}, len(array)) - for index, data := range array { - result[index] = iterator(data, index) - } - return result -} - -// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. -func Find(array []interface{}, iterator ConditionIterator) interface{} { - for index, data := range array { - if iterator(data, index) { - return data - } - } - return nil -} - -// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. -func Filter(array []interface{}, iterator ConditionIterator) []interface{} { - var result = make([]interface{}, 0) - for index, data := range array { - if iterator(data, index) { - result = append(result, data) - } - } - return result -} - -// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. 
-func Count(array []interface{}, iterator ConditionIterator) int { - count := 0 - for index, data := range array { - if iterator(data, index) { - count = count + 1 - } - } - return count -} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go deleted file mode 100644 index d68e990fc..000000000 --- a/vendor/github.com/asaskevich/govalidator/converter.go +++ /dev/null @@ -1,81 +0,0 @@ -package govalidator - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" -) - -// ToString convert the input to a string. -func ToString(obj interface{}) string { - res := fmt.Sprintf("%v", obj) - return res -} - -// ToJSON convert the input to a valid JSON string -func ToJSON(obj interface{}) (string, error) { - res, err := json.Marshal(obj) - if err != nil { - res = []byte("") - } - return string(res), err -} - -// ToFloat convert the input string to a float, or 0.0 if the input is not a float. -func ToFloat(value interface{}) (res float64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = float64(val.Int()) - case uint, uint8, uint16, uint32, uint64: - res = float64(val.Uint()) - case float32, float64: - res = val.Float() - case string: - res, err = strconv.ParseFloat(val.String(), 64) - if err != nil { - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. -func ToInt(value interface{}) (res int64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = val.Int() - case uint, uint8, uint16, uint32, uint64: - res = int64(val.Uint()) - case float32, float64: - res = int64(val.Float()) - case string: - if IsInt(val.String()) { - res, err = strconv.ParseInt(val.String(), 0, 64) - if err != nil { - res = 0 - } - } else { - err = fmt.Errorf("ToInt: invalid numeric format %g", value) - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToBoolean convert the input string to a boolean. -func ToBoolean(str string) (bool, error) { - return strconv.ParseBool(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go deleted file mode 100644 index 55dce62dc..000000000 --- a/vendor/github.com/asaskevich/govalidator/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -package govalidator - -// A package of validators and sanitizers for strings, structures and collections. diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go deleted file mode 100644 index 1da2336f4..000000000 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ /dev/null @@ -1,47 +0,0 @@ -package govalidator - -import ( - "sort" - "strings" -) - -// Errors is an array of multiple errors and conforms to the error interface. -type Errors []error - -// Errors returns itself. -func (es Errors) Errors() []error { - return es -} - -func (es Errors) Error() string { - var errs []string - for _, e := range es { - errs = append(errs, e.Error()) - } - sort.Strings(errs) - return strings.Join(errs, ";") -} - -// Error encapsulates a name, an error and whether there's a custom error message or not. 
-type Error struct { - Name string - Err error - CustomErrorMessageExists bool - - // Validator indicates the name of the validator that failed - Validator string - Path []string -} - -func (e Error) Error() string { - if e.CustomErrorMessageExists { - return e.Err.Error() - } - - errName := e.Name - if len(e.Path) > 0 { - errName = strings.Join(append(e.Path, e.Name), ".") - } - - return errName + ": " + e.Err.Error() -} diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go deleted file mode 100644 index 5041d9e86..000000000 --- a/vendor/github.com/asaskevich/govalidator/numerics.go +++ /dev/null @@ -1,100 +0,0 @@ -package govalidator - -import ( - "math" -) - -// Abs returns absolute value of number -func Abs(value float64) float64 { - return math.Abs(value) -} - -// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise -func Sign(value float64) float64 { - if value > 0 { - return 1 - } else if value < 0 { - return -1 - } else { - return 0 - } -} - -// IsNegative returns true if value < 0 -func IsNegative(value float64) bool { - return value < 0 -} - -// IsPositive returns true if value > 0 -func IsPositive(value float64) bool { - return value > 0 -} - -// IsNonNegative returns true if value >= 0 -func IsNonNegative(value float64) bool { - return value >= 0 -} - -// IsNonPositive returns true if value <= 0 -func IsNonPositive(value float64) bool { - return value <= 0 -} - -// InRangeInt returns true if value lies between left and right border -func InRangeInt(value, left, right interface{}) bool { - value64, _ := ToInt(value) - left64, _ := ToInt(left) - right64, _ := ToInt(right) - if left64 > right64 { - left64, right64 = right64, left64 - } - return value64 >= left64 && value64 <= right64 -} - -// InRangeFloat32 returns true if value lies between left and right border -func InRangeFloat32(value, left, right float32) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRangeFloat64 returns true if value lies between left and right border -func InRangeFloat64(value, left, right float64) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string. -// All types must the same type. 
-// False if value doesn't lie in range or if it incompatible or not comparable -func InRange(value interface{}, left interface{}, right interface{}) bool { - switch value.(type) { - case int: - intValue, _ := ToInt(value) - intLeft, _ := ToInt(left) - intRight, _ := ToInt(right) - return InRangeInt(intValue, intLeft, intRight) - case float32, float64: - intValue, _ := ToFloat(value) - intLeft, _ := ToFloat(left) - intRight, _ := ToFloat(right) - return InRangeFloat64(intValue, intLeft, intRight) - case string: - return value.(string) >= left.(string) && value.(string) <= right.(string) - default: - return false - } -} - -// IsWhole returns true if value is whole number -func IsWhole(value float64) bool { - return math.Remainder(value, 1) == 0 -} - -// IsNatural returns true if value is natural number (positive and whole) -func IsNatural(value float64) bool { - return IsWhole(value) && IsPositive(value) -} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go deleted file mode 100644 index bafc3765e..000000000 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ /dev/null @@ -1,113 +0,0 @@ -package govalidator - -import "regexp" - -// Basic regular expressions for validating strings -const ( - Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" - ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" - ISBN13 string = "^(?:[0-9]{13})$" - UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - Alpha string = "^[a-zA-Z]+$" - Alphanumeric string = "^[a-zA-Z0-9]+$" - Numeric string = "^[0-9]+$" - Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" - Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" - Hexadecimal string = "^[0-9a-fA-F]+$" - Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - RGBcolor string = 
"^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" - ASCII string = "^[\x00-\x7F]+$" - Multibyte string = "[^\x00-\x7F]" - FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - PrintableASCII string = "^[\x20-\x7E]+$" - DataURI string = "^data:.+\\/(.+);base64$" - MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" - Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` - IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername string = `(\S+(:\S*)?@)` - URLPath string = `((\/|\?|#)[^\s]*)` - URLPort string = `(:(\d{1,5}))` - URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` - URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` - SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` - WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixPath string = `^(/[^/\x00]*)+/?$` - WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` - Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" - tagName string = "valid" - hasLowerCase string = ".*[[:lower:]]" - hasUpperCase string = ".*[[:upper:]]" - hasWhitespace string = ".*[[:space:]]" - hasWhitespaceOnly string = "^[[:space:]]+$" - IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" - IMSI string = "^\\d{14,15}$" - E164 string = `^\+?[1-9]\d{1,14}$` -) - -// Used by IsFilePath func -const ( - // Unknown is unresolved OS type - Unknown = iota - // Win is Windows type - Win - // Unix is *nix OS types - Unix -) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") - userDotRegexp = 
regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") - rxEmail = regexp.MustCompile(Email) - rxCreditCard = regexp.MustCompile(CreditCard) - rxISBN10 = regexp.MustCompile(ISBN10) - rxISBN13 = regexp.MustCompile(ISBN13) - rxUUID3 = regexp.MustCompile(UUID3) - rxUUID4 = regexp.MustCompile(UUID4) - rxUUID5 = regexp.MustCompile(UUID5) - rxUUID = regexp.MustCompile(UUID) - rxAlpha = regexp.MustCompile(Alpha) - rxAlphanumeric = regexp.MustCompile(Alphanumeric) - rxNumeric = regexp.MustCompile(Numeric) - rxInt = regexp.MustCompile(Int) - rxFloat = regexp.MustCompile(Float) - rxHexadecimal = regexp.MustCompile(Hexadecimal) - rxHexcolor = regexp.MustCompile(Hexcolor) - rxRGBcolor = regexp.MustCompile(RGBcolor) - rxASCII = regexp.MustCompile(ASCII) - rxPrintableASCII = regexp.MustCompile(PrintableASCII) - rxMultibyte = regexp.MustCompile(Multibyte) - rxFullWidth = regexp.MustCompile(FullWidth) - rxHalfWidth = regexp.MustCompile(HalfWidth) - rxBase64 = regexp.MustCompile(Base64) - rxDataURI = regexp.MustCompile(DataURI) - rxMagnetURI = regexp.MustCompile(MagnetURI) - rxLatitude = regexp.MustCompile(Latitude) - rxLongitude = regexp.MustCompile(Longitude) - rxDNSName = regexp.MustCompile(DNSName) - rxURL = regexp.MustCompile(URL) - rxSSN = regexp.MustCompile(SSN) - rxWinPath = regexp.MustCompile(WinPath) - rxUnixPath = regexp.MustCompile(UnixPath) - rxARWinPath = regexp.MustCompile(WinARPath) - rxARUnixPath = regexp.MustCompile(UnixARPath) - rxSemver = regexp.MustCompile(Semver) - rxHasLowerCase = regexp.MustCompile(hasLowerCase) - rxHasUpperCase = regexp.MustCompile(hasUpperCase) - rxHasWhitespace = regexp.MustCompile(hasWhitespace) - rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) - rxIMEI = regexp.MustCompile(IMEI) - rxIMSI = regexp.MustCompile(IMSI) - rxE164 = regexp.MustCompile(E164) -) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go deleted file mode 100644 index c573abb51..000000000 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ /dev/null @@ -1,656 +0,0 @@ -package govalidator - -import ( - "reflect" - "regexp" - "sort" - "sync" -) - -// Validator is a wrapper for a validator function that returns bool and accepts string. -type Validator func(str string) bool - -// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. -// The second parameter should be the context (in the case of validating a struct: the whole object being validated). -type CustomTypeValidator func(i interface{}, o interface{}) bool - -// ParamValidator is a wrapper for validator functions that accept additional parameters. -type ParamValidator func(str string, params ...string) bool - -// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value -type InterfaceParamValidator func(in interface{}, params ...string) bool -type tagOptionsMap map[string]tagOption - -func (t tagOptionsMap) orderedKeys() []string { - var keys []string - for k := range t { - keys = append(keys, k) - } - - sort.Slice(keys, func(a, b int) bool { - return t[keys[a]].order < t[keys[b]].order - }) - - return keys -} - -type tagOption struct { - name string - customErrorMessage string - order int -} - -// UnsupportedTypeError is a wrapper for reflect.Type -type UnsupportedTypeError struct { - Type reflect.Type -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. 
-type stringValues []reflect.Value - -// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value -var InterfaceParamTagMap = map[string]InterfaceParamValidator{ - "type": IsType, -} - -// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. -var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ - "type": regexp.MustCompile(`^type\((.*)\)$`), -} - -// ParamTagMap is a map of functions accept variants parameters -var ParamTagMap = map[string]ParamValidator{ - "length": ByteLength, - "range": Range, - "runelength": RuneLength, - "stringlength": StringLength, - "matches": StringMatches, - "in": IsInRaw, - "rsapub": IsRsaPub, - "minstringlength": MinStringLength, - "maxstringlength": MaxStringLength, -} - -// ParamTagRegexMap maps param tags to their respective regexes. -var ParamTagRegexMap = map[string]*regexp.Regexp{ - "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), - "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), - "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), - "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), - "in": regexp.MustCompile(`^in\((.*)\)`), - "matches": regexp.MustCompile(`^matches\((.+)\)$`), - "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), - "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), - "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), -} - -type customTypeTagMap struct { - validators map[string]CustomTypeValidator - - sync.RWMutex -} - -func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { - tm.RLock() - defer tm.RUnlock() - v, ok := tm.validators[name] - return v, ok -} - -func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { - tm.Lock() - defer tm.Unlock() - tm.validators[name] = ctv -} - -// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. -// Use this to validate compound or custom types that need to be handled as a whole, e.g. -// `type UUID [16]byte` (this would be handled as an array of bytes). -var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} - -// TagMap is a map of functions, that can be used as tags for ValidateStruct function. 
-var TagMap = map[string]Validator{ - "email": IsEmail, - "url": IsURL, - "dialstring": IsDialString, - "requrl": IsRequestURL, - "requri": IsRequestURI, - "alpha": IsAlpha, - "utfletter": IsUTFLetter, - "alphanum": IsAlphanumeric, - "utfletternum": IsUTFLetterNumeric, - "numeric": IsNumeric, - "utfnumeric": IsUTFNumeric, - "utfdigit": IsUTFDigit, - "hexadecimal": IsHexadecimal, - "hexcolor": IsHexcolor, - "rgbcolor": IsRGBcolor, - "lowercase": IsLowerCase, - "uppercase": IsUpperCase, - "int": IsInt, - "float": IsFloat, - "null": IsNull, - "notnull": IsNotNull, - "uuid": IsUUID, - "uuidv3": IsUUIDv3, - "uuidv4": IsUUIDv4, - "uuidv5": IsUUIDv5, - "creditcard": IsCreditCard, - "isbn10": IsISBN10, - "isbn13": IsISBN13, - "json": IsJSON, - "multibyte": IsMultibyte, - "ascii": IsASCII, - "printableascii": IsPrintableASCII, - "fullwidth": IsFullWidth, - "halfwidth": IsHalfWidth, - "variablewidth": IsVariableWidth, - "base64": IsBase64, - "datauri": IsDataURI, - "ip": IsIP, - "port": IsPort, - "ipv4": IsIPv4, - "ipv6": IsIPv6, - "dns": IsDNSName, - "host": IsHost, - "mac": IsMAC, - "latitude": IsLatitude, - "longitude": IsLongitude, - "ssn": IsSSN, - "semver": IsSemver, - "rfc3339": IsRFC3339, - "rfc3339WithoutZone": IsRFC3339WithoutZone, - "ISO3166Alpha2": IsISO3166Alpha2, - "ISO3166Alpha3": IsISO3166Alpha3, - "ISO4217": IsISO4217, - "IMEI": IsIMEI, - "ulid": IsULID, -} - -// ISO3166Entry stores country codes -type ISO3166Entry struct { - EnglishShortName string - FrenchShortName string - Alpha2Code string - Alpha3Code string - Numeric string -} - -//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" -var ISO3166List = []ISO3166Entry{ - {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, - {"Albania", "Albanie (l')", "AL", "ALB", "008"}, - {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, - {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, - {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, - {"Andorra", "Andorre (l')", "AD", "AND", "020"}, - {"Angola", "Angola (l')", "AO", "AGO", "024"}, - {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, - {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, - {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, - {"Australia", "Australie (l')", "AU", "AUS", "036"}, - {"Austria", "Autriche (l')", "AT", "AUT", "040"}, - {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, - {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, - {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, - {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, - {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, - {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, - {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, - {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, - {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, - {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, - {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, - {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, - {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, - {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, - {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, - {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, - {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, - {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, - {"Bulgaria", "Bulgarie (la)", "BG", 
"BGR", "100"}, - {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, - {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, - {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, - {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, - {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, - {"Canada", "Canada (le)", "CA", "CAN", "124"}, - {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, - {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, - {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, - {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, - {"Chad", "Tchad (le)", "TD", "TCD", "148"}, - {"Chile", "Chili (le)", "CL", "CHL", "152"}, - {"China", "Chine (la)", "CN", "CHN", "156"}, - {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, - {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, - {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, - {"Colombia", "Colombie (la)", "CO", "COL", "170"}, - {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, - {"Mayotte", "Mayotte", "YT", "MYT", "175"}, - {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, - {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, - {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, - {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, - {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, - {"Cuba", "Cuba", "CU", "CUB", "192"}, - {"Cyprus", "Chypre", "CY", "CYP", "196"}, - {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, - {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, - {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, - {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, - {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, - {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, - {"El Salvador", "El Salvador", "SV", "SLV", "222"}, - {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, - {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, - {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, - {"Estonia", "Estonie (l')", "EE", "EST", "233"}, - {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, - {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, - {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, - {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, - {"Finland", "Finlande (la)", "FI", "FIN", "246"}, - {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, - {"France", "France (la)", "FR", "FRA", "250"}, - {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, - {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, - {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, - {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, - {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, - {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, - {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, - {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, - {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, - {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, - {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, - {"Kiribati", "Kiribati", "KI", "KIR", "296"}, - {"Greece", "Grèce (la)", "GR", "GRC", "300"}, - {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, - {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, 
- {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, - {"Guam", "Guam", "GU", "GUM", "316"}, - {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, - {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, - {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, - {"Haiti", "Haïti", "HT", "HTI", "332"}, - {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, - {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, - {"Honduras", "Honduras (le)", "HN", "HND", "340"}, - {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, - {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, - {"Iceland", "Islande (l')", "IS", "ISL", "352"}, - {"India", "Inde (l')", "IN", "IND", "356"}, - {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, - {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, - {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, - {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, - {"Israel", "Israël", "IL", "ISR", "376"}, - {"Italy", "Italie (l')", "IT", "ITA", "380"}, - {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, - {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, - {"Japan", "Japon (le)", "JP", "JPN", "392"}, - {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, - {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, - {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, - {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, - {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, - {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, - {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, - {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, - {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, - {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, - {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, - {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, - {"Libya", "Libye (la)", "LY", "LBY", "434"}, - {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, - {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, - {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, - {"Macao", "Macao", "MO", "MAC", "446"}, - {"Madagascar", "Madagascar", "MG", "MDG", "450"}, - {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, - {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, - {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, - {"Mali", "Mali (le)", "ML", "MLI", "466"}, - {"Malta", "Malte", "MT", "MLT", "470"}, - {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, - {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, - {"Mauritius", "Maurice", "MU", "MUS", "480"}, - {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, - {"Monaco", "Monaco", "MC", "MCO", "492"}, - {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, - {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, - {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, - {"Montserrat", "Montserrat", "MS", "MSR", "500"}, - {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, - {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, - {"Oman", "Oman", "OM", "OMN", "512"}, - {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, - {"Nauru", "Nauru", "NR", "NRU", "520"}, - {"Nepal", "Népal (le)", "NP", "NPL", "524"}, - {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, - {"Curaçao", "Curaçao", "CW", "CUW", "531"}, - {"Aruba", "Aruba", "AW", "ABW", "533"}, - {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, - 
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, - {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, - {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, - {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, - {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, - {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, - {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, - {"Niue", "Niue", "NU", "NIU", "570"}, - {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, - {"Norway", "Norvège (la)", "NO", "NOR", "578"}, - {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, - {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, - {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, - {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, - {"Palau", "Palaos (les)", "PW", "PLW", "585"}, - {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, - {"Panama", "Panama (le)", "PA", "PAN", "591"}, - {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, - {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, - {"Peru", "Pérou (le)", "PE", "PER", "604"}, - {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, - {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, - {"Poland", "Pologne (la)", "PL", "POL", "616"}, - {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, - {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, - {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, - {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, - {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, - {"Réunion", "Réunion (La)", "RE", "REU", "638"}, - {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, - {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, - {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, - {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, - {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, - {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, - {"Anguilla", "Anguilla", "AI", "AIA", "660"}, - {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, - {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, - {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, - {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, - {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, - {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, - {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, - {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, - {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, - {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, - {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, - {"Singapore", "Singapour", "SG", "SGP", "702"}, - {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, - {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, - {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, - {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, - {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, - {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, - {"Spain", "Espagne (l')", "ES", "ESP", "724"}, - {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, - {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, - {"Western Sahara*", 
"Sahara occidental (le)*", "EH", "ESH", "732"}, - {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, - {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, - {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, - {"Sweden", "Suède (la)", "SE", "SWE", "752"}, - {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, - {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, - {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, - {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, - {"Togo", "Togo (le)", "TG", "TGO", "768"}, - {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, - {"Tonga", "Tonga (les)", "TO", "TON", "776"}, - {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, - {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, - {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, - {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, - {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, - {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, - {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, - {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, - {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, - {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, - {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, - {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, - {"Guernsey", "Guernesey", "GG", "GGY", "831"}, - {"Jersey", "Jersey", "JE", "JEY", "832"}, - {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, - {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, - {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, - {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, - {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, - {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, - {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, - {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, - {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, - {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, - {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, - {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, -} - -// ISO4217List is the list of ISO currency codes -var ISO4217List = []string{ - "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", - "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", - "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", - "DJF", "DKK", "DOP", "DZD", - "EGP", "ERN", "ETB", "EUR", - "FJD", "FKP", - "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", - "HKD", "HNL", "HRK", "HTG", "HUF", - "IDR", "ILS", "INR", "IQD", "IRR", "ISK", - "JMD", "JOD", "JPY", - "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", - "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", - "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", - "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", - "OMR", - "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", - "QAR", - "RON", "RSD", "RUB", "RWF", - "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", - "THB", "TJS", "TMT", 
"TND", "TOP", "TRY", "TTD", "TWD", "TZS", - "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", - "VEF", "VES", "VND", "VUV", - "WST", - "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", - "YER", - "ZAR", "ZMW", "ZWL", -} - -// ISO693Entry stores ISO language codes -type ISO693Entry struct { - Alpha3bCode string - Alpha2Code string - English string -} - -//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json -var ISO693List = []ISO693Entry{ - {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, - {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, - {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, - {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, - {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, - {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, - {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, - {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, - {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, - {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, - {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, - {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, - {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, - {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, - {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, - {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, - {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, - {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, - {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, - {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, - {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, - {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, - {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, - {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, - {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, - {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, - {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, - {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, - {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, - {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, - {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, - {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, - {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, - {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, - {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, - {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, - {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, - {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, - {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, - {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, - {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, - {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, - {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, - {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, - {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, - {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, - {Alpha3bCode: "fre", 
Alpha2Code: "fr", English: "French"}, - {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, - {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, - {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, - {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, - {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, - {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, - {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, - {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, - {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, - {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, - {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, - {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, - {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, - {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, - {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, - {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, - {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, - {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, - {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, - {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, - {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, - {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, - {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, - {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, - {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, - {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, - {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, - {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, - {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, - {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, - {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, - {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, - {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, - {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, - {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, - {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, - {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, - {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, - {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, - {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, - {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, - {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, - {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, - {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, - {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, - {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, - {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, - {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, - {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, - {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, - {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, - {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, - {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, - 
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, - {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, - {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, - {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, - {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, - {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, - {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, - {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, - {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, - {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, - {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, - {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, - {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, - {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, - {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, - {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, - {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, - {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, - {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, - {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, - {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, - {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, - {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, - {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, - {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, - {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, - {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, - {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, - {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, - {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, - {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, - {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, - {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, - {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, - {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, - {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, - {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, - {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, - {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, - {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, - {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, - {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, - {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, - {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, - {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, - {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, - {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, - {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, - {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, - {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, - {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, - {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, - {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, - {Alpha3bCode: "swe", Alpha2Code: "sv", 
English: "Swedish"}, - {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, - {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, - {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, - {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, - {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, - {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, - {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, - {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, - {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, - {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, - {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, - {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, - {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, - {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, - {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, - {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, - {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, - {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, - {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, - {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, - {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, - {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, - {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, - {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, - {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, - {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, - {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, - {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, - {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, - {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, -} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go deleted file mode 100644 index f4c30f824..000000000 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ /dev/null @@ -1,270 +0,0 @@ -package govalidator - -import ( - "errors" - "fmt" - "html" - "math" - "path" - "regexp" - "strings" - "unicode" - "unicode/utf8" -) - -// Contains checks if the string contains the substring. -func Contains(str, substring string) bool { - return strings.Contains(str, substring) -} - -// Matches checks if string matches the pattern (pattern is regular expression) -// In case of error return false -func Matches(str, pattern string) bool { - match, _ := regexp.MatchString(pattern, str) - return match -} - -// LeftTrim trims characters from the left side of the input. -// If second argument is empty, it will remove leading spaces. -func LeftTrim(str, chars string) string { - if chars == "" { - return strings.TrimLeftFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("^[" + chars + "]+") - return r.ReplaceAllString(str, "") -} - -// RightTrim trims characters from the right side of the input. -// If second argument is empty, it will remove trailing spaces. -func RightTrim(str, chars string) string { - if chars == "" { - return strings.TrimRightFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("[" + chars + "]+$") - return r.ReplaceAllString(str, "") -} - -// Trim trims characters from both sides of the input. -// If second argument is empty, it will remove spaces. 
-func Trim(str, chars string) string { - return LeftTrim(RightTrim(str, chars), chars) -} - -// WhiteList removes characters that do not appear in the whitelist. -func WhiteList(str, chars string) string { - pattern := "[^" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// BlackList removes characters that appear in the blacklist. -func BlackList(str, chars string) string { - pattern := "[" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. -// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). -func StripLow(str string, keepNewLines bool) string { - chars := "" - if keepNewLines { - chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" - } else { - chars = "\x00-\x1F\x7F" - } - return BlackList(str, chars) -} - -// ReplacePattern replaces regular expression pattern in string -func ReplacePattern(str, pattern, replace string) string { - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, replace) -} - -// Escape replaces <, >, & and " with HTML entities. -var Escape = html.EscapeString - -func addSegment(inrune, segment []rune) []rune { - if len(segment) == 0 { - return inrune - } - if len(inrune) != 0 { - inrune = append(inrune, '_') - } - inrune = append(inrune, segment...) - return inrune -} - -// UnderscoreToCamelCase converts from underscore separated form to camel case form. -// Ex.: my_func => MyFunc -func UnderscoreToCamelCase(s string) string { - return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) -} - -// CamelCaseToUnderscore converts from camel case form to underscore separated form. -// Ex.: MyFunc => my_func -func CamelCaseToUnderscore(str string) string { - var output []rune - var segment []rune - for _, r := range str { - - // not treat number as separate segment - if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { - output = addSegment(output, segment) - segment = nil - } - segment = append(segment, unicode.ToLower(r)) - } - output = addSegment(output, segment) - return string(output) -} - -// Reverse returns reversed string -func Reverse(s string) string { - r := []rune(s) - for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r) -} - -// GetLines splits string by "\n" and return array of lines -func GetLines(s string) []string { - return strings.Split(s, "\n") -} - -// GetLine returns specified line of multiline string -func GetLine(s string, index int) (string, error) { - lines := GetLines(s) - if index < 0 || index >= len(lines) { - return "", errors.New("line index out of bounds") - } - return lines[index], nil -} - -// RemoveTags removes all tags from HTML string -func RemoveTags(s string) string { - return ReplacePattern(s, "<[^>]*>", "") -} - -// SafeFileName returns safe string that can be used in file names -func SafeFileName(str string) string { - name := strings.ToLower(str) - name = path.Clean(path.Base(name)) - name = strings.Trim(name, " ") - separators, err := regexp.Compile(`[ &_=+:]`) - if err == nil { - name = separators.ReplaceAllString(name, "-") - } - legal, err := regexp.Compile(`[^[:alnum:]-.]`) - if err == nil { - name = legal.ReplaceAllString(name, "") - } - for strings.Contains(name, "--") { - name = strings.Replace(name, "--", "-", -1) - } - return name -} - -// NormalizeEmail canonicalize an email address. 
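Aside: the case-conversion helpers removed above are stateless string transforms; a minimal usage sketch against the upstream github.com/asaskevich/govalidator package (which this patch removes from vendor/ only, not from existence), reproducing the examples from the removed doc comments:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Round-trip the two conversions documented above.
	fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc"))  // my_func
	fmt.Println(govalidator.UnderscoreToCamelCase("my_func")) // MyFunc
}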
-// The local part of the email address is lowercased for all domains; the hostname is always lowercased and -// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). -// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and -// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are -// normalized to @gmail.com. -func NormalizeEmail(str string) (string, error) { - if !IsEmail(str) { - return "", fmt.Errorf("%s is not an email", str) - } - parts := strings.Split(str, "@") - parts[0] = strings.ToLower(parts[0]) - parts[1] = strings.ToLower(parts[1]) - if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { - parts[1] = "gmail.com" - parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] - } - return strings.Join(parts, "@"), nil -} - -// Truncate a string to the closest length without breaking words. -func Truncate(str string, length int, ending string) string { - var aftstr, befstr string - if len(str) > length { - words := strings.Fields(str) - before, present := 0, 0 - for i := range words { - befstr = aftstr - before = present - aftstr = aftstr + words[i] + " " - present = len(aftstr) - if present > length && i != 0 { - if (length - before) < (present - length) { - return Trim(befstr, " /\\.,\"'#!?&@+-") + ending - } - return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending - } - } - } - - return str -} - -// PadLeft pads left side of a string if size of string is less then indicated pad length -func PadLeft(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, false) -} - -// PadRight pads right side of a string if size of string is less then indicated pad length -func PadRight(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, false, true) -} - -// PadBoth pads both sides of a string if size of string is less then indicated pad length -func PadBoth(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, true) -} - -// PadString either left, right or both sides. -// Note that padding string can be unicode and more then one character -func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { - - // When padded length is less then the current string size - if padLen < utf8.RuneCountInString(str) { - return str - } - - padLen -= utf8.RuneCountInString(str) - - targetLen := padLen - - targetLenLeft := targetLen - targetLenRight := targetLen - if padLeft && padRight { - targetLenLeft = padLen / 2 - targetLenRight = padLen - targetLenLeft - } - - strToRepeatLen := utf8.RuneCountInString(padStr) - - repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) - repeatedString := strings.Repeat(padStr, repeatTimes) - - leftSide := "" - if padLeft { - leftSide = repeatedString[0:targetLenLeft] - } - - rightSide := "" - if padRight { - rightSide = repeatedString[0:targetLenRight] - } - - return leftSide + str + rightSide -} - -// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object -func TruncatingErrorf(str string, args ...interface{}) error { - n := strings.Count(str, "%s") - return fmt.Errorf(str, args[:n]...) 
-} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go deleted file mode 100644 index 46ecfc84a..000000000 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ /dev/null @@ -1,1769 +0,0 @@ -// Package govalidator is package of validators and sanitizers for strings, structs and collections. -package govalidator - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net" - "net/url" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -var ( - fieldsRequiredByDefault bool - nilPtrAllowedByRequired = false - notNumberRegexp = regexp.MustCompile("[^0-9]+") - whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) - paramsRegexp = regexp.MustCompile(`\(.*\)$`) -) - -const maxURLRuneCount = 2083 -const minURLRuneCount = 3 -const rfc3339WithoutZone = "2006-01-02T15:04:05" - -// SetFieldsRequiredByDefault causes validation to fail when struct fields -// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). -// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -// type exampleStruct struct { -// Name string `` -// Email string `valid:"email"` -// This, however, will only fail when Email is empty or an invalid email address: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email"` -// Lastly, this will only fail when Email is an invalid email address but not when it's empty: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email,optional"` -func SetFieldsRequiredByDefault(value bool) { - fieldsRequiredByDefault = value -} - -// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. -// The validation will still reject ptr fields in their zero value state. Example with this enabled: -// type exampleStruct struct { -// Name *string `valid:"required"` -// With `Name` set to "", this will be considered invalid input and will cause a validation error. -// With `Name` set to nil, this will be considered valid by validation. -// By default this is disabled. -func SetNilPtrAllowedByRequired(value bool) { - nilPtrAllowedByRequired = value -} - -// IsEmail checks if the string is an email. -func IsEmail(str string) bool { - // TODO uppercase letters are not supported - return rxEmail.MatchString(str) -} - -// IsExistingEmail checks if the string is an email of existing domain -func IsExistingEmail(email string) bool { - - if len(email) < 6 || len(email) > 254 { - return false - } - at := strings.LastIndex(email, "@") - if at <= 0 || at > len(email)-3 { - return false - } - user := email[:at] - host := email[at+1:] - if len(user) > 64 { - return false - } - switch host { - case "localhost", "example.com": - return true - } - if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { - return false - } - if _, err := net.LookupMX(host); err != nil { - if _, err := net.LookupIP(host); err != nil { - return false - } - } - - return true -} - -// IsURL checks if the string is an URL. 
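The NormalizeEmail helper above builds on IsEmail; a short usage sketch (assuming the upstream govalidator module), reproducing the example from its own doc comment:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Gmail local parts lose dots and +tags; googlemail.com folds into gmail.com.
	normalized, err := govalidator.NormalizeEmail("some.one+tag@googlemail.com")
	if err != nil {
		fmt.Println("not an email:", err)
		return
	}
	fmt.Println(normalized) // someone@gmail.com
}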
-func IsURL(str string) bool { - if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { - return false - } - strTemp := str - if strings.Contains(str, ":") && !strings.Contains(str, "://") { - // support no indicated urlscheme but with colon for port number - // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString - strTemp = "http://" + str - } - u, err := url.Parse(strTemp) - if err != nil { - return false - } - if strings.HasPrefix(u.Host, ".") { - return false - } - if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { - return false - } - return rxURL.MatchString(str) -} - -// IsRequestURL checks if the string rawurl, assuming -// it was received in an HTTP request, is a valid -// URL confirm to RFC 3986 -func IsRequestURL(rawurl string) bool { - url, err := url.ParseRequestURI(rawurl) - if err != nil { - return false //Couldn't even parse the rawurl - } - if len(url.Scheme) == 0 { - return false //No Scheme found - } - return true -} - -// IsRequestURI checks if the string rawurl, assuming -// it was received in an HTTP request, is an -// absolute URI or an absolute path. -func IsRequestURI(rawurl string) bool { - _, err := url.ParseRequestURI(rawurl) - return err == nil -} - -// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid. -func IsAlpha(str string) bool { - if IsNull(str) { - return true - } - return rxAlpha.MatchString(str) -} - -//IsUTFLetter checks if the string contains only unicode letter characters. -//Similar to IsAlpha but for all languages. Empty string is valid. -func IsUTFLetter(str string) bool { - if IsNull(str) { - return true - } - - for _, c := range str { - if !unicode.IsLetter(c) { - return false - } - } - return true - -} - -// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid. -func IsAlphanumeric(str string) bool { - if IsNull(str) { - return true - } - return rxAlphanumeric.MatchString(str) -} - -// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid. -func IsUTFLetterNumeric(str string) bool { - if IsNull(str) { - return true - } - for _, c := range str { - if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok - return false - } - } - return true - -} - -// IsNumeric checks if the string contains only numbers. Empty string is valid. -func IsNumeric(str string) bool { - if IsNull(str) { - return true - } - return rxNumeric.MatchString(str) -} - -// IsUTFNumeric checks if the string contains only unicode numbers of any kind. -// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. -func IsUTFNumeric(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsNumber(c) { //numbers && minus sign are ok - return false - } - } - return true - -} - -// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid. 
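For reference, IsURL above both parses and regexp-matches its input, while the request-URL variants rely on url.ParseRequestURI alone; a sketch of the differences visible in the removed code:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsURL("https://example.com")) // true
	fmt.Println(govalidator.IsURL(".example.com"))        // false: leading dot is rejected outright
	fmt.Println(govalidator.IsRequestURI("/api/v1/x"))    // true: an absolute path suffices
	fmt.Println(govalidator.IsRequestURL("/api/v1/x"))    // false: a scheme is required
}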
-func IsUTFDigit(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsDigit(c) { //digits && minus sign are ok - return false - } - } - return true - -} - -// IsHexadecimal checks if the string is a hexadecimal number. -func IsHexadecimal(str string) bool { - return rxHexadecimal.MatchString(str) -} - -// IsHexcolor checks if the string is a hexadecimal color. -func IsHexcolor(str string) bool { - return rxHexcolor.MatchString(str) -} - -// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB). -func IsRGBcolor(str string) bool { - return rxRGBcolor.MatchString(str) -} - -// IsLowerCase checks if the string is lowercase. Empty string is valid. -func IsLowerCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToLower(str) -} - -// IsUpperCase checks if the string is uppercase. Empty string is valid. -func IsUpperCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToUpper(str) -} - -// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid. -func HasLowerCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasLowerCase.MatchString(str) -} - -// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid. -func HasUpperCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasUpperCase.MatchString(str) -} - -// IsInt checks if the string is an integer. Empty string is valid. -func IsInt(str string) bool { - if IsNull(str) { - return true - } - return rxInt.MatchString(str) -} - -// IsFloat checks if the string is a float. -func IsFloat(str string) bool { - return str != "" && rxFloat.MatchString(str) -} - -// IsDivisibleBy checks if the string is a number that's divisible by another. -// If second argument is not valid integer or zero, it's return false. -// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). -func IsDivisibleBy(str, num string) bool { - f, _ := ToFloat(str) - p := int64(f) - q, _ := ToInt(num) - if q == 0 { - return false - } - return (p == 0) || (p%q == 0) -} - -// IsNull checks if the string is null. -func IsNull(str string) bool { - return len(str) == 0 -} - -// IsNotNull checks if the string is not null. -func IsNotNull(str string) bool { - return !IsNull(str) -} - -// HasWhitespaceOnly checks the string only contains whitespace -func HasWhitespaceOnly(str string) bool { - return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) -} - -// HasWhitespace checks if the string contains any whitespace -func HasWhitespace(str string) bool { - return len(str) > 0 && rxHasWhitespace.MatchString(str) -} - -// IsByteLength checks if the string's length (in bytes) falls in a range. -func IsByteLength(str string, min, max int) bool { - return len(str) >= min && len(str) <= max -} - -// IsUUIDv3 checks if the string is a UUID version 3. -func IsUUIDv3(str string) bool { - return rxUUID3.MatchString(str) -} - -// IsUUIDv4 checks if the string is a UUID version 4. -func IsUUIDv4(str string) bool { - return rxUUID4.MatchString(str) -} - -// IsUUIDv5 checks if the string is a UUID version 5. -func IsUUIDv5(str string) bool { - return rxUUID5.MatchString(str) -} - -// IsUUID checks if the string is a UUID (version 3, 4 or 5). 
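The UUID checks above and below are pure regexp matches, one per version plus a catch-all; a small sketch:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	id := "f47ac10b-58cc-4372-a567-0e02b2c3d479" // version nibble is 4
	fmt.Println(govalidator.IsUUID(id))   // true: matches any of v3/v4/v5
	fmt.Println(govalidator.IsUUIDv4(id)) // true
	fmt.Println(govalidator.IsUUIDv3(id)) // false: wrong version nibble
}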
-func IsUUID(str string) bool { - return rxUUID.MatchString(str) -} - -// Byte to index table for O(1) lookups when unmarshaling. -// We use 0xFF as sentinel value for invalid indexes. -var ulidDec = [...]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, - 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, - 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, -} - -// EncodedSize is the length of a text encoded ULID. -const ulidEncodedSize = 26 - -// IsULID checks if the string is a ULID. -// -// Implementation got from: -// https://github.com/oklog/ulid (Apache-2.0 License) -// -func IsULID(str string) bool { - // Check if a base32 encoded ULID is the right length. - if len(str) != ulidEncodedSize { - return false - } - - // Check if all the characters in a base32 encoded ULID are part of the - // expected base32 character set. - if ulidDec[str[0]] == 0xFF || - ulidDec[str[1]] == 0xFF || - ulidDec[str[2]] == 0xFF || - ulidDec[str[3]] == 0xFF || - ulidDec[str[4]] == 0xFF || - ulidDec[str[5]] == 0xFF || - ulidDec[str[6]] == 0xFF || - ulidDec[str[7]] == 0xFF || - ulidDec[str[8]] == 0xFF || - ulidDec[str[9]] == 0xFF || - ulidDec[str[10]] == 0xFF || - ulidDec[str[11]] == 0xFF || - ulidDec[str[12]] == 0xFF || - ulidDec[str[13]] == 0xFF || - ulidDec[str[14]] == 0xFF || - ulidDec[str[15]] == 0xFF || - ulidDec[str[16]] == 0xFF || - ulidDec[str[17]] == 0xFF || - ulidDec[str[18]] == 0xFF || - ulidDec[str[19]] == 0xFF || - ulidDec[str[20]] == 0xFF || - ulidDec[str[21]] == 0xFF || - ulidDec[str[22]] == 0xFF || - ulidDec[str[23]] == 0xFF || - ulidDec[str[24]] == 0xFF || - ulidDec[str[25]] == 0xFF { - return false - } - - // Check if the first character in a base32 encoded ULID will overflow. This - // happens because the base32 representation encodes 130 bits, while the - // ULID is only 128 bits. - // - // See https://github.com/oklog/ulid/issues/9 for details. - if str[0] > '7' { - return false - } - return true -} - -// IsCreditCard checks if the string is a credit card. 
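IsULID above rejects anything that is not exactly 26 Crockford base32 characters, plus the leading-character overflow case; for example:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsULID("01ARZ3NDEKTSV4RRFFQ69G5FAV")) // true
	// 26 valid base32 characters, but a first character above '7' would
	// overflow the 128-bit ULID, so it is rejected.
	fmt.Println(govalidator.IsULID("81ARZ3NDEKTSV4RRFFQ69G5FAV")) // false
}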
-func IsCreditCard(str string) bool { - sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") - if !rxCreditCard.MatchString(sanitized) { - return false - } - var sum int64 - var digit string - var tmpNum int64 - var shouldDouble bool - for i := len(sanitized) - 1; i >= 0; i-- { - digit = sanitized[i:(i + 1)] - tmpNum, _ = ToInt(digit) - if shouldDouble { - tmpNum *= 2 - if tmpNum >= 10 { - sum += (tmpNum % 10) + 1 - } else { - sum += tmpNum - } - } else { - sum += tmpNum - } - shouldDouble = !shouldDouble - } - - return sum%10 == 0 -} - -// IsISBN10 checks if the string is an ISBN version 10. -func IsISBN10(str string) bool { - return IsISBN(str, 10) -} - -// IsISBN13 checks if the string is an ISBN version 13. -func IsISBN13(str string) bool { - return IsISBN(str, 13) -} - -// IsISBN checks if the string is an ISBN (version 10 or 13). -// If version value is not equal to 10 or 13, it will be checks both variants. -func IsISBN(str string, version int) bool { - sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") - var checksum int32 - var i int32 - if version == 10 { - if !rxISBN10.MatchString(sanitized) { - return false - } - for i = 0; i < 9; i++ { - checksum += (i + 1) * int32(sanitized[i]-'0') - } - if sanitized[9] == 'X' { - checksum += 10 * 10 - } else { - checksum += 10 * int32(sanitized[9]-'0') - } - if checksum%11 == 0 { - return true - } - return false - } else if version == 13 { - if !rxISBN13.MatchString(sanitized) { - return false - } - factor := []int32{1, 3} - for i = 0; i < 12; i++ { - checksum += factor[i%2] * int32(sanitized[i]-'0') - } - return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 - } - return IsISBN(str, 10) || IsISBN(str, 13) -} - -// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal). -func IsJSON(str string) bool { - var js json.RawMessage - return json.Unmarshal([]byte(str), &js) == nil -} - -// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid. -func IsMultibyte(str string) bool { - if IsNull(str) { - return true - } - return rxMultibyte.MatchString(str) -} - -// IsASCII checks if the string contains ASCII chars only. Empty string is valid. -func IsASCII(str string) bool { - if IsNull(str) { - return true - } - return rxASCII.MatchString(str) -} - -// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid. -func IsPrintableASCII(str string) bool { - if IsNull(str) { - return true - } - return rxPrintableASCII.MatchString(str) -} - -// IsFullWidth checks if the string contains any full-width chars. Empty string is valid. -func IsFullWidth(str string) bool { - if IsNull(str) { - return true - } - return rxFullWidth.MatchString(str) -} - -// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid. -func IsHalfWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) -} - -// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid. -func IsVariableWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) -} - -// IsBase64 checks if a string is base64 encoded. -func IsBase64(str string) bool { - return rxBase64.MatchString(str) -} - -// IsFilePath checks is a string is Win or Unix file path and returns it's type. 
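IsCreditCard above is a card-prefix regexp followed by the standard Luhn checksum; a self-contained sketch of just the Luhn half, equivalent to the loop in the removed code (input assumed pre-sanitized to digits only):

package main

import "fmt"

// luhnValid doubles every second digit from the right and checks mod 10,
// mirroring the loop in IsCreditCard above.
func luhnValid(digits string) bool {
	sum := 0
	double := false
	for i := len(digits) - 1; i >= 0; i-- {
		d := int(digits[i] - '0')
		if double {
			d *= 2
			if d > 9 {
				d -= 9 // same as (d%10)+1 for d in 10..18
			}
		}
		sum += d
		double = !double
	}
	return sum%10 == 0
}

func main() {
	fmt.Println(luhnValid("4111111111111111")) // true: a classic test PAN
	fmt.Println(luhnValid("4111111111111112")) // false
}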
-func IsFilePath(str string) (bool, int) { - if rxWinPath.MatchString(str) { - //check windows path limit see: - // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath - if len(str[3:]) > 32767 { - return false, Win - } - return true, Win - } else if rxUnixPath.MatchString(str) { - return true, Unix - } - return false, Unknown -} - -//IsWinFilePath checks both relative & absolute paths in Windows -func IsWinFilePath(str string) bool { - if rxARWinPath.MatchString(str) { - //check windows path limit see: - // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath - if len(str[3:]) > 32767 { - return false - } - return true - } - return false -} - -//IsUnixFilePath checks both relative & absolute paths in Unix -func IsUnixFilePath(str string) bool { - if rxARUnixPath.MatchString(str) { - return true - } - return false -} - -// IsDataURI checks if a string is base64 encoded data URI such as an image -func IsDataURI(str string) bool { - dataURI := strings.Split(str, ",") - if !rxDataURI.MatchString(dataURI[0]) { - return false - } - return IsBase64(dataURI[1]) -} - -// IsMagnetURI checks if a string is valid magnet URI -func IsMagnetURI(str string) bool { - return rxMagnetURI.MatchString(str) -} - -// IsISO3166Alpha2 checks if a string is valid two-letter country code -func IsISO3166Alpha2(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO3166Alpha3 checks if a string is valid three-letter country code -func IsISO3166Alpha3(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha3Code { - return true - } - } - return false -} - -// IsISO693Alpha2 checks if a string is valid two-letter language code -func IsISO693Alpha2(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO693Alpha3b checks if a string is valid three-letter language code -func IsISO693Alpha3b(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha3bCode { - return true - } - } - return false -} - -// IsDNSName will validate the given string as a DNS name -func IsDNSName(str string) bool { - if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { - // constraints already violated - return false - } - return !IsIP(str) && rxDNSName.MatchString(str) -} - -// IsHash checks if a string is a hash of type algorithm. -// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] -func IsHash(str string, algorithm string) bool { - var len string - algo := strings.ToLower(algorithm) - - if algo == "crc32" || algo == "crc32b" { - len = "8" - } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { - len = "32" - } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { - len = "40" - } else if algo == "tiger192" { - len = "48" - } else if algo == "sha3-224" { - len = "56" - } else if algo == "sha256" || algo == "sha3-256" { - len = "64" - } else if algo == "sha384" || algo == "sha3-384" { - len = "96" - } else if algo == "sha512" || algo == "sha3-512" { - len = "128" - } else { - return false - } - - return Matches(str, "^[a-f0-9]{"+len+"}$") -} - -// IsSHA3224 checks is a string is a SHA3-224 hash. 
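IsISO3166Alpha2 and IsISO3166Alpha3 above scan the ISO3166List slice linearly on every call; an alternative sketch that trades the slice for a hypothetical one-time map (not how govalidator does it), shown with a tiny excerpt of the removed table:

package main

import "fmt"

// alpha2 is a small excerpt of the removed ISO3166List, keyed for O(1) lookup.
var alpha2 = map[string]bool{
	"AF": true, "AL": true, "DE": true, "FR": true, "US": true,
}

func isISO3166Alpha2(code string) bool {
	return alpha2[code]
}

func main() {
	fmt.Println(isISO3166Alpha2("DE")) // true
	fmt.Println(isISO3166Alpha2("XX")) // false
}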
Alias for `IsHash(str, "sha3-224")` -func IsSHA3224(str string) bool { - return IsHash(str, "sha3-224") -} - -// IsSHA3256 checks is a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")` -func IsSHA3256(str string) bool { - return IsHash(str, "sha3-256") -} - -// IsSHA3384 checks is a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")` -func IsSHA3384(str string) bool { - return IsHash(str, "sha3-384") -} - -// IsSHA3512 checks is a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")` -func IsSHA3512(str string) bool { - return IsHash(str, "sha3-512") -} - -// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` -func IsSHA512(str string) bool { - return IsHash(str, "sha512") -} - -// IsSHA384 checks is a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` -func IsSHA384(str string) bool { - return IsHash(str, "sha384") -} - -// IsSHA256 checks is a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` -func IsSHA256(str string) bool { - return IsHash(str, "sha256") -} - -// IsTiger192 checks is a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` -func IsTiger192(str string) bool { - return IsHash(str, "tiger192") -} - -// IsTiger160 checks is a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")` -func IsTiger160(str string) bool { - return IsHash(str, "tiger160") -} - -// IsRipeMD160 checks is a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")` -func IsRipeMD160(str string) bool { - return IsHash(str, "ripemd160") -} - -// IsSHA1 checks is a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")` -func IsSHA1(str string) bool { - return IsHash(str, "sha1") -} - -// IsTiger128 checks is a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")` -func IsTiger128(str string) bool { - return IsHash(str, "tiger128") -} - -// IsRipeMD128 checks is a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")` -func IsRipeMD128(str string) bool { - return IsHash(str, "ripemd128") -} - -// IsCRC32 checks is a string is a CRC32 hash. Alias for `IsHash(str, "crc32")` -func IsCRC32(str string) bool { - return IsHash(str, "crc32") -} - -// IsCRC32b checks is a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")` -func IsCRC32b(str string) bool { - return IsHash(str, "crc32b") -} - -// IsMD5 checks is a string is a MD5 hash. Alias for `IsHash(str, "md5")` -func IsMD5(str string) bool { - return IsHash(str, "md5") -} - -// IsMD4 checks is a string is a MD4 hash. Alias for `IsHash(str, "md4")` -func IsMD4(str string) bool { - return IsHash(str, "md4") -} - -// IsDialString validates the given string for usage with the various Dial() functions -func IsDialString(str string) bool { - if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { - return true - } - - return false -} - -// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP` -func IsIP(str string) bool { - return net.ParseIP(str) != nil -} - -// IsPort checks if a string represents a valid port -func IsPort(str string) bool { - if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { - return true - } - return false -} - -// IsIPv4 checks if the string is an IP version 4. -func IsIPv4(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ".") -} - -// IsIPv6 checks if the string is an IP version 6. 
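The IP helpers above and below all defer to net.ParseIP and disambiguate v4 from v6 purely on "." versus ":"; for example:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsIPv4("192.168.0.1")) // true
	fmt.Println(govalidator.IsIPv6("192.168.0.1")) // false: no colon
	fmt.Println(govalidator.IsIPv6("2001:db8::1")) // true
	fmt.Println(govalidator.IsPort("8080"))        // true: 1..65535
	fmt.Println(govalidator.IsPort("0"))           // false
}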
-func IsIPv6(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ":") -} - -// IsCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6) -func IsCIDR(str string) bool { - _, _, err := net.ParseCIDR(str) - return err == nil -} - -// IsMAC checks if a string is valid MAC address. -// Possible MAC formats: -// 01:23:45:67:89:ab -// 01:23:45:67:89:ab:cd:ef -// 01-23-45-67-89-ab -// 01-23-45-67-89-ab-cd-ef -// 0123.4567.89ab -// 0123.4567.89ab.cdef -func IsMAC(str string) bool { - _, err := net.ParseMAC(str) - return err == nil -} - -// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name -func IsHost(str string) bool { - return IsIP(str) || IsDNSName(str) -} - -// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId. -func IsMongoID(str string) bool { - return rxHexadecimal.MatchString(str) && (len(str) == 24) -} - -// IsLatitude checks if a string is valid latitude. -func IsLatitude(str string) bool { - return rxLatitude.MatchString(str) -} - -// IsLongitude checks if a string is valid longitude. -func IsLongitude(str string) bool { - return rxLongitude.MatchString(str) -} - -// IsIMEI checks if a string is valid IMEI -func IsIMEI(str string) bool { - return rxIMEI.MatchString(str) -} - -// IsIMSI checks if a string is valid IMSI -func IsIMSI(str string) bool { - if !rxIMSI.MatchString(str) { - return false - } - - mcc, err := strconv.ParseInt(str[0:3], 10, 32) - if err != nil { - return false - } - - switch mcc { - case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219: - case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235: - case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257: - case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278: - case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293: - case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314: - case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346: - case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364: - case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402: - case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417: - case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428: - case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441: - case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467: - case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528: - case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545: - case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555: - case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611: - case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621: - case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631: - case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641: - case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652: - case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708: - case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736: - case 738, 740, 742, 744, 746, 748, 750, 995: - return true - default: - return false - } - return true -} - -// IsRsaPublicKey checks if a string is valid public key with provided length -func IsRsaPublicKey(str string, keylen int) bool { - bb := bytes.NewBufferString(str) - pemBytes, err := ioutil.ReadAll(bb) - if err != nil { - return false - } - block, _ := pem.Decode(pemBytes) - if block != nil && block.Type != "PUBLIC KEY" { - return false - } - var der []byte - - if block != nil { - der = block.Bytes - } else { - der, err = base64.StdEncoding.DecodeString(str) - if err != nil { - return false - } - } - - key, err := x509.ParsePKIXPublicKey(der) - if err != nil { - return false - } 
- pubkey, ok := key.(*rsa.PublicKey) - if !ok { - return false - } - bitlen := len(pubkey.N.Bytes()) * 8 - return bitlen == int(keylen) -} - -// IsRegex checks if a give string is a valid regex with RE2 syntax or not -func IsRegex(str string) bool { - if _, err := regexp.Compile(str); err == nil { - return true - } - return false -} - -func toJSONName(tag string) string { - if tag == "" { - return "" - } - - // JSON name always comes first. If there's no options then split[0] is - // JSON name, if JSON name is not set, then split[0] is an empty string. - split := strings.SplitN(tag, ",", 2) - - name := split[0] - - // However it is possible that the field is skipped when - // (de-)serializing from/to JSON, in which case assume that there is no - // tag name to use - if name == "-" { - return "" - } - return name -} - -func prependPathToErrors(err error, path string) error { - switch err2 := err.(type) { - case Error: - err2.Path = append([]string{path}, err2.Path...) - return err2 - case Errors: - errors := err2.Errors() - for i, err3 := range errors { - errors[i] = prependPathToErrors(err3, path) - } - return err2 - } - return err -} - -// ValidateArray performs validation according to condition iterator that validates every element of the array -func ValidateArray(array []interface{}, iterator ConditionIterator) bool { - return Every(array, iterator) -} - -// ValidateMap use validation map for fields. -// result will be equal to `false` if there are any errors. -// s is the map containing the data to be validated. -// m is the validation map in the form: -// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} -func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - var errs Errors - var index int - val := reflect.ValueOf(s) - for key, value := range s { - presentResult := true - validator, ok := m[key] - if !ok { - presentResult = false - var err error - err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - valueField := reflect.ValueOf(value) - mapResult := true - typeResult := true - structResult := true - resultField := true - switch subValidator := validator.(type) { - case map[string]interface{}: - var err error - if v, ok := value.(map[string]interface{}); !ok { - mapResult = false - err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } else { - mapResult, err = ValidateMap(v, subValidator) - if err != nil { - mapResult = false - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - case string: - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - subValidator != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - resultField, err = typeCheck(valueField, reflect.StructField{ - Name: key, - PkgPath: "", - Type: val.Type(), - Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), - Offset: 0, - Index: []int{index}, - Anonymous: false, - }, val, nil) - if err != nil { - errs = append(errs, err) - } - case nil: - // already handlerd when checked before - default: - typeResult = 
false - err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - result = result && presentResult && typeResult && resultField && structResult && mapResult - index++ - } - // checks required keys - requiredResult := true - for key, value := range m { - if schema, ok := value.(string); ok { - tags := parseTagIntoMap(schema) - if required, ok := tags["required"]; ok { - if _, ok := s[key]; !ok { - requiredResult = false - if required.customErrorMessage != "" { - err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} - } else { - err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} - } - errs = append(errs, err) - } - } - } - } - - if len(errs) > 0 { - err = errs - } - return result && requiredResult, err -} - -// ValidateStruct use tags for fields. -// result will be equal to `false` if there are any errors. -// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) -func ValidateStruct(s interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - val := reflect.ValueOf(s) - if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { - val = val.Elem() - } - // we only accept structs - if val.Kind() != reflect.Struct { - return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) - } - var errs Errors - for i := 0; i < val.NumField(); i++ { - valueField := val.Field(i) - typeField := val.Type().Field(i) - if typeField.PkgPath != "" { - continue // Private field - } - structResult := true - if valueField.Kind() == reflect.Interface { - valueField = valueField.Elem() - } - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - typeField.Tag.Get(tagName) != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, typeField.Name) - errs = append(errs, err) - } - } - resultField, err2 := typeCheck(valueField, typeField, val, nil) - if err2 != nil { - - // Replace structure name with JSON name if there is a tag on the variable - jsonTag := toJSONName(typeField.Tag.Get("json")) - if jsonTag != "" { - switch jsonError := err2.(type) { - case Error: - jsonError.Name = jsonTag - err2 = jsonError - case Errors: - for i2, err3 := range jsonError { - switch customErr := err3.(type) { - case Error: - customErr.Name = jsonTag - jsonError[i2] = customErr - } - } - - err2 = jsonError - } - } - - errs = append(errs, err2) - } - result = result && resultField && structResult - } - if len(errs) > 0 { - err = errs - } - return result, err -} - -// ValidateStructAsync performs async validation of the struct and returns results through the channels -func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) { - res := make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) - - isValid, isFailed := ValidateStruct(s) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// ValidateMapAsync performs async validation of the map and returns results through the channels -func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) { - res := make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) 
- - isValid, isFailed := ValidateMap(s, m) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} -func parseTagIntoMap(tag string) tagOptionsMap { - optionsMap := make(tagOptionsMap) - options := strings.Split(tag, ",") - - for i, option := range options { - option = strings.TrimSpace(option) - - validationOptions := strings.Split(option, "~") - if !isValidTag(validationOptions[0]) { - continue - } - if len(validationOptions) == 2 { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} - } else { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} - } - } - return optionsMap -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -// IsSSN will validate the given string as a U.S. Social Security Number -func IsSSN(str string) bool { - if str == "" || len(str) != 11 { - return false - } - return rxSSN.MatchString(str) -} - -// IsSemver checks if string is valid semantic version -func IsSemver(str string) bool { - return rxSemver.MatchString(str) -} - -// IsType checks if interface is of some type -func IsType(v interface{}, params ...string) bool { - if len(params) == 1 { - typ := params[0] - return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) - } - return false -} - -// IsTime checks if string is valid according to given format -func IsTime(str string, format string) bool { - _, err := time.Parse(format, str) - return err == nil -} - -// IsUnixTime checks if string is valid unix timestamp value -func IsUnixTime(str string) bool { - if _, err := strconv.Atoi(str); err == nil { - return true - } - return false -} - -// IsRFC3339 checks if string is valid timestamp value according to RFC3339 -func IsRFC3339(str string) bool { - return IsTime(str, time.RFC3339) -} - -// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. -func IsRFC3339WithoutZone(str string) bool { - return IsTime(str, rfc3339WithoutZone) -} - -// IsISO4217 checks if string is valid ISO currency code -func IsISO4217(str string) bool { - for _, currency := range ISO4217List { - if str == currency { - return true - } - } - - return false -} - -// ByteLength checks string's length -func ByteLength(str string, params ...string) bool { - if len(params) == 2 { - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return len(str) >= int(min) && len(str) <= int(max) - } - - return false -} - -// RuneLength checks string's length -// Alias for StringLength -func RuneLength(str string, params ...string) bool { - return StringLength(str, params...) -} - -// IsRsaPub checks whether string is valid RSA key -// Alias for IsRsaPublicKey -func IsRsaPub(str string, params ...string) bool { - if len(params) == 1 { - len, _ := ToInt(params[0]) - return IsRsaPublicKey(str, int(len)) - } - - return false -} - -// StringMatches checks if a string matches a given pattern. 
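// An illustrative sketch of the two validation entry points documented above;
// the struct, tags, and data here are invented for the example. ValidateStruct
// walks tagged struct fields, while ValidateMap checks a data map against a
// validation map of the same shape.
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type user struct {
	Name  string `valid:"required,alpha"`
	Email string `valid:"required,email"`
}

func main() {
	// Struct form: false, with an error naming the offending field.
	ok, err := govalidator.ValidateStruct(user{Name: "Ada", Email: "not-an-email"})
	fmt.Println(ok, err)

	// Map form: every data key must appear in the validation map.
	ok, err = govalidator.ValidateMap(
		map[string]interface{}{"name": "Ada"},
		map[string]interface{}{"name": "required,alpha"},
	)
	fmt.Println(ok, err) // true <nil>
}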
-func StringMatches(s string, params ...string) bool { - if len(params) == 1 { - pattern := params[0] - return Matches(s, pattern) - } - return false -} - -// StringLength checks string's length (including multi byte strings) -func StringLength(str string, params ...string) bool { - - if len(params) == 2 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return strLength >= int(min) && strLength <= int(max) - } - - return false -} - -// MinStringLength checks string's minimum length (including multi byte strings) -func MinStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - return strLength >= int(min) - } - - return false -} - -// MaxStringLength checks string's maximum length (including multi byte strings) -func MaxStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - max, _ := ToInt(params[0]) - return strLength <= int(max) - } - - return false -} - -// Range checks string's length -func Range(str string, params ...string) bool { - if len(params) == 2 { - value, _ := ToFloat(str) - min, _ := ToFloat(params[0]) - max, _ := ToFloat(params[1]) - return InRange(value, min, max) - } - - return false -} - -// IsInRaw checks if string is in list of allowed values -func IsInRaw(str string, params ...string) bool { - if len(params) == 1 { - rawParams := params[0] - - parsedParams := strings.Split(rawParams, "|") - - return IsIn(str, parsedParams...) - } - - return false -} - -// IsIn checks if string str is a member of the set of strings params -func IsIn(str string, params ...string) bool { - for _, param := range params { - if str == param { - return true - } - } - - return false -} - -func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { - if nilPtrAllowedByRequired { - k := v.Kind() - if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { - return true, nil - } - } - - if requiredOption, isRequired := options["required"]; isRequired { - if len(requiredOption.customErrorMessage) > 0 { - return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} - } - return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} - } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { - return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} - } - // not required and empty is valid - return true, nil -} - -func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { - if !v.IsValid() { - return false, nil - } - - tag := t.Tag.Get(tagName) - - // checks if the field should be ignored - switch tag { - case "": - if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { - if !fieldsRequiredByDefault { - return true, nil - } - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} - } - case "-": - return true, nil - } - - isRootType := false - if options == nil { - isRootType = true - options = parseTagIntoMap(tag) - } - - if isEmptyValue(v) { - // an empty value is not validated, checks only required - isValid, resultErr = checkRequired(v, t, options) - for key := range options { - delete(options, key) - } - return isValid, resultErr - } - - var 
customTypeErrors Errors - optionsOrder := options.orderedKeys() - for _, validatorName := range optionsOrder { - validatorStruct := options[validatorName] - if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { - delete(options, validatorName) - - if result := validatefunc(v.Interface(), o.Interface()); !result { - if len(validatorStruct.customErrorMessage) > 0 { - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) - continue - } - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) - } - } - } - - if len(customTypeErrors.Errors()) > 0 { - return false, customTypeErrors - } - - if isRootType { - // Ensure that we've checked the value by all specified validators before report that the value is valid - defer func() { - delete(options, "optional") - delete(options, "required") - - if isValid && resultErr == nil && len(options) != 0 { - optionsOrder := options.orderedKeys() - for _, validator := range optionsOrder { - isValid = false - resultErr = Error{t.Name, fmt.Errorf( - "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} - return - } - } - }() - } - - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' { - validator = validator[1:] - negate = true - } - - // checks for interface param validators - for key, value := range InterfaceParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := InterfaceParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - field := fmt.Sprint(v) - if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - } - } - - switch v.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64, - reflect.String: - // for each tag option checks the map of validator functions - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' 
{ - validator = validator[1:] - negate = true - } - - // checks for param validators - for key, value := range ParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := ParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - // type not yet supported, fail - return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} - } - } - - if validatefunc, ok := TagMap[validator]; ok { - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field); !result && !negate || result && negate { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - //Not Yet Supported Types (Fail here!) 
- err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) - return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} - } - } - } - return true, nil - case reflect.Map: - if v.Type().Key().Kind() != reflect.String { - return false, &UnsupportedTypeError{v.Type()} - } - var sv stringValues - sv = v.MapKeys() - sort.Sort(sv) - result := true - for i, k := range sv { - var resultItem bool - var err error - if v.MapIndex(k).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.MapIndex(k), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Slice, reflect.Array: - result := true - for i := 0; i < v.Len(); i++ { - var resultItem bool - var err error - if v.Index(i).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.Index(i), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.Index(i).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Interface: - // If the value is an interface then encode its element - if v.IsNil() { - return true, nil - } - return ValidateStruct(v.Interface()) - case reflect.Ptr: - // If the value is a pointer then checks its element - if v.IsNil() { - return true, nil - } - return typeCheck(v.Elem(), t, o, options) - case reflect.Struct: - return true, nil - default: - return false, &UnsupportedTypeError{v.Type()} - } -} - -func stripParams(validatorString string) string { - return paramsRegexp.ReplaceAllString(validatorString, "") -} - -// isEmptyValue checks whether value empty or not -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.String, reflect.Array: - return v.Len() == 0 - case reflect.Map, reflect.Slice: - return v.Len() == 0 || v.IsNil() - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) -} - -// ErrorByField returns error for specified field of the struct -// validated by ValidateStruct or empty string if there are no errors -// or this field doesn't exists or doesn't have any errors. -func ErrorByField(e error, field string) string { - if e == nil { - return "" - } - return ErrorsByField(e)[field] -} - -// ErrorsByField returns map of errors of the struct validated -// by ValidateStruct or empty map if there are no errors. 
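// A short sketch of the error helpers documented above: ValidateStruct returns
// an Errors value, and ErrorsByField flattens it into a field-to-message map
// (field name and tag invented for the example).
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type form struct {
	Age string `valid:"int"`
}

func main() {
	_, err := govalidator.ValidateStruct(form{Age: "abc"})
	fmt.Println(govalidator.ErrorsByField(err)["Age"]) // message for one field
	fmt.Println(govalidator.ErrorByField(err, "Age"))  // same lookup, single field
}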
-func ErrorsByField(e error) map[string]string { - m := make(map[string]string) - if e == nil { - return m - } - // prototype for ValidateStruct - - switch e := e.(type) { - case Error: - m[e.Name] = e.Err.Error() - case Errors: - for _, item := range e.Errors() { - n := ErrorsByField(item) - for k, v := range n { - m[k] = v - } - } - } - - return m -} - -// Error returns string equivalent for reflect.Type -func (e *UnsupportedTypeError) Error() string { - return "validator: unsupported type: " + e.Type.String() -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } -func (sv stringValues) get(i int) string { return sv[i].String() } - -func IsE164(str string) bool { - return rxE164.MatchString(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml deleted file mode 100644 index bc5f7b086..000000000 --- a/vendor/github.com/asaskevich/govalidator/wercker.yml +++ /dev/null @@ -1,15 +0,0 @@ -box: golang -build: - steps: - - setup-go-workspace - - - script: - name: go get - code: | - go version - go get -t ./... - - - script: - name: go test - code: | - go test -race -v ./... diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go deleted file mode 100644 index a64a692d5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ /dev/null @@ -1,9456 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sns - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -const opAddPermission = "AddPermission" - -// AddPermissionRequest generates a "aws/request.Request" representing the -// client's request for the AddPermission operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AddPermission for more information on using the AddPermission -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AddPermissionRequest method. -// req, resp := client.AddPermissionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/AddPermission -func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { - op := &request.Operation{ - Name: opAddPermission, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AddPermissionInput{} - } - - output = &AddPermissionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// AddPermission API operation for Amazon Simple Notification Service. 
-// -// Adds a statement to a topic's access control policy, granting access for -// the specified Amazon Web Services accounts to the specified actions. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation AddPermission for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/AddPermission -func (c *SNS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { - req, out := c.AddPermissionRequest(input) - return out, req.Send() -} - -// AddPermissionWithContext is the same as AddPermission with the addition of -// the ability to pass a context and additional request options. -// -// See AddPermission for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) AddPermissionWithContext(ctx aws.Context, input *AddPermissionInput, opts ...request.Option) (*AddPermissionOutput, error) { - req, out := c.AddPermissionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCheckIfPhoneNumberIsOptedOut = "CheckIfPhoneNumberIsOptedOut" - -// CheckIfPhoneNumberIsOptedOutRequest generates a "aws/request.Request" representing the -// client's request for the CheckIfPhoneNumberIsOptedOut operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CheckIfPhoneNumberIsOptedOut for more information on using the CheckIfPhoneNumberIsOptedOut -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CheckIfPhoneNumberIsOptedOutRequest method. 
-// req, resp := client.CheckIfPhoneNumberIsOptedOutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CheckIfPhoneNumberIsOptedOut -func (c *SNS) CheckIfPhoneNumberIsOptedOutRequest(input *CheckIfPhoneNumberIsOptedOutInput) (req *request.Request, output *CheckIfPhoneNumberIsOptedOutOutput) { - op := &request.Operation{ - Name: opCheckIfPhoneNumberIsOptedOut, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CheckIfPhoneNumberIsOptedOutInput{} - } - - output = &CheckIfPhoneNumberIsOptedOutOutput{} - req = c.newRequest(op, input, output) - return -} - -// CheckIfPhoneNumberIsOptedOut API operation for Amazon Simple Notification Service. -// -// Accepts a phone number and indicates whether the phone holder has opted out -// of receiving SMS messages from your Amazon Web Services account. You cannot -// send SMS messages to a number that is opted out. -// -// To resume sending messages, you can opt in the number by using the OptInPhoneNumber -// action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation CheckIfPhoneNumberIsOptedOut for usage and error information. -// -// Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CheckIfPhoneNumberIsOptedOut -func (c *SNS) CheckIfPhoneNumberIsOptedOut(input *CheckIfPhoneNumberIsOptedOutInput) (*CheckIfPhoneNumberIsOptedOutOutput, error) { - req, out := c.CheckIfPhoneNumberIsOptedOutRequest(input) - return out, req.Send() -} - -// CheckIfPhoneNumberIsOptedOutWithContext is the same as CheckIfPhoneNumberIsOptedOut with the addition of -// the ability to pass a context and additional request options. -// -// See CheckIfPhoneNumberIsOptedOut for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) CheckIfPhoneNumberIsOptedOutWithContext(ctx aws.Context, input *CheckIfPhoneNumberIsOptedOutInput, opts ...request.Option) (*CheckIfPhoneNumberIsOptedOutOutput, error) { - req, out := c.CheckIfPhoneNumberIsOptedOutRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opConfirmSubscription = "ConfirmSubscription" - -// ConfirmSubscriptionRequest generates a "aws/request.Request" representing the -// client's request for the ConfirmSubscription operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ConfirmSubscription for more information on using the ConfirmSubscription -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ConfirmSubscriptionRequest method. -// req, resp := client.ConfirmSubscriptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ConfirmSubscription -func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req *request.Request, output *ConfirmSubscriptionOutput) { - op := &request.Operation{ - Name: opConfirmSubscription, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ConfirmSubscriptionInput{} - } - - output = &ConfirmSubscriptionOutput{} - req = c.newRequest(op, input, output) - return -} - -// ConfirmSubscription API operation for Amazon Simple Notification Service. -// -// Verifies an endpoint owner's intent to receive messages by validating the -// token sent to the endpoint by an earlier Subscribe action. If the token is -// valid, the action creates a new subscription and returns its Amazon Resource -// Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe -// flag is set to "true". -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ConfirmSubscription for usage and error information. -// -// Returned Error Codes: -// * ErrCodeSubscriptionLimitExceededException "SubscriptionLimitExceeded" -// Indicates that the customer already owns the maximum allowed number of subscriptions. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" -// Indicates that the number of filter polices in your Amazon Web Services account -// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit -// Increase case in the Amazon Web Services Support Center. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ConfirmSubscription -func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubscriptionOutput, error) { - req, out := c.ConfirmSubscriptionRequest(input) - return out, req.Send() -} - -// ConfirmSubscriptionWithContext is the same as ConfirmSubscription with the addition of -// the ability to pass a context and additional request options. 
-// -// See ConfirmSubscription for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ConfirmSubscriptionWithContext(ctx aws.Context, input *ConfirmSubscriptionInput, opts ...request.Option) (*ConfirmSubscriptionOutput, error) { - req, out := c.ConfirmSubscriptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreatePlatformApplication = "CreatePlatformApplication" - -// CreatePlatformApplicationRequest generates a "aws/request.Request" representing the -// client's request for the CreatePlatformApplication operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePlatformApplication for more information on using the CreatePlatformApplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreatePlatformApplicationRequest method. -// req, resp := client.CreatePlatformApplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformApplication -func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationInput) (req *request.Request, output *CreatePlatformApplicationOutput) { - op := &request.Operation{ - Name: opCreatePlatformApplication, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePlatformApplicationInput{} - } - - output = &CreatePlatformApplicationOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePlatformApplication API operation for Amazon Simple Notification Service. -// -// Creates a platform application object for one of the supported push notification -// services, such as APNS and GCM (Firebase Cloud Messaging), to which devices -// and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential -// attributes when using the CreatePlatformApplication action. -// -// PlatformPrincipal and PlatformCredential are received from the notification -// service. -// -// * For ADM, PlatformPrincipal is client id and PlatformCredential is client -// secret. -// -// * For Baidu, PlatformPrincipal is API key and PlatformCredential is secret -// key. -// -// * For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal -// is SSL certificate and PlatformCredential is private key. -// -// * For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal -// is signing key ID and PlatformCredential is signing key. -// -// * For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and -// the PlatformCredential is API key. -// -// * For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential -// is private key. -// -// * For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential -// is secret key. 
-// -// You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint -// action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation CreatePlatformApplication for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformApplication -func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) (*CreatePlatformApplicationOutput, error) { - req, out := c.CreatePlatformApplicationRequest(input) - return out, req.Send() -} - -// CreatePlatformApplicationWithContext is the same as CreatePlatformApplication with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePlatformApplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) CreatePlatformApplicationWithContext(ctx aws.Context, input *CreatePlatformApplicationInput, opts ...request.Option) (*CreatePlatformApplicationOutput, error) { - req, out := c.CreatePlatformApplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreatePlatformEndpoint = "CreatePlatformEndpoint" - -// CreatePlatformEndpointRequest generates a "aws/request.Request" representing the -// client's request for the CreatePlatformEndpoint operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePlatformEndpoint for more information on using the CreatePlatformEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreatePlatformEndpointRequest method. 
-// req, resp := client.CreatePlatformEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformEndpoint -func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) (req *request.Request, output *CreatePlatformEndpointOutput) { - op := &request.Operation{ - Name: opCreatePlatformEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePlatformEndpointInput{} - } - - output = &CreatePlatformEndpointOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePlatformEndpoint API operation for Amazon Simple Notification Service. -// -// Creates an endpoint for a device and mobile app on one of the supported push -// notification services, such as GCM (Firebase Cloud Messaging) and APNS. CreatePlatformEndpoint -// requires the PlatformApplicationArn that is returned from CreatePlatformApplication. -// You can use the returned EndpointArn to send a message to a mobile app or -// by the Subscribe action for subscription to a topic. The CreatePlatformEndpoint -// action is idempotent, so if the requester already owns an endpoint with the -// same device token and attributes, that endpoint's ARN is returned without -// creating a new endpoint. For more information, see Using Amazon SNS Mobile -// Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// When using CreatePlatformEndpoint with Baidu, two attributes must be provided: -// ChannelId and UserId. The token field must also contain the ChannelId. For -// more information, see Creating an Amazon SNS Endpoint for Baidu (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePushBaiduEndpoint.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation CreatePlatformEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformEndpoint -func (c *SNS) CreatePlatformEndpoint(input *CreatePlatformEndpointInput) (*CreatePlatformEndpointOutput, error) { - req, out := c.CreatePlatformEndpointRequest(input) - return out, req.Send() -} - -// CreatePlatformEndpointWithContext is the same as CreatePlatformEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePlatformEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
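// An illustrative sketch of the CreatePlatformEndpoint flow described above;
// the session setup, application ARN, and device token are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.Must(session.NewSession()))

	// PlatformApplicationArn comes from a prior CreatePlatformApplication call;
	// Token is the device token issued by the push notification service.
	out, err := svc.CreatePlatformEndpoint(&sns.CreatePlatformEndpointInput{
		PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/example"),
		Token:                  aws.String("example-device-token"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The action is idempotent: repeating it with the same token and
	// attributes returns the existing endpoint's ARN.
	fmt.Println(aws.StringValue(out.EndpointArn))
}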
-func (c *SNS) CreatePlatformEndpointWithContext(ctx aws.Context, input *CreatePlatformEndpointInput, opts ...request.Option) (*CreatePlatformEndpointOutput, error) { - req, out := c.CreatePlatformEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateSMSSandboxPhoneNumber = "CreateSMSSandboxPhoneNumber" - -// CreateSMSSandboxPhoneNumberRequest generates a "aws/request.Request" representing the -// client's request for the CreateSMSSandboxPhoneNumber operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateSMSSandboxPhoneNumber for more information on using the CreateSMSSandboxPhoneNumber -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateSMSSandboxPhoneNumberRequest method. -// req, resp := client.CreateSMSSandboxPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateSMSSandboxPhoneNumber -func (c *SNS) CreateSMSSandboxPhoneNumberRequest(input *CreateSMSSandboxPhoneNumberInput) (req *request.Request, output *CreateSMSSandboxPhoneNumberOutput) { - op := &request.Operation{ - Name: opCreateSMSSandboxPhoneNumber, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateSMSSandboxPhoneNumberInput{} - } - - output = &CreateSMSSandboxPhoneNumberOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CreateSMSSandboxPhoneNumber API operation for Amazon Simple Notification Service. -// -// Adds a destination phone number to an Amazon Web Services account in the -// SMS sandbox and sends a one-time password (OTP) to that phone number. -// -// When you start using Amazon SNS to send SMS messages, your Amazon Web Services -// account is in the SMS sandbox. The SMS sandbox provides a safe environment -// for you to try Amazon SNS features without risking your reputation as an -// SMS sender. While your Amazon Web Services account is in the SMS sandbox, -// you can use all of the features of Amazon SNS. However, you can send SMS -// messages only to verified destination phone numbers. For more information, -// including how to move out of the sandbox to send messages without restrictions, -// see SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox.html) -// in the Amazon SNS Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation CreateSMSSandboxPhoneNumber for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. 
-// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeOptedOutException "OptedOut" -// Indicates that the specified phone number opted out of receiving SMS messages -// from your Amazon Web Services account. You can't send SMS messages to phone -// numbers that opt out. -// -// * ErrCodeUserErrorException "UserError" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateSMSSandboxPhoneNumber -func (c *SNS) CreateSMSSandboxPhoneNumber(input *CreateSMSSandboxPhoneNumberInput) (*CreateSMSSandboxPhoneNumberOutput, error) { - req, out := c.CreateSMSSandboxPhoneNumberRequest(input) - return out, req.Send() -} - -// CreateSMSSandboxPhoneNumberWithContext is the same as CreateSMSSandboxPhoneNumber with the addition of -// the ability to pass a context and additional request options. -// -// See CreateSMSSandboxPhoneNumber for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) CreateSMSSandboxPhoneNumberWithContext(ctx aws.Context, input *CreateSMSSandboxPhoneNumberInput, opts ...request.Option) (*CreateSMSSandboxPhoneNumberOutput, error) { - req, out := c.CreateSMSSandboxPhoneNumberRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateTopic = "CreateTopic" - -// CreateTopicRequest generates a "aws/request.Request" representing the -// client's request for the CreateTopic operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateTopic for more information on using the CreateTopic -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateTopicRequest method. -// req, resp := client.CreateTopicRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateTopic -func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, output *CreateTopicOutput) { - op := &request.Operation{ - Name: opCreateTopic, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateTopicInput{} - } - - output = &CreateTopicOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateTopic API operation for Amazon Simple Notification Service. -// -// Creates a topic to which notifications can be published. 
Users can create -// at most 100,000 standard topics (at most 1,000 FIFO topics). For more information, -// see Creating an Amazon SNS topic (https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html) -// in the Amazon SNS Developer Guide. This action is idempotent, so if the requester -// already owns a topic with the specified name, that topic's ARN is returned -// without creating a new topic. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation CreateTopic for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeTopicLimitExceededException "TopicLimitExceeded" -// Indicates that the customer already owns the maximum allowed number of topics. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. -// -// * ErrCodeTagLimitExceededException "TagLimitExceeded" -// Can't add more than 50 tags to a topic. -// -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. -// -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. -// -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateTopic -func (c *SNS) CreateTopic(input *CreateTopicInput) (*CreateTopicOutput, error) { - req, out := c.CreateTopicRequest(input) - return out, req.Send() -} - -// CreateTopicWithContext is the same as CreateTopic with the addition of -// the ability to pass a context and additional request options. -// -// See CreateTopic for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) CreateTopicWithContext(ctx aws.Context, input *CreateTopicInput, opts ...request.Option) (*CreateTopicOutput, error) { - req, out := c.CreateTopicRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteEndpoint = "DeleteEndpoint" - -// DeleteEndpointRequest generates a "aws/request.Request" representing the -// client's request for the DeleteEndpoint operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See DeleteEndpoint for more information on using the DeleteEndpoint -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteEndpointRequest method. -// req, resp := client.DeleteEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteEndpoint -func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { - op := &request.Operation{ - Name: opDeleteEndpoint, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteEndpointInput{} - } - - output = &DeleteEndpointOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteEndpoint API operation for Amazon Simple Notification Service. -// -// Deletes the endpoint for a device and mobile app from Amazon SNS. This action -// is idempotent. For more information, see Using Amazon SNS Mobile Push Notifications -// (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// When you delete an endpoint that is also subscribed to a topic, then you -// must also unsubscribe the endpoint from the topic. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation DeleteEndpoint for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteEndpoint -func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { - req, out := c.DeleteEndpointRequest(input) - return out, req.Send() -} - -// DeleteEndpointWithContext is the same as DeleteEndpoint with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteEndpoint for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) DeleteEndpointWithContext(ctx aws.Context, input *DeleteEndpointInput, opts ...request.Option) (*DeleteEndpointOutput, error) { - req, out := c.DeleteEndpointRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeletePlatformApplication = "DeletePlatformApplication" - -// DeletePlatformApplicationRequest generates a "aws/request.Request" representing the -// client's request for the DeletePlatformApplication operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeletePlatformApplication for more information on using the DeletePlatformApplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeletePlatformApplicationRequest method. -// req, resp := client.DeletePlatformApplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeletePlatformApplication -func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationInput) (req *request.Request, output *DeletePlatformApplicationOutput) { - op := &request.Operation{ - Name: opDeletePlatformApplication, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeletePlatformApplicationInput{} - } - - output = &DeletePlatformApplicationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeletePlatformApplication API operation for Amazon Simple Notification Service. -// -// Deletes a platform application object for one of the supported push notification -// services, such as APNS and GCM (Firebase Cloud Messaging). For more information, -// see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation DeletePlatformApplication for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeletePlatformApplication -func (c *SNS) DeletePlatformApplication(input *DeletePlatformApplicationInput) (*DeletePlatformApplicationOutput, error) { - req, out := c.DeletePlatformApplicationRequest(input) - return out, req.Send() -} - -// DeletePlatformApplicationWithContext is the same as DeletePlatformApplication with the addition of -// the ability to pass a context and additional request options. -// -// See DeletePlatformApplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SNS) DeletePlatformApplicationWithContext(ctx aws.Context, input *DeletePlatformApplicationInput, opts ...request.Option) (*DeletePlatformApplicationOutput, error) { - req, out := c.DeletePlatformApplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteSMSSandboxPhoneNumber = "DeleteSMSSandboxPhoneNumber" - -// DeleteSMSSandboxPhoneNumberRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSMSSandboxPhoneNumber operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteSMSSandboxPhoneNumber for more information on using the DeleteSMSSandboxPhoneNumber -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteSMSSandboxPhoneNumberRequest method. -// req, resp := client.DeleteSMSSandboxPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteSMSSandboxPhoneNumber -func (c *SNS) DeleteSMSSandboxPhoneNumberRequest(input *DeleteSMSSandboxPhoneNumberInput) (req *request.Request, output *DeleteSMSSandboxPhoneNumberOutput) { - op := &request.Operation{ - Name: opDeleteSMSSandboxPhoneNumber, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteSMSSandboxPhoneNumberInput{} - } - - output = &DeleteSMSSandboxPhoneNumberOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteSMSSandboxPhoneNumber API operation for Amazon Simple Notification Service. -// -// Deletes an Amazon Web Services account's verified or pending phone number -// from the SMS sandbox. -// -// When you start using Amazon SNS to send SMS messages, your Amazon Web Services -// account is in the SMS sandbox. The SMS sandbox provides a safe environment -// for you to try Amazon SNS features without risking your reputation as an -// SMS sender. While your Amazon Web Services account is in the SMS sandbox, -// you can use all of the features of Amazon SNS. However, you can send SMS -// messages only to verified destination phone numbers. For more information, -// including how to move out of the sandbox to send messages without restrictions, -// see SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox.html) -// in the Amazon SNS Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation DeleteSMSSandboxPhoneNumber for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. 
-// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. -// -// * ErrCodeUserErrorException "UserError" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteSMSSandboxPhoneNumber -func (c *SNS) DeleteSMSSandboxPhoneNumber(input *DeleteSMSSandboxPhoneNumberInput) (*DeleteSMSSandboxPhoneNumberOutput, error) { - req, out := c.DeleteSMSSandboxPhoneNumberRequest(input) - return out, req.Send() -} - -// DeleteSMSSandboxPhoneNumberWithContext is the same as DeleteSMSSandboxPhoneNumber with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteSMSSandboxPhoneNumber for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) DeleteSMSSandboxPhoneNumberWithContext(ctx aws.Context, input *DeleteSMSSandboxPhoneNumberInput, opts ...request.Option) (*DeleteSMSSandboxPhoneNumberOutput, error) { - req, out := c.DeleteSMSSandboxPhoneNumberRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteTopic = "DeleteTopic" - -// DeleteTopicRequest generates a "aws/request.Request" representing the -// client's request for the DeleteTopic operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteTopic for more information on using the DeleteTopic -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteTopicRequest method. -// req, resp := client.DeleteTopicRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteTopic -func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, output *DeleteTopicOutput) { - op := &request.Operation{ - Name: opDeleteTopic, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteTopicInput{} - } - - output = &DeleteTopicOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteTopic API operation for Amazon Simple Notification Service. -// -// Deletes a topic and all its subscriptions. Deleting a topic might prevent -// some messages previously sent to the topic from being delivered to subscribers. 
-// This action is idempotent, so deleting a topic that does not exist does not -// result in an error. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation DeleteTopic for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. -// -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. -// -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteTopic -func (c *SNS) DeleteTopic(input *DeleteTopicInput) (*DeleteTopicOutput, error) { - req, out := c.DeleteTopicRequest(input) - return out, req.Send() -} - -// DeleteTopicWithContext is the same as DeleteTopic with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteTopic for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) DeleteTopicWithContext(ctx aws.Context, input *DeleteTopicInput, opts ...request.Option) (*DeleteTopicOutput, error) { - req, out := c.DeleteTopicRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetEndpointAttributes = "GetEndpointAttributes" - -// GetEndpointAttributesRequest generates a "aws/request.Request" representing the -// client's request for the GetEndpointAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetEndpointAttributes for more information on using the GetEndpointAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetEndpointAttributesRequest method. 
-// req, resp := client.GetEndpointAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetEndpointAttributes -func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (req *request.Request, output *GetEndpointAttributesOutput) { - op := &request.Operation{ - Name: opGetEndpointAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetEndpointAttributesInput{} - } - - output = &GetEndpointAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetEndpointAttributes API operation for Amazon Simple Notification Service. -// -// Retrieves the endpoint attributes for a device on one of the supported push -// notification services, such as GCM (Firebase Cloud Messaging) and APNS. For -// more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation GetEndpointAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetEndpointAttributes -func (c *SNS) GetEndpointAttributes(input *GetEndpointAttributesInput) (*GetEndpointAttributesOutput, error) { - req, out := c.GetEndpointAttributesRequest(input) - return out, req.Send() -} - -// GetEndpointAttributesWithContext is the same as GetEndpointAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See GetEndpointAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) GetEndpointAttributesWithContext(ctx aws.Context, input *GetEndpointAttributesInput, opts ...request.Option) (*GetEndpointAttributesOutput, error) { - req, out := c.GetEndpointAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetPlatformApplicationAttributes = "GetPlatformApplicationAttributes" - -// GetPlatformApplicationAttributesRequest generates a "aws/request.Request" representing the -// client's request for the GetPlatformApplicationAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GetPlatformApplicationAttributes for more information on using the GetPlatformApplicationAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPlatformApplicationAttributesRequest method. -// req, resp := client.GetPlatformApplicationAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetPlatformApplicationAttributes -func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicationAttributesInput) (req *request.Request, output *GetPlatformApplicationAttributesOutput) { - op := &request.Operation{ - Name: opGetPlatformApplicationAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetPlatformApplicationAttributesInput{} - } - - output = &GetPlatformApplicationAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPlatformApplicationAttributes API operation for Amazon Simple Notification Service. -// -// Retrieves the attributes of the platform application object for the supported -// push notification services, such as APNS and GCM (Firebase Cloud Messaging). -// For more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation GetPlatformApplicationAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetPlatformApplicationAttributes -func (c *SNS) GetPlatformApplicationAttributes(input *GetPlatformApplicationAttributesInput) (*GetPlatformApplicationAttributesOutput, error) { - req, out := c.GetPlatformApplicationAttributesRequest(input) - return out, req.Send() -} - -// GetPlatformApplicationAttributesWithContext is the same as GetPlatformApplicationAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See GetPlatformApplicationAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SNS) GetPlatformApplicationAttributesWithContext(ctx aws.Context, input *GetPlatformApplicationAttributesInput, opts ...request.Option) (*GetPlatformApplicationAttributesOutput, error) { - req, out := c.GetPlatformApplicationAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSMSAttributes = "GetSMSAttributes" - -// GetSMSAttributesRequest generates a "aws/request.Request" representing the -// client's request for the GetSMSAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSMSAttributes for more information on using the GetSMSAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSMSAttributesRequest method. -// req, resp := client.GetSMSAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSAttributes -func (c *SNS) GetSMSAttributesRequest(input *GetSMSAttributesInput) (req *request.Request, output *GetSMSAttributesOutput) { - op := &request.Operation{ - Name: opGetSMSAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSMSAttributesInput{} - } - - output = &GetSMSAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSMSAttributes API operation for Amazon Simple Notification Service. -// -// Returns the settings for sending SMS messages from your Amazon Web Services -// account. -// -// These settings are set with the SetSMSAttributes action. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation GetSMSAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSAttributes -func (c *SNS) GetSMSAttributes(input *GetSMSAttributesInput) (*GetSMSAttributesOutput, error) { - req, out := c.GetSMSAttributesRequest(input) - return out, req.Send() -} - -// GetSMSAttributesWithContext is the same as GetSMSAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See GetSMSAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) GetSMSAttributesWithContext(ctx aws.Context, input *GetSMSAttributesInput, opts ...request.Option) (*GetSMSAttributesOutput, error) { - req, out := c.GetSMSAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSMSSandboxAccountStatus = "GetSMSSandboxAccountStatus" - -// GetSMSSandboxAccountStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetSMSSandboxAccountStatus operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSMSSandboxAccountStatus for more information on using the GetSMSSandboxAccountStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSMSSandboxAccountStatusRequest method. -// req, resp := client.GetSMSSandboxAccountStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSSandboxAccountStatus -func (c *SNS) GetSMSSandboxAccountStatusRequest(input *GetSMSSandboxAccountStatusInput) (req *request.Request, output *GetSMSSandboxAccountStatusOutput) { - op := &request.Operation{ - Name: opGetSMSSandboxAccountStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSMSSandboxAccountStatusInput{} - } - - output = &GetSMSSandboxAccountStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSMSSandboxAccountStatus API operation for Amazon Simple Notification Service. -// -// Retrieves the SMS sandbox status for the calling Amazon Web Services account -// in the target Amazon Web Services Region. -// -// When you start using Amazon SNS to send SMS messages, your Amazon Web Services -// account is in the SMS sandbox. The SMS sandbox provides a safe environment -// for you to try Amazon SNS features without risking your reputation as an -// SMS sender. While your Amazon Web Services account is in the SMS sandbox, -// you can use all of the features of Amazon SNS. However, you can send SMS -// messages only to verified destination phone numbers. For more information, -// including how to move out of the sandbox to send messages without restrictions, -// see SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox.html) -// in the Amazon SNS Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation GetSMSSandboxAccountStatus for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. 
-// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSSandboxAccountStatus -func (c *SNS) GetSMSSandboxAccountStatus(input *GetSMSSandboxAccountStatusInput) (*GetSMSSandboxAccountStatusOutput, error) { - req, out := c.GetSMSSandboxAccountStatusRequest(input) - return out, req.Send() -} - -// GetSMSSandboxAccountStatusWithContext is the same as GetSMSSandboxAccountStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetSMSSandboxAccountStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) GetSMSSandboxAccountStatusWithContext(ctx aws.Context, input *GetSMSSandboxAccountStatusInput, opts ...request.Option) (*GetSMSSandboxAccountStatusOutput, error) { - req, out := c.GetSMSSandboxAccountStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSubscriptionAttributes = "GetSubscriptionAttributes" - -// GetSubscriptionAttributesRequest generates a "aws/request.Request" representing the -// client's request for the GetSubscriptionAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSubscriptionAttributes for more information on using the GetSubscriptionAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSubscriptionAttributesRequest method. -// req, resp := client.GetSubscriptionAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSubscriptionAttributes -func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesInput) (req *request.Request, output *GetSubscriptionAttributesOutput) { - op := &request.Operation{ - Name: opGetSubscriptionAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSubscriptionAttributesInput{} - } - - output = &GetSubscriptionAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSubscriptionAttributes API operation for Amazon Simple Notification Service. -// -// Returns all of the properties of a subscription. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation GetSubscriptionAttributes for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSubscriptionAttributes -func (c *SNS) GetSubscriptionAttributes(input *GetSubscriptionAttributesInput) (*GetSubscriptionAttributesOutput, error) { - req, out := c.GetSubscriptionAttributesRequest(input) - return out, req.Send() -} - -// GetSubscriptionAttributesWithContext is the same as GetSubscriptionAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See GetSubscriptionAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) GetSubscriptionAttributesWithContext(ctx aws.Context, input *GetSubscriptionAttributesInput, opts ...request.Option) (*GetSubscriptionAttributesOutput, error) { - req, out := c.GetSubscriptionAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetTopicAttributes = "GetTopicAttributes" - -// GetTopicAttributesRequest generates a "aws/request.Request" representing the -// client's request for the GetTopicAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetTopicAttributes for more information on using the GetTopicAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetTopicAttributesRequest method. -// req, resp := client.GetTopicAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetTopicAttributes -func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *request.Request, output *GetTopicAttributesOutput) { - op := &request.Operation{ - Name: opGetTopicAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetTopicAttributesInput{} - } - - output = &GetTopicAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetTopicAttributes API operation for Amazon Simple Notification Service. -// -// Returns all of the properties of a topic. Topic properties returned might -// differ based on the authorization of the user. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation GetTopicAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetTopicAttributes -func (c *SNS) GetTopicAttributes(input *GetTopicAttributesInput) (*GetTopicAttributesOutput, error) { - req, out := c.GetTopicAttributesRequest(input) - return out, req.Send() -} - -// GetTopicAttributesWithContext is the same as GetTopicAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See GetTopicAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) GetTopicAttributesWithContext(ctx aws.Context, input *GetTopicAttributesInput, opts ...request.Option) (*GetTopicAttributesOutput, error) { - req, out := c.GetTopicAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListEndpointsByPlatformApplication = "ListEndpointsByPlatformApplication" - -// ListEndpointsByPlatformApplicationRequest generates a "aws/request.Request" representing the -// client's request for the ListEndpointsByPlatformApplication operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListEndpointsByPlatformApplication for more information on using the ListEndpointsByPlatformApplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListEndpointsByPlatformApplicationRequest method. 
-// req, resp := client.ListEndpointsByPlatformApplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListEndpointsByPlatformApplication -func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPlatformApplicationInput) (req *request.Request, output *ListEndpointsByPlatformApplicationOutput) { - op := &request.Operation{ - Name: opListEndpointsByPlatformApplication, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListEndpointsByPlatformApplicationInput{} - } - - output = &ListEndpointsByPlatformApplicationOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListEndpointsByPlatformApplication API operation for Amazon Simple Notification Service. -// -// Lists the endpoints and endpoint attributes for devices in a supported push -// notification service, such as GCM (Firebase Cloud Messaging) and APNS. The -// results for ListEndpointsByPlatformApplication are paginated and return a -// limited list of endpoints, up to 100. If additional records are available -// after the first page results, then a NextToken string will be returned. To -// receive the next page, you call ListEndpointsByPlatformApplication again -// using the NextToken string received from the previous call. When there are -// no more records to return, NextToken will be null. For more information, -// see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// This action is throttled at 30 transactions per second (TPS). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListEndpointsByPlatformApplication for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListEndpointsByPlatformApplication -func (c *SNS) ListEndpointsByPlatformApplication(input *ListEndpointsByPlatformApplicationInput) (*ListEndpointsByPlatformApplicationOutput, error) { - req, out := c.ListEndpointsByPlatformApplicationRequest(input) - return out, req.Send() -} - -// ListEndpointsByPlatformApplicationWithContext is the same as ListEndpointsByPlatformApplication with the addition of -// the ability to pass a context and additional request options. -// -// See ListEndpointsByPlatformApplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListEndpointsByPlatformApplicationWithContext(ctx aws.Context, input *ListEndpointsByPlatformApplicationInput, opts ...request.Option) (*ListEndpointsByPlatformApplicationOutput, error) { - req, out := c.ListEndpointsByPlatformApplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListEndpointsByPlatformApplicationPages iterates over the pages of a ListEndpointsByPlatformApplication operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListEndpointsByPlatformApplication method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListEndpointsByPlatformApplication operation. -// pageNum := 0 -// err := client.ListEndpointsByPlatformApplicationPages(params, -// func(page *sns.ListEndpointsByPlatformApplicationOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlatformApplicationInput, fn func(*ListEndpointsByPlatformApplicationOutput, bool) bool) error { - return c.ListEndpointsByPlatformApplicationPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListEndpointsByPlatformApplicationPagesWithContext same as ListEndpointsByPlatformApplicationPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListEndpointsByPlatformApplicationPagesWithContext(ctx aws.Context, input *ListEndpointsByPlatformApplicationInput, fn func(*ListEndpointsByPlatformApplicationOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListEndpointsByPlatformApplicationInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListEndpointsByPlatformApplicationRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListEndpointsByPlatformApplicationOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListOriginationNumbers = "ListOriginationNumbers" - -// ListOriginationNumbersRequest generates a "aws/request.Request" representing the -// client's request for the ListOriginationNumbers operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListOriginationNumbers for more information on using the ListOriginationNumbers -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListOriginationNumbersRequest method. 
-// req, resp := client.ListOriginationNumbersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListOriginationNumbers -func (c *SNS) ListOriginationNumbersRequest(input *ListOriginationNumbersInput) (req *request.Request, output *ListOriginationNumbersOutput) { - op := &request.Operation{ - Name: opListOriginationNumbers, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListOriginationNumbersInput{} - } - - output = &ListOriginationNumbersOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListOriginationNumbers API operation for Amazon Simple Notification Service. -// -// Lists the calling Amazon Web Services account's dedicated origination numbers -// and their metadata. For more information about origination numbers, see Origination -// numbers (https://docs.aws.amazon.com/sns/latest/dg/channels-sms-originating-identities-origination-numbers.html) -// in the Amazon SNS Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListOriginationNumbers for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeValidationException "ValidationException" -// Indicates that a parameter in the request is invalid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListOriginationNumbers -func (c *SNS) ListOriginationNumbers(input *ListOriginationNumbersInput) (*ListOriginationNumbersOutput, error) { - req, out := c.ListOriginationNumbersRequest(input) - return out, req.Send() -} - -// ListOriginationNumbersWithContext is the same as ListOriginationNumbers with the addition of -// the ability to pass a context and additional request options. -// -// See ListOriginationNumbers for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListOriginationNumbersWithContext(ctx aws.Context, input *ListOriginationNumbersInput, opts ...request.Option) (*ListOriginationNumbersOutput, error) { - req, out := c.ListOriginationNumbersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -// ListOriginationNumbersPages iterates over the pages of a ListOriginationNumbers operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListOriginationNumbers method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListOriginationNumbers operation. -// pageNum := 0 -// err := client.ListOriginationNumbersPages(params, -// func(page *sns.ListOriginationNumbersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListOriginationNumbersPages(input *ListOriginationNumbersInput, fn func(*ListOriginationNumbersOutput, bool) bool) error { - return c.ListOriginationNumbersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListOriginationNumbersPagesWithContext same as ListOriginationNumbersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListOriginationNumbersPagesWithContext(ctx aws.Context, input *ListOriginationNumbersInput, fn func(*ListOriginationNumbersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListOriginationNumbersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListOriginationNumbersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListOriginationNumbersOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListPhoneNumbersOptedOut = "ListPhoneNumbersOptedOut" - -// ListPhoneNumbersOptedOutRequest generates a "aws/request.Request" representing the -// client's request for the ListPhoneNumbersOptedOut operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListPhoneNumbersOptedOut for more information on using the ListPhoneNumbersOptedOut -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListPhoneNumbersOptedOutRequest method. 
-// req, resp := client.ListPhoneNumbersOptedOutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPhoneNumbersOptedOut -func (c *SNS) ListPhoneNumbersOptedOutRequest(input *ListPhoneNumbersOptedOutInput) (req *request.Request, output *ListPhoneNumbersOptedOutOutput) { - op := &request.Operation{ - Name: opListPhoneNumbersOptedOut, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListPhoneNumbersOptedOutInput{} - } - - output = &ListPhoneNumbersOptedOutOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListPhoneNumbersOptedOut API operation for Amazon Simple Notification Service. -// -// Returns a list of phone numbers that are opted out, meaning you cannot send -// SMS messages to them. -// -// The results for ListPhoneNumbersOptedOut are paginated, and each page returns -// up to 100 phone numbers. If additional phone numbers are available after -// the first page of results, then a NextToken string will be returned. To receive -// the next page, you call ListPhoneNumbersOptedOut again using the NextToken -// string received from the previous call. When there are no more records to -// return, NextToken will be null. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListPhoneNumbersOptedOut for usage and error information. -// -// Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPhoneNumbersOptedOut -func (c *SNS) ListPhoneNumbersOptedOut(input *ListPhoneNumbersOptedOutInput) (*ListPhoneNumbersOptedOutOutput, error) { - req, out := c.ListPhoneNumbersOptedOutRequest(input) - return out, req.Send() -} - -// ListPhoneNumbersOptedOutWithContext is the same as ListPhoneNumbersOptedOut with the addition of -// the ability to pass a context and additional request options. -// -// See ListPhoneNumbersOptedOut for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListPhoneNumbersOptedOutWithContext(ctx aws.Context, input *ListPhoneNumbersOptedOutInput, opts ...request.Option) (*ListPhoneNumbersOptedOutOutput, error) { - req, out := c.ListPhoneNumbersOptedOutRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -// ListPhoneNumbersOptedOutPages iterates over the pages of a ListPhoneNumbersOptedOut operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListPhoneNumbersOptedOut method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListPhoneNumbersOptedOut operation. -// pageNum := 0 -// err := client.ListPhoneNumbersOptedOutPages(params, -// func(page *sns.ListPhoneNumbersOptedOutOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListPhoneNumbersOptedOutPages(input *ListPhoneNumbersOptedOutInput, fn func(*ListPhoneNumbersOptedOutOutput, bool) bool) error { - return c.ListPhoneNumbersOptedOutPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListPhoneNumbersOptedOutPagesWithContext same as ListPhoneNumbersOptedOutPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListPhoneNumbersOptedOutPagesWithContext(ctx aws.Context, input *ListPhoneNumbersOptedOutInput, fn func(*ListPhoneNumbersOptedOutOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListPhoneNumbersOptedOutInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListPhoneNumbersOptedOutRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListPhoneNumbersOptedOutOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListPlatformApplications = "ListPlatformApplications" - -// ListPlatformApplicationsRequest generates a "aws/request.Request" representing the -// client's request for the ListPlatformApplications operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListPlatformApplications for more information on using the ListPlatformApplications -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListPlatformApplicationsRequest method. 
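Each operation here documents its error codes and tells callers to use runtime type assertions against awserr.Error, but that assertion is never shown. A hedged sketch against ListPhoneNumbersOptedOut, using only the sns error-code constants that appear in this file; the rest of the setup is illustrative:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        _, err := svc.ListPhoneNumbersOptedOut(&sns.ListPhoneNumbersOptedOutInput{})
        // A nil error simply fails the assertion, so this is safe either way.
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case sns.ErrCodeThrottledException:
                fmt.Println("throttled, retry later:", aerr.Message())
            case sns.ErrCodeAuthorizationErrorException:
                fmt.Println("access denied:", aerr.Message())
            default:
                fmt.Println(aerr.Code(), aerr.Message())
            }
        }
    }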
-// req, resp := client.ListPlatformApplicationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPlatformApplications -func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInput) (req *request.Request, output *ListPlatformApplicationsOutput) { - op := &request.Operation{ - Name: opListPlatformApplications, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListPlatformApplicationsInput{} - } - - output = &ListPlatformApplicationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListPlatformApplications API operation for Amazon Simple Notification Service. -// -// Lists the platform application objects for the supported push notification -// services, such as APNS and GCM (Firebase Cloud Messaging). The results for -// ListPlatformApplications are paginated and return a limited list of applications, -// up to 100. If additional records are available after the first page results, -// then a NextToken string will be returned. To receive the next page, you call -// ListPlatformApplications using the NextToken string received from the previous -// call. When there are no more records to return, NextToken will be null. For -// more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// This action is throttled at 15 transactions per second (TPS). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListPlatformApplications for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPlatformApplications -func (c *SNS) ListPlatformApplications(input *ListPlatformApplicationsInput) (*ListPlatformApplicationsOutput, error) { - req, out := c.ListPlatformApplicationsRequest(input) - return out, req.Send() -} - -// ListPlatformApplicationsWithContext is the same as ListPlatformApplications with the addition of -// the ability to pass a context and additional request options. -// -// See ListPlatformApplications for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SNS) ListPlatformApplicationsWithContext(ctx aws.Context, input *ListPlatformApplicationsInput, opts ...request.Option) (*ListPlatformApplicationsOutput, error) { - req, out := c.ListPlatformApplicationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListPlatformApplicationsPages iterates over the pages of a ListPlatformApplications operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListPlatformApplications method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListPlatformApplications operation. -// pageNum := 0 -// err := client.ListPlatformApplicationsPages(params, -// func(page *sns.ListPlatformApplicationsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput, fn func(*ListPlatformApplicationsOutput, bool) bool) error { - return c.ListPlatformApplicationsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListPlatformApplicationsPagesWithContext same as ListPlatformApplicationsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListPlatformApplicationsPagesWithContext(ctx aws.Context, input *ListPlatformApplicationsInput, fn func(*ListPlatformApplicationsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListPlatformApplicationsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListPlatformApplicationsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListPlatformApplicationsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListSMSSandboxPhoneNumbers = "ListSMSSandboxPhoneNumbers" - -// ListSMSSandboxPhoneNumbersRequest generates a "aws/request.Request" representing the -// client's request for the ListSMSSandboxPhoneNumbers operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListSMSSandboxPhoneNumbers for more information on using the ListSMSSandboxPhoneNumbers -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListSMSSandboxPhoneNumbersRequest method. 
-// req, resp := client.ListSMSSandboxPhoneNumbersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSMSSandboxPhoneNumbers -func (c *SNS) ListSMSSandboxPhoneNumbersRequest(input *ListSMSSandboxPhoneNumbersInput) (req *request.Request, output *ListSMSSandboxPhoneNumbersOutput) { - op := &request.Operation{ - Name: opListSMSSandboxPhoneNumbers, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListSMSSandboxPhoneNumbersInput{} - } - - output = &ListSMSSandboxPhoneNumbersOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListSMSSandboxPhoneNumbers API operation for Amazon Simple Notification Service. -// -// Lists the calling Amazon Web Services account's current verified and pending -// destination phone numbers in the SMS sandbox. -// -// When you start using Amazon SNS to send SMS messages, your Amazon Web Services -// account is in the SMS sandbox. The SMS sandbox provides a safe environment -// for you to try Amazon SNS features without risking your reputation as an -// SMS sender. While your Amazon Web Services account is in the SMS sandbox, -// you can use all of the features of Amazon SNS. However, you can send SMS -// messages only to verified destination phone numbers. For more information, -// including how to move out of the sandbox to send messages without restrictions, -// see SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox.html) -// in the Amazon SNS Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListSMSSandboxPhoneNumbers for usage and error information. -// -// Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. -// -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSMSSandboxPhoneNumbers -func (c *SNS) ListSMSSandboxPhoneNumbers(input *ListSMSSandboxPhoneNumbersInput) (*ListSMSSandboxPhoneNumbersOutput, error) { - req, out := c.ListSMSSandboxPhoneNumbersRequest(input) - return out, req.Send() -} - -// ListSMSSandboxPhoneNumbersWithContext is the same as ListSMSSandboxPhoneNumbers with the addition of -// the ability to pass a context and additional request options. -// -// See ListSMSSandboxPhoneNumbers for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListSMSSandboxPhoneNumbersWithContext(ctx aws.Context, input *ListSMSSandboxPhoneNumbersInput, opts ...request.Option) (*ListSMSSandboxPhoneNumbersOutput, error) { - req, out := c.ListSMSSandboxPhoneNumbersRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListSMSSandboxPhoneNumbersPages iterates over the pages of a ListSMSSandboxPhoneNumbers operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListSMSSandboxPhoneNumbers method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListSMSSandboxPhoneNumbers operation. -// pageNum := 0 -// err := client.ListSMSSandboxPhoneNumbersPages(params, -// func(page *sns.ListSMSSandboxPhoneNumbersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListSMSSandboxPhoneNumbersPages(input *ListSMSSandboxPhoneNumbersInput, fn func(*ListSMSSandboxPhoneNumbersOutput, bool) bool) error { - return c.ListSMSSandboxPhoneNumbersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListSMSSandboxPhoneNumbersPagesWithContext same as ListSMSSandboxPhoneNumbersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListSMSSandboxPhoneNumbersPagesWithContext(ctx aws.Context, input *ListSMSSandboxPhoneNumbersInput, fn func(*ListSMSSandboxPhoneNumbersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListSMSSandboxPhoneNumbersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListSMSSandboxPhoneNumbersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListSMSSandboxPhoneNumbersOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListSubscriptions = "ListSubscriptions" - -// ListSubscriptionsRequest generates a "aws/request.Request" representing the -// client's request for the ListSubscriptions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListSubscriptions for more information on using the ListSubscriptions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListSubscriptionsRequest method. 
-// req, resp := client.ListSubscriptionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptions -func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *request.Request, output *ListSubscriptionsOutput) { - op := &request.Operation{ - Name: opListSubscriptions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListSubscriptionsInput{} - } - - output = &ListSubscriptionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListSubscriptions API operation for Amazon Simple Notification Service. -// -// Returns a list of the requester's subscriptions. Each call returns a limited -// list of subscriptions, up to 100. If there are more subscriptions, a NextToken -// is also returned. Use the NextToken parameter in a new ListSubscriptions -// call to get further results. -// -// This action is throttled at 30 transactions per second (TPS). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListSubscriptions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptions -func (c *SNS) ListSubscriptions(input *ListSubscriptionsInput) (*ListSubscriptionsOutput, error) { - req, out := c.ListSubscriptionsRequest(input) - return out, req.Send() -} - -// ListSubscriptionsWithContext is the same as ListSubscriptions with the addition of -// the ability to pass a context and additional request options. -// -// See ListSubscriptions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListSubscriptionsWithContext(ctx aws.Context, input *ListSubscriptionsInput, opts ...request.Option) (*ListSubscriptionsOutput, error) { - req, out := c.ListSubscriptionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListSubscriptionsPages iterates over the pages of a ListSubscriptions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListSubscriptions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListSubscriptions operation. 
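The ListSubscriptions doc above says to feed NextToken back into a fresh call to get further results. A sketch of that hand-rolled loop, which is exactly what ListSubscriptionsPages automates; the Subscriptions and SubscriptionArn field names are assumed from the SDK's SNS model rather than shown in this hunk:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    // listAllSubscriptions drives the NextToken handshake by hand.
    func listAllSubscriptions(svc *sns.SNS) error {
        input := &sns.ListSubscriptionsInput{}
        for {
            out, err := svc.ListSubscriptions(input)
            if err != nil {
                return err
            }
            for _, sub := range out.Subscriptions {
                fmt.Println(aws.StringValue(sub.SubscriptionArn))
            }
            if out.NextToken == nil {
                return nil // last page: NextToken is null
            }
            input.NextToken = out.NextToken
        }
    }

    func main() {
        svc := sns.New(session.Must(session.NewSession()))
        if err := listAllSubscriptions(svc); err != nil {
            fmt.Println(err)
        }
    }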
-// pageNum := 0 -// err := client.ListSubscriptionsPages(params, -// func(page *sns.ListSubscriptionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(*ListSubscriptionsOutput, bool) bool) error { - return c.ListSubscriptionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListSubscriptionsPagesWithContext same as ListSubscriptionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListSubscriptionsPagesWithContext(ctx aws.Context, input *ListSubscriptionsInput, fn func(*ListSubscriptionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListSubscriptionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListSubscriptionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListSubscriptionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListSubscriptionsByTopic = "ListSubscriptionsByTopic" - -// ListSubscriptionsByTopicRequest generates a "aws/request.Request" representing the -// client's request for the ListSubscriptionsByTopic operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListSubscriptionsByTopic for more information on using the ListSubscriptionsByTopic -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListSubscriptionsByTopicRequest method. -// req, resp := client.ListSubscriptionsByTopicRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptionsByTopic -func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInput) (req *request.Request, output *ListSubscriptionsByTopicOutput) { - op := &request.Operation{ - Name: opListSubscriptionsByTopic, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListSubscriptionsByTopicInput{} - } - - output = &ListSubscriptionsByTopicOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListSubscriptionsByTopic API operation for Amazon Simple Notification Service. -// -// Returns a list of the subscriptions to a specific topic. Each call returns -// a limited list of subscriptions, up to 100. If there are more subscriptions, -// a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic -// call to get further results. 
-// -// This action is throttled at 30 transactions per second (TPS). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListSubscriptionsByTopic for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptionsByTopic -func (c *SNS) ListSubscriptionsByTopic(input *ListSubscriptionsByTopicInput) (*ListSubscriptionsByTopicOutput, error) { - req, out := c.ListSubscriptionsByTopicRequest(input) - return out, req.Send() -} - -// ListSubscriptionsByTopicWithContext is the same as ListSubscriptionsByTopic with the addition of -// the ability to pass a context and additional request options. -// -// See ListSubscriptionsByTopic for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListSubscriptionsByTopicWithContext(ctx aws.Context, input *ListSubscriptionsByTopicInput, opts ...request.Option) (*ListSubscriptionsByTopicOutput, error) { - req, out := c.ListSubscriptionsByTopicRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListSubscriptionsByTopicPages iterates over the pages of a ListSubscriptionsByTopic operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListSubscriptionsByTopic method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListSubscriptionsByTopic operation. -// pageNum := 0 -// err := client.ListSubscriptionsByTopicPages(params, -// func(page *sns.ListSubscriptionsByTopicOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput, fn func(*ListSubscriptionsByTopicOutput, bool) bool) error { - return c.ListSubscriptionsByTopicPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListSubscriptionsByTopicPagesWithContext same as ListSubscriptionsByTopicPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SNS) ListSubscriptionsByTopicPagesWithContext(ctx aws.Context, input *ListSubscriptionsByTopicInput, fn func(*ListSubscriptionsByTopicOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListSubscriptionsByTopicInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListSubscriptionsByTopicRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListSubscriptionsByTopicOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTagsForResource = "ListTagsForResource" - -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTagsForResource for more information on using the ListTagsForResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTagsForResource -func (c *SNS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { - op := &request.Operation{ - Name: opListTagsForResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListTagsForResourceInput{} - } - - output = &ListTagsForResourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTagsForResource API operation for Amazon Simple Notification Service. -// -// List all tags added to the specified Amazon SNS topic. For an overview, see -// Amazon SNS Tags (https://docs.aws.amazon.com/sns/latest/dg/sns-tags.html) -// in the Amazon Simple Notification Service Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListTagsForResource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. -// -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. 
Perform the operations -// sequentially. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTagsForResource -func (c *SNS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) - return out, req.Send() -} - -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of -// the ability to pass a context and additional request options. -// -// See ListTagsForResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListTopics = "ListTopics" - -// ListTopicsRequest generates a "aws/request.Request" representing the -// client's request for the ListTopics operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTopics for more information on using the ListTopics -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListTopicsRequest method. -// req, resp := client.ListTopicsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTopics -func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, output *ListTopicsOutput) { - op := &request.Operation{ - Name: opListTopics, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListTopicsInput{} - } - - output = &ListTopicsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTopics API operation for Amazon Simple Notification Service. -// -// Returns a list of the requester's topics. Each call returns a limited list -// of topics, up to 100. If there are more topics, a NextToken is also returned. -// Use the NextToken parameter in a new ListTopics call to get further results. -// -// This action is throttled at 30 transactions per second (TPS). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation ListTopics for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. 
-// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTopics -func (c *SNS) ListTopics(input *ListTopicsInput) (*ListTopicsOutput, error) { - req, out := c.ListTopicsRequest(input) - return out, req.Send() -} - -// ListTopicsWithContext is the same as ListTopics with the addition of -// the ability to pass a context and additional request options. -// -// See ListTopics for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListTopicsWithContext(ctx aws.Context, input *ListTopicsInput, opts ...request.Option) (*ListTopicsOutput, error) { - req, out := c.ListTopicsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListTopicsPages iterates over the pages of a ListTopics operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTopics method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTopics operation. -// pageNum := 0 -// err := client.ListTopicsPages(params, -// func(page *sns.ListTopicsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(*ListTopicsOutput, bool) bool) error { - return c.ListTopicsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTopicsPagesWithContext same as ListTopicsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) ListTopicsPagesWithContext(ctx aws.Context, input *ListTopicsInput, fn func(*ListTopicsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTopicsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTopicsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTopicsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opOptInPhoneNumber = "OptInPhoneNumber" - -// OptInPhoneNumberRequest generates a "aws/request.Request" representing the -// client's request for the OptInPhoneNumber operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See OptInPhoneNumber for more information on using the OptInPhoneNumber -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the OptInPhoneNumberRequest method. -// req, resp := client.OptInPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/OptInPhoneNumber -func (c *SNS) OptInPhoneNumberRequest(input *OptInPhoneNumberInput) (req *request.Request, output *OptInPhoneNumberOutput) { - op := &request.Operation{ - Name: opOptInPhoneNumber, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &OptInPhoneNumberInput{} - } - - output = &OptInPhoneNumberOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// OptInPhoneNumber API operation for Amazon Simple Notification Service. -// -// Use this request to opt in a phone number that is opted out, which enables -// you to resume sending SMS messages to the number. -// -// You can opt in a phone number only once every 30 days. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation OptInPhoneNumber for usage and error information. -// -// Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/OptInPhoneNumber -func (c *SNS) OptInPhoneNumber(input *OptInPhoneNumberInput) (*OptInPhoneNumberOutput, error) { - req, out := c.OptInPhoneNumberRequest(input) - return out, req.Send() -} - -// OptInPhoneNumberWithContext is the same as OptInPhoneNumber with the addition of -// the ability to pass a context and additional request options. -// -// See OptInPhoneNumber for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) OptInPhoneNumberWithContext(ctx aws.Context, input *OptInPhoneNumberInput, opts ...request.Option) (*OptInPhoneNumberOutput, error) { - req, out := c.OptInPhoneNumberRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPublish = "Publish" - -// PublishRequest generates a "aws/request.Request" representing the -// client's request for the Publish operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
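OptInPhoneNumberRequest above swaps in protocol.UnmarshalDiscardBodyHandler, so a successful response carries no payload and only the returned error is meaningful. A minimal sketch; the PhoneNumber field value is an illustrative placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        // The response body is discarded, so the output struct is empty;
        // opting in succeeds or fails solely through err.
        _, err := svc.OptInPhoneNumber(&sns.OptInPhoneNumberInput{
            PhoneNumber: aws.String("+15555550100"),
        })
        if err != nil {
            fmt.Println("opt-in failed:", err)
        }
    }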
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Publish for more information on using the Publish -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PublishRequest method. -// req, resp := client.PublishRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Publish -func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { - op := &request.Operation{ - Name: opPublish, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PublishInput{} - } - - output = &PublishOutput{} - req = c.newRequest(op, input, output) - return -} - -// Publish API operation for Amazon Simple Notification Service. -// -// Sends a message to an Amazon SNS topic, a text message (SMS message) directly -// to a phone number, or a message to a mobile platform endpoint (when you specify -// the TargetArn). -// -// If you send a message to a topic, Amazon SNS delivers the message to each -// endpoint that is subscribed to the topic. The format of the message depends -// on the notification protocol for each subscribed endpoint. -// -// When a messageId is returned, the message is saved and Amazon SNS immediately -// delivers it to subscribers. -// -// To use the Publish action for publishing a message to a mobile endpoint, -// such as an app on a Kindle device or mobile phone, you must specify the EndpointArn -// for the TargetArn parameter. The EndpointArn is returned when making a call -// with the CreatePlatformEndpoint action. -// -// For more information about formatting messages, see Send Custom Platform-Specific -// Payloads in Messages to Mobile Devices (https://docs.aws.amazon.com/sns/latest/dg/mobile-push-send-custommessage.html). -// -// You can publish messages only to topics and endpoints in the same Amazon -// Web Services Region. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation Publish for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInvalidParameterValueException "ParameterValueInvalid" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeEndpointDisabledException "EndpointDisabled" -// Exception error indicating endpoint disabled. -// -// * ErrCodePlatformApplicationDisabledException "PlatformApplicationDisabled" -// Exception error indicating platform application disabled. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. 
-// -// * ErrCodeKMSDisabledException "KMSDisabled" -// The request was rejected because the specified customer master key (CMK) -// isn't enabled. -// -// * ErrCodeKMSInvalidStateException "KMSInvalidState" -// The request was rejected because the state of the specified resource isn't -// valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// * ErrCodeKMSNotFoundException "KMSNotFound" -// The request was rejected because the specified entity or resource can't be -// found. -// -// * ErrCodeKMSOptInRequired "KMSOptInRequired" -// The Amazon Web Services access key ID needs a subscription for the service. -// -// * ErrCodeKMSThrottlingException "KMSThrottling" -// The request was denied due to request throttling. For more information about -// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) -// in the Key Management Service Developer Guide. -// -// * ErrCodeKMSAccessDeniedException "KMSAccessDenied" -// The ciphertext references a key that doesn't exist or that you don't have -// access to. -// -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Publish -func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { - req, out := c.PublishRequest(input) - return out, req.Send() -} - -// PublishWithContext is the same as Publish with the addition of -// the ability to pass a context and additional request options. -// -// See Publish for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) PublishWithContext(ctx aws.Context, input *PublishInput, opts ...request.Option) (*PublishOutput, error) { - req, out := c.PublishRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPublishBatch = "PublishBatch" - -// PublishBatchRequest generates a "aws/request.Request" representing the -// client's request for the PublishBatch operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PublishBatch for more information on using the PublishBatch -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PublishBatchRequest method. 
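Publish, per the comment above, targets a topic, a phone number, or a platform endpoint. A sketch of the topic case; the topic ARN is a placeholder, and the TopicArn, Message, and MessageId field names are assumed from the SDK's SNS model:

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        // Publishing to a topic delivers to every subscribed endpoint;
        // TargetArn or PhoneNumber would select the other delivery modes.
        out, err := svc.PublishWithContext(context.Background(), &sns.PublishInput{
            TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
            Message:  aws.String("hello from the sketch"),
        })
        if err != nil {
            fmt.Println("publish failed:", err)
            return
        }
        // A returned messageId means SNS has saved the message.
        fmt.Println("message id:", aws.StringValue(out.MessageId))
    }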
-// req, resp := client.PublishBatchRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/PublishBatch -func (c *SNS) PublishBatchRequest(input *PublishBatchInput) (req *request.Request, output *PublishBatchOutput) { - op := &request.Operation{ - Name: opPublishBatch, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PublishBatchInput{} - } - - output = &PublishBatchOutput{} - req = c.newRequest(op, input, output) - return -} - -// PublishBatch API operation for Amazon Simple Notification Service. -// -// Publishes up to ten messages to the specified topic. This is a batch version -// of Publish. For FIFO topics, multiple messages within a single batch are -// published in the order they are sent, and messages are deduplicated within -// the batch and across batches for 5 minutes. -// -// The result of publishing each message is reported individually in the response. -// Because the batch request can result in a combination of successful and unsuccessful -// actions, you should check for batch errors even when the call returns an -// HTTP status code of 200. -// -// The maximum allowed individual message size and the maximum total payload -// size (the sum of the individual lengths of all of the batched messages) are -// both 256 KB (262,144 bytes). -// -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. For example, -// a parameter list with two elements looks like this: -// -// &AttributeName.1=first -// -// &AttributeName.2=second -// -// If you send a batch message to a topic, Amazon SNS publishes the batch message -// to each endpoint that is subscribed to the topic. The format of the batch -// message depends on the notification protocol for each subscribed endpoint. -// -// When a messageId is returned, the batch message is saved and Amazon SNS immediately -// delivers the message to subscribers. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation PublishBatch for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInvalidParameterValueException "ParameterValueInvalid" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeEndpointDisabledException "EndpointDisabled" -// Exception error indicating endpoint disabled. -// -// * ErrCodePlatformApplicationDisabledException "PlatformApplicationDisabled" -// Exception error indicating platform application disabled. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeBatchEntryIdsNotDistinctException "BatchEntryIdsNotDistinct" -// Two or more batch entries in the request have the same Id. 
-// -// * ErrCodeBatchRequestTooLongException "BatchRequestTooLong" -// The length of all the batch messages put together is more than the limit. -// -// * ErrCodeEmptyBatchRequestException "EmptyBatchRequest" -// The batch request doesn't contain any entries. -// -// * ErrCodeInvalidBatchEntryIdException "InvalidBatchEntryId" -// The Id of a batch entry in a batch request doesn't abide by the specification. -// -// * ErrCodeTooManyEntriesInBatchRequestException "TooManyEntriesInBatchRequest" -// The batch request contains more entries than permissible. -// -// * ErrCodeKMSDisabledException "KMSDisabled" -// The request was rejected because the specified customer master key (CMK) -// isn't enabled. -// -// * ErrCodeKMSInvalidStateException "KMSInvalidState" -// The request was rejected because the state of the specified resource isn't -// valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// * ErrCodeKMSNotFoundException "KMSNotFound" -// The request was rejected because the specified entity or resource can't be -// found. -// -// * ErrCodeKMSOptInRequired "KMSOptInRequired" -// The Amazon Web Services access key ID needs a subscription for the service. -// -// * ErrCodeKMSThrottlingException "KMSThrottling" -// The request was denied due to request throttling. For more information about -// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) -// in the Key Management Service Developer Guide. -// -// * ErrCodeKMSAccessDeniedException "KMSAccessDenied" -// The ciphertext references a key that doesn't exist or that you don't have -// access to. -// -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/PublishBatch -func (c *SNS) PublishBatch(input *PublishBatchInput) (*PublishBatchOutput, error) { - req, out := c.PublishBatchRequest(input) - return out, req.Send() -} - -// PublishBatchWithContext is the same as PublishBatch with the addition of -// the ability to pass a context and additional request options. -// -// See PublishBatch for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) PublishBatchWithContext(ctx aws.Context, input *PublishBatchInput, opts ...request.Option) (*PublishBatchOutput, error) { - req, out := c.PublishBatchRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRemovePermission = "RemovePermission" - -// RemovePermissionRequest generates a "aws/request.Request" representing the -// client's request for the RemovePermission operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
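PublishBatch can return HTTP 200 while individual entries fail, which is why the comment above says to check for batch errors in the response. A sketch of that per-entry check; the entry and Failed field names are assumed from the SDK's SNS model, and the ARN is a placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
        svc := sns.New(session.Must(session.NewSession()))

        // Up to ten entries per batch; Ids must be distinct within the request.
        out, err := svc.PublishBatch(&sns.PublishBatchInput{
            TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
            PublishBatchRequestEntries: []*sns.PublishBatchRequestEntry{
                {Id: aws.String("1"), Message: aws.String("first")},
                {Id: aws.String("2"), Message: aws.String("second")},
            },
        })
        if err != nil {
            fmt.Println("whole batch failed:", err)
            return
        }
        // A 200 response can still carry per-entry failures.
        for _, f := range out.Failed {
            fmt.Println("entry", aws.StringValue(f.Id), "failed:", aws.StringValue(f.Code))
        }
    }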
-// -// See RemovePermission for more information on using the RemovePermission -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RemovePermissionRequest method. -// req, resp := client.RemovePermissionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/RemovePermission -func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { - op := &request.Operation{ - Name: opRemovePermission, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RemovePermissionInput{} - } - - output = &RemovePermissionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// RemovePermission API operation for Amazon Simple Notification Service. -// -// Removes a statement from a topic's access control policy. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation RemovePermission for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/RemovePermission -func (c *SNS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { - req, out := c.RemovePermissionRequest(input) - return out, req.Send() -} - -// RemovePermissionWithContext is the same as RemovePermission with the addition of -// the ability to pass a context and additional request options. -// -// See RemovePermission for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) RemovePermissionWithContext(ctx aws.Context, input *RemovePermissionInput, opts ...request.Option) (*RemovePermissionOutput, error) { - req, out := c.RemovePermissionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSetEndpointAttributes = "SetEndpointAttributes" - -// SetEndpointAttributesRequest generates a "aws/request.Request" representing the -// client's request for the SetEndpointAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See SetEndpointAttributes for more information on using the SetEndpointAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SetEndpointAttributesRequest method. -// req, resp := client.SetEndpointAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetEndpointAttributes -func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (req *request.Request, output *SetEndpointAttributesOutput) { - op := &request.Operation{ - Name: opSetEndpointAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SetEndpointAttributesInput{} - } - - output = &SetEndpointAttributesOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// SetEndpointAttributes API operation for Amazon Simple Notification Service. -// -// Sets the attributes for an endpoint for a device on one of the supported -// push notification services, such as GCM (Firebase Cloud Messaging) and APNS. -// For more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation SetEndpointAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetEndpointAttributes -func (c *SNS) SetEndpointAttributes(input *SetEndpointAttributesInput) (*SetEndpointAttributesOutput, error) { - req, out := c.SetEndpointAttributesRequest(input) - return out, req.Send() -} - -// SetEndpointAttributesWithContext is the same as SetEndpointAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See SetEndpointAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) SetEndpointAttributesWithContext(ctx aws.Context, input *SetEndpointAttributesInput, opts ...request.Option) (*SetEndpointAttributesOutput, error) { - req, out := c.SetEndpointAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opSetPlatformApplicationAttributes = "SetPlatformApplicationAttributes" - -// SetPlatformApplicationAttributesRequest generates a "aws/request.Request" representing the -// client's request for the SetPlatformApplicationAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SetPlatformApplicationAttributes for more information on using the SetPlatformApplicationAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SetPlatformApplicationAttributesRequest method. -// req, resp := client.SetPlatformApplicationAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetPlatformApplicationAttributes -func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicationAttributesInput) (req *request.Request, output *SetPlatformApplicationAttributesOutput) { - op := &request.Operation{ - Name: opSetPlatformApplicationAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SetPlatformApplicationAttributesInput{} - } - - output = &SetPlatformApplicationAttributesOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// SetPlatformApplicationAttributes API operation for Amazon Simple Notification Service. -// -// Sets the attributes of the platform application object for the supported -// push notification services, such as APNS and GCM (Firebase Cloud Messaging). -// For more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). -// For information on configuring attributes for message delivery status, see -// Using Amazon SNS Application Attributes for Message Delivery Status (https://docs.aws.amazon.com/sns/latest/dg/sns-msg-status.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation SetPlatformApplicationAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetPlatformApplicationAttributes -func (c *SNS) SetPlatformApplicationAttributes(input *SetPlatformApplicationAttributesInput) (*SetPlatformApplicationAttributesOutput, error) { - req, out := c.SetPlatformApplicationAttributesRequest(input) - return out, req.Send() -} - -// SetPlatformApplicationAttributesWithContext is the same as SetPlatformApplicationAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See SetPlatformApplicationAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) SetPlatformApplicationAttributesWithContext(ctx aws.Context, input *SetPlatformApplicationAttributesInput, opts ...request.Option) (*SetPlatformApplicationAttributesOutput, error) { - req, out := c.SetPlatformApplicationAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSetSMSAttributes = "SetSMSAttributes" - -// SetSMSAttributesRequest generates a "aws/request.Request" representing the -// client's request for the SetSMSAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SetSMSAttributes for more information on using the SetSMSAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SetSMSAttributesRequest method. -// req, resp := client.SetSMSAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSMSAttributes -func (c *SNS) SetSMSAttributesRequest(input *SetSMSAttributesInput) (req *request.Request, output *SetSMSAttributesOutput) { - op := &request.Operation{ - Name: opSetSMSAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SetSMSAttributesInput{} - } - - output = &SetSMSAttributesOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// SetSMSAttributes API operation for Amazon Simple Notification Service. -// -// Use this request to set the default settings for sending SMS messages and -// receiving daily SMS usage reports. -// -// You can override some of these settings for a single message when you use -// the Publish action with the MessageAttributes.entry.N parameter. For more -// information, see Publishing to a mobile phone (https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html) -// in the Amazon SNS Developer Guide. -// -// To use this operation, you must grant the Amazon SNS service principal (sns.amazonaws.com) -// permission to perform the s3:ListBucket action. -// -// Returns awserr.Error for service API and SDK errors. 
-// Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Notification Service's
-// API operation SetSMSAttributes for usage and error information.
-//
-// Returned Error Codes:
-// * ErrCodeInvalidParameterException "InvalidParameter"
-// Indicates that a request parameter does not comply with the associated constraints.
-//
-// * ErrCodeThrottledException "Throttled"
-// Indicates that the rate at which requests have been submitted for this action
-// exceeds the limit for your Amazon Web Services account.
-//
-// * ErrCodeInternalErrorException "InternalError"
-// Indicates an internal service error.
-//
-// * ErrCodeAuthorizationErrorException "AuthorizationError"
-// Indicates that the user has been denied access to the requested resource.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSMSAttributes
-func (c *SNS) SetSMSAttributes(input *SetSMSAttributesInput) (*SetSMSAttributesOutput, error) {
-	req, out := c.SetSMSAttributesRequest(input)
-	return out, req.Send()
-}
-
-// SetSMSAttributesWithContext is the same as SetSMSAttributes with the addition of
-// the ability to pass a context and additional request options.
-//
-// See SetSMSAttributes for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SNS) SetSMSAttributesWithContext(ctx aws.Context, input *SetSMSAttributesInput, opts ...request.Option) (*SetSMSAttributesOutput, error) {
-	req, out := c.SetSMSAttributesRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opSetSubscriptionAttributes = "SetSubscriptionAttributes"
-
-// SetSubscriptionAttributesRequest generates a "aws/request.Request" representing the
-// client's request for the SetSubscriptionAttributes operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See SetSubscriptionAttributes for more information on using the SetSubscriptionAttributes
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//
-// // Example sending a request using the SetSubscriptionAttributesRequest method.
-// req, resp := client.SetSubscriptionAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSubscriptionAttributes -func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesInput) (req *request.Request, output *SetSubscriptionAttributesOutput) { - op := &request.Operation{ - Name: opSetSubscriptionAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SetSubscriptionAttributesInput{} - } - - output = &SetSubscriptionAttributesOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// SetSubscriptionAttributes API operation for Amazon Simple Notification Service. -// -// Allows a subscription owner to set an attribute of the subscription to a -// new value. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation SetSubscriptionAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" -// Indicates that the number of filter polices in your Amazon Web Services account -// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit -// Increase case in the Amazon Web Services Support Center. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSubscriptionAttributes -func (c *SNS) SetSubscriptionAttributes(input *SetSubscriptionAttributesInput) (*SetSubscriptionAttributesOutput, error) { - req, out := c.SetSubscriptionAttributesRequest(input) - return out, req.Send() -} - -// SetSubscriptionAttributesWithContext is the same as SetSubscriptionAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See SetSubscriptionAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) SetSubscriptionAttributesWithContext(ctx aws.Context, input *SetSubscriptionAttributesInput, opts ...request.Option) (*SetSubscriptionAttributesOutput, error) { - req, out := c.SetSubscriptionAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSetTopicAttributes = "SetTopicAttributes" - -// SetTopicAttributesRequest generates a "aws/request.Request" representing the -// client's request for the SetTopicAttributes operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See SetTopicAttributes for more information on using the SetTopicAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SetTopicAttributesRequest method. -// req, resp := client.SetTopicAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetTopicAttributes -func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *request.Request, output *SetTopicAttributesOutput) { - op := &request.Operation{ - Name: opSetTopicAttributes, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SetTopicAttributesInput{} - } - - output = &SetTopicAttributesOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// SetTopicAttributes API operation for Amazon Simple Notification Service. -// -// Allows a topic owner to set an attribute of the topic to a new value. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation SetTopicAttributes for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetTopicAttributes -func (c *SNS) SetTopicAttributes(input *SetTopicAttributesInput) (*SetTopicAttributesOutput, error) { - req, out := c.SetTopicAttributesRequest(input) - return out, req.Send() -} - -// SetTopicAttributesWithContext is the same as SetTopicAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See SetTopicAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SNS) SetTopicAttributesWithContext(ctx aws.Context, input *SetTopicAttributesInput, opts ...request.Option) (*SetTopicAttributesOutput, error) { - req, out := c.SetTopicAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSubscribe = "Subscribe" - -// SubscribeRequest generates a "aws/request.Request" representing the -// client's request for the Subscribe operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Subscribe for more information on using the Subscribe -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SubscribeRequest method. -// req, resp := client.SubscribeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Subscribe -func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, output *SubscribeOutput) { - op := &request.Operation{ - Name: opSubscribe, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SubscribeInput{} - } - - output = &SubscribeOutput{} - req = c.newRequest(op, input, output) - return -} - -// Subscribe API operation for Amazon Simple Notification Service. -// -// Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S -// or email, or if the endpoint and the topic are not in the same Amazon Web -// Services account, the endpoint owner must run the ConfirmSubscription action -// to confirm the subscription. -// -// You call the ConfirmSubscription action with the token from the subscription -// response. Confirmation tokens are valid for three days. -// -// This action is throttled at 100 transactions per second (TPS). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation Subscribe for usage and error information. -// -// Returned Error Codes: -// * ErrCodeSubscriptionLimitExceededException "SubscriptionLimitExceeded" -// Indicates that the customer already owns the maximum allowed number of subscriptions. -// -// * ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" -// Indicates that the number of filter polices in your Amazon Web Services account -// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit -// Increase case in the Amazon Web Services Support Center. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. 
-// -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Subscribe -func (c *SNS) Subscribe(input *SubscribeInput) (*SubscribeOutput, error) { - req, out := c.SubscribeRequest(input) - return out, req.Send() -} - -// SubscribeWithContext is the same as Subscribe with the addition of -// the ability to pass a context and additional request options. -// -// See Subscribe for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) SubscribeWithContext(ctx aws.Context, input *SubscribeInput, opts ...request.Option) (*SubscribeOutput, error) { - req, out := c.SubscribeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTagResource = "TagResource" - -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TagResource for more information on using the TagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/TagResource -func (c *SNS) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { - op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TagResourceInput{} - } - - output = &TagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// TagResource API operation for Amazon Simple Notification Service. -// -// Add tags to the specified Amazon SNS topic. For an overview, see Amazon SNS -// Tags (https://docs.aws.amazon.com/sns/latest/dg/sns-tags.html) in the Amazon -// SNS Developer Guide. -// -// When you use topic tags, keep the following guidelines in mind: -// -// * Adding more than 50 tags to a topic isn't recommended. -// -// * Tags don't have any semantic meaning. Amazon SNS interprets tags as -// character strings. -// -// * Tags are case-sensitive. -// -// * A new tag with a key identical to that of an existing tag overwrites -// the existing tag. -// -// * Tagging actions are limited to 10 TPS per Amazon Web Services account, -// per Amazon Web Services Region. If your application requires a higher -// throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). 
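// A minimal sketch of TagResource under the guidelines above, with aws-sdk-go v1
// (the ARN and tag values are hypothetical); note that re-using an existing tag
// key overwrites that tag rather than failing:
//
//	svc := sns.New(session.Must(session.NewSession()))
//	_, err := svc.TagResource(&sns.TagResourceInput{
//		ResourceArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
//		Tags: []*sns.Tag{
//			{Key: aws.String("team"), Value: aws.String("observability")},
//			{Key: aws.String("env"), Value: aws.String("prod")},
//		},
//	})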
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation TagResource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. -// -// * ErrCodeTagLimitExceededException "TagLimitExceeded" -// Can't add more than 50 tags to a topic. -// -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. -// -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/TagResource -func (c *SNS) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - return out, req.Send() -} - -// TagResourceWithContext is the same as TagResource with the addition of -// the ability to pass a context and additional request options. -// -// See TagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUnsubscribe = "Unsubscribe" - -// UnsubscribeRequest generates a "aws/request.Request" representing the -// client's request for the Unsubscribe operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Unsubscribe for more information on using the Unsubscribe -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UnsubscribeRequest method. 
-// req, resp := client.UnsubscribeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Unsubscribe -func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, output *UnsubscribeOutput) { - op := &request.Operation{ - Name: opUnsubscribe, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UnsubscribeInput{} - } - - output = &UnsubscribeOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// Unsubscribe API operation for Amazon Simple Notification Service. -// -// Deletes a subscription. If the subscription requires authentication for deletion, -// only the owner of the subscription or the topic's owner can unsubscribe, -// and an Amazon Web Services signature is required. If the Unsubscribe call -// does not require authentication and the requester is not the subscription -// owner, a final cancellation message is delivered to the endpoint, so that -// the endpoint owner can easily resubscribe to the topic if the Unsubscribe -// request was unintended. -// -// This action is throttled at 100 transactions per second (TPS). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation Unsubscribe for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. -// -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Unsubscribe -func (c *SNS) Unsubscribe(input *UnsubscribeInput) (*UnsubscribeOutput, error) { - req, out := c.UnsubscribeRequest(input) - return out, req.Send() -} - -// UnsubscribeWithContext is the same as Unsubscribe with the addition of -// the ability to pass a context and additional request options. -// -// See Unsubscribe for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) UnsubscribeWithContext(ctx aws.Context, input *UnsubscribeInput, opts ...request.Option) (*UnsubscribeOutput, error) { - req, out := c.UnsubscribeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUntagResource = "UntagResource" - -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UntagResource for more information on using the UntagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/UntagResource -func (c *SNS) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { - op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UntagResourceInput{} - } - - output = &UntagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UntagResource API operation for Amazon Simple Notification Service. -// -// Remove tags from the specified Amazon SNS topic. For an overview, see Amazon -// SNS Tags (https://docs.aws.amazon.com/sns/latest/dg/sns-tags.html) in the -// Amazon SNS Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation UntagResource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. -// -// * ErrCodeTagLimitExceededException "TagLimitExceeded" -// Can't add more than 50 tags to a topic. -// -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. -// -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/UntagResource -func (c *SNS) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - return out, req.Send() -} - -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opVerifySMSSandboxPhoneNumber = "VerifySMSSandboxPhoneNumber" - -// VerifySMSSandboxPhoneNumberRequest generates a "aws/request.Request" representing the -// client's request for the VerifySMSSandboxPhoneNumber operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See VerifySMSSandboxPhoneNumber for more information on using the VerifySMSSandboxPhoneNumber -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the VerifySMSSandboxPhoneNumberRequest method. -// req, resp := client.VerifySMSSandboxPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/VerifySMSSandboxPhoneNumber -func (c *SNS) VerifySMSSandboxPhoneNumberRequest(input *VerifySMSSandboxPhoneNumberInput) (req *request.Request, output *VerifySMSSandboxPhoneNumberOutput) { - op := &request.Operation{ - Name: opVerifySMSSandboxPhoneNumber, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &VerifySMSSandboxPhoneNumberInput{} - } - - output = &VerifySMSSandboxPhoneNumberOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// VerifySMSSandboxPhoneNumber API operation for Amazon Simple Notification Service. -// -// Verifies a destination phone number with a one-time password (OTP) for the -// calling Amazon Web Services account. -// -// When you start using Amazon SNS to send SMS messages, your Amazon Web Services -// account is in the SMS sandbox. The SMS sandbox provides a safe environment -// for you to try Amazon SNS features without risking your reputation as an -// SMS sender. While your Amazon Web Services account is in the SMS sandbox, -// you can use all of the features of Amazon SNS. However, you can send SMS -// messages only to verified destination phone numbers. For more information, -// including how to move out of the sandbox to send messages without restrictions, -// see SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox.html) -// in the Amazon SNS Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Notification Service's -// API operation VerifySMSSandboxPhoneNumber for usage and error information. 
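// A minimal sketch of the sandbox verification call with aws-sdk-go v1,
// assuming the imports from the PublishBatch sketch above (the phone number
// and one-time password below are placeholders):
//
//	svc := sns.New(session.Must(session.NewSession()))
//	_, err := svc.VerifySMSSandboxPhoneNumber(&sns.VerifySMSSandboxPhoneNumberInput{
//		PhoneNumber:     aws.String("+15555550123"),
//		OneTimePassword: aws.String("123456"),
//	})
//	// A "VerificationException" error code means the OTP was invalid or expired.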
-// -// Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. -// -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. -// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. -// -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. -// -// * ErrCodeVerificationException "VerificationException" -// Indicates that the one-time password (OTP) used for verification is invalid. -// -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/VerifySMSSandboxPhoneNumber -func (c *SNS) VerifySMSSandboxPhoneNumber(input *VerifySMSSandboxPhoneNumberInput) (*VerifySMSSandboxPhoneNumberOutput, error) { - req, out := c.VerifySMSSandboxPhoneNumberRequest(input) - return out, req.Send() -} - -// VerifySMSSandboxPhoneNumberWithContext is the same as VerifySMSSandboxPhoneNumber with the addition of -// the ability to pass a context and additional request options. -// -// See VerifySMSSandboxPhoneNumber for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SNS) VerifySMSSandboxPhoneNumberWithContext(ctx aws.Context, input *VerifySMSSandboxPhoneNumberInput, opts ...request.Option) (*VerifySMSSandboxPhoneNumberOutput, error) { - req, out := c.VerifySMSSandboxPhoneNumberRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -type AddPermissionInput struct { - _ struct{} `type:"structure"` - - // The Amazon Web Services account IDs of the users (principals) who will be - // given access to the specified actions. The users must have Amazon Web Services - // account, but do not need to be signed up for this service. - // - // AWSAccountId is a required field - AWSAccountId []*string `type:"list" required:"true"` - - // The action you want to allow for the specified principal(s). - // - // Valid values: Any Amazon SNS action name, for example Publish. - // - // ActionName is a required field - ActionName []*string `type:"list" required:"true"` - - // A unique identifier for the new policy statement. - // - // Label is a required field - Label *string `type:"string" required:"true"` - - // The ARN of the topic whose access control policy you wish to modify. - // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AddPermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
-// The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AddPermissionInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AddPermissionInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "AddPermissionInput"}
-	if s.AWSAccountId == nil {
-		invalidParams.Add(request.NewErrParamRequired("AWSAccountId"))
-	}
-	if s.ActionName == nil {
-		invalidParams.Add(request.NewErrParamRequired("ActionName"))
-	}
-	if s.Label == nil {
-		invalidParams.Add(request.NewErrParamRequired("Label"))
-	}
-	if s.TopicArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("TopicArn"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetAWSAccountId sets the AWSAccountId field's value.
-func (s *AddPermissionInput) SetAWSAccountId(v []*string) *AddPermissionInput {
-	s.AWSAccountId = v
-	return s
-}
-
-// SetActionName sets the ActionName field's value.
-func (s *AddPermissionInput) SetActionName(v []*string) *AddPermissionInput {
-	s.ActionName = v
-	return s
-}
-
-// SetLabel sets the Label field's value.
-func (s *AddPermissionInput) SetLabel(v string) *AddPermissionInput {
-	s.Label = &v
-	return s
-}
-
-// SetTopicArn sets the TopicArn field's value.
-func (s *AddPermissionInput) SetTopicArn(v string) *AddPermissionInput {
-	s.TopicArn = &v
-	return s
-}
-
-type AddPermissionOutput struct {
-	_ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AddPermissionOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AddPermissionOutput) GoString() string {
-	return s.String()
-}
-
-// Gives a detailed description of failed messages in the batch.
-type BatchResultErrorEntry struct {
-	_ struct{} `type:"structure"`
-
-	// An error code representing why the action failed on this entry.
-	//
-	// Code is a required field
-	Code *string `type:"string" required:"true"`
-
-	// The Id of an entry in a batch request
-	//
-	// Id is a required field
-	Id *string `type:"string" required:"true"`
-
-	// A message explaining why the action failed on this entry.
-	Message *string `type:"string"`
-
-	// Specifies whether the error happened due to the caller of the batch API action.
-	//
-	// SenderFault is a required field
-	SenderFault *bool `type:"boolean" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchResultErrorEntry) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchResultErrorEntry) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *BatchResultErrorEntry) SetCode(v string) *BatchResultErrorEntry { - s.Code = &v - return s -} - -// SetId sets the Id field's value. -func (s *BatchResultErrorEntry) SetId(v string) *BatchResultErrorEntry { - s.Id = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *BatchResultErrorEntry) SetMessage(v string) *BatchResultErrorEntry { - s.Message = &v - return s -} - -// SetSenderFault sets the SenderFault field's value. -func (s *BatchResultErrorEntry) SetSenderFault(v bool) *BatchResultErrorEntry { - s.SenderFault = &v - return s -} - -// The input for the CheckIfPhoneNumberIsOptedOut action. -type CheckIfPhoneNumberIsOptedOutInput struct { - _ struct{} `type:"structure"` - - // The phone number for which you want to check the opt out status. - // - // PhoneNumber is a required field - PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CheckIfPhoneNumberIsOptedOutInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CheckIfPhoneNumberIsOptedOutInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CheckIfPhoneNumberIsOptedOutInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CheckIfPhoneNumberIsOptedOutInput"} - if s.PhoneNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *CheckIfPhoneNumberIsOptedOutInput) SetPhoneNumber(v string) *CheckIfPhoneNumberIsOptedOutInput { - s.PhoneNumber = &v - return s -} - -// The response from the CheckIfPhoneNumberIsOptedOut action. -type CheckIfPhoneNumberIsOptedOutOutput struct { - _ struct{} `type:"structure"` - - // Indicates whether the phone number is opted out: - // - // * true – The phone number is opted out, meaning you cannot publish SMS - // messages to it. - // - // * false – The phone number is opted in, meaning you can publish SMS - // messages to it. - IsOptedOut *bool `locationName:"isOptedOut" type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CheckIfPhoneNumberIsOptedOutOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CheckIfPhoneNumberIsOptedOutOutput) GoString() string { - return s.String() -} - -// SetIsOptedOut sets the IsOptedOut field's value. 
-func (s *CheckIfPhoneNumberIsOptedOutOutput) SetIsOptedOut(v bool) *CheckIfPhoneNumberIsOptedOutOutput { - s.IsOptedOut = &v - return s -} - -// Input for ConfirmSubscription action. -type ConfirmSubscriptionInput struct { - _ struct{} `type:"structure"` - - // Disallows unauthenticated unsubscribes of the subscription. If the value - // of this parameter is true and the request has an Amazon Web Services signature, - // then only the topic owner and the subscription owner can unsubscribe the - // endpoint. The unsubscribe action requires Amazon Web Services authentication. - AuthenticateOnUnsubscribe *string `type:"string"` - - // Short-lived token sent to an endpoint during the Subscribe action. - // - // Token is a required field - Token *string `type:"string" required:"true"` - - // The ARN of the topic for which you wish to confirm a subscription. - // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConfirmSubscriptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConfirmSubscriptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ConfirmSubscriptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConfirmSubscriptionInput"} - if s.Token == nil { - invalidParams.Add(request.NewErrParamRequired("Token")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAuthenticateOnUnsubscribe sets the AuthenticateOnUnsubscribe field's value. -func (s *ConfirmSubscriptionInput) SetAuthenticateOnUnsubscribe(v string) *ConfirmSubscriptionInput { - s.AuthenticateOnUnsubscribe = &v - return s -} - -// SetToken sets the Token field's value. -func (s *ConfirmSubscriptionInput) SetToken(v string) *ConfirmSubscriptionInput { - s.Token = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *ConfirmSubscriptionInput) SetTopicArn(v string) *ConfirmSubscriptionInput { - s.TopicArn = &v - return s -} - -// Response for ConfirmSubscriptions action. -type ConfirmSubscriptionOutput struct { - _ struct{} `type:"structure"` - - // The ARN of the created subscription. - SubscriptionArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConfirmSubscriptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ConfirmSubscriptionOutput) GoString() string { - return s.String() -} - -// SetSubscriptionArn sets the SubscriptionArn field's value. -func (s *ConfirmSubscriptionOutput) SetSubscriptionArn(v string) *ConfirmSubscriptionOutput { - s.SubscriptionArn = &v - return s -} - -// Input for CreatePlatformApplication action. -type CreatePlatformApplicationInput struct { - _ struct{} `type:"structure"` - - // For a list of attributes, see SetPlatformApplicationAttributes (https://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html). - // - // Attributes is a required field - Attributes map[string]*string `type:"map" required:"true"` - - // Application names must be made up of only uppercase and lowercase ASCII letters, - // numbers, underscores, hyphens, and periods, and must be between 1 and 256 - // characters long. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // The following platforms are supported: ADM (Amazon Device Messaging), APNS - // (Apple Push Notification Service), APNS_SANDBOX, and GCM (Firebase Cloud - // Messaging). - // - // Platform is a required field - Platform *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePlatformApplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePlatformApplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePlatformApplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePlatformApplicationInput"} - if s.Attributes == nil { - invalidParams.Add(request.NewErrParamRequired("Attributes")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Platform == nil { - invalidParams.Add(request.NewErrParamRequired("Platform")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributes sets the Attributes field's value. -func (s *CreatePlatformApplicationInput) SetAttributes(v map[string]*string) *CreatePlatformApplicationInput { - s.Attributes = v - return s -} - -// SetName sets the Name field's value. -func (s *CreatePlatformApplicationInput) SetName(v string) *CreatePlatformApplicationInput { - s.Name = &v - return s -} - -// SetPlatform sets the Platform field's value. -func (s *CreatePlatformApplicationInput) SetPlatform(v string) *CreatePlatformApplicationInput { - s.Platform = &v - return s -} - -// Response from CreatePlatformApplication action. -type CreatePlatformApplicationOutput struct { - _ struct{} `type:"structure"` - - // PlatformApplicationArn is returned. - PlatformApplicationArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
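// Editor's note: a sketch of registering a platform application with the
// CreatePlatformApplicationInput type above, assuming aws-sdk-go v1 with
// imports as in the earlier sketch; the name and credential are placeholders
// (for GCM the credential is the API key, for APNS the certificate material):
//
//	svc := sns.New(session.Must(session.NewSession()))
//	out, err := svc.CreatePlatformApplication(&sns.CreatePlatformApplicationInput{
//		Name:     aws.String("my-app"), // placeholder
//		Platform: aws.String("GCM"),
//		Attributes: map[string]*string{
//			"PlatformCredential": aws.String("server-api-key"), // placeholder credential
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(aws.StringValue(out.PlatformApplicationArn))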
-func (s CreatePlatformApplicationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreatePlatformApplicationOutput) GoString() string {
-	return s.String()
-}
-
-// SetPlatformApplicationArn sets the PlatformApplicationArn field's value.
-func (s *CreatePlatformApplicationOutput) SetPlatformApplicationArn(v string) *CreatePlatformApplicationOutput {
-	s.PlatformApplicationArn = &v
-	return s
-}
-
-// Input for CreatePlatformEndpoint action.
-type CreatePlatformEndpointInput struct {
-	_ struct{} `type:"structure"`
-
-	// For a list of attributes, see SetEndpointAttributes (https://docs.aws.amazon.com/sns/latest/api/API_SetEndpointAttributes.html).
-	Attributes map[string]*string `type:"map"`
-
-	// Arbitrary user data to associate with the endpoint. Amazon SNS does not use
-	// this data. The data must be in UTF-8 format and less than 2KB.
-	CustomUserData *string `type:"string"`
-
-	// PlatformApplicationArn returned from CreatePlatformApplication is used to
-	// create an endpoint.
-	//
-	// PlatformApplicationArn is a required field
-	PlatformApplicationArn *string `type:"string" required:"true"`
-
-	// Unique identifier created by the notification service for an app on a device.
-	// The specific name for Token will vary, depending on which notification service
-	// is being used. For example, when using APNS as the notification service,
-	// you need the device token. Alternatively, when using GCM (Firebase Cloud
-	// Messaging) or ADM, the device token equivalent is called the registration
-	// ID.
-	//
-	// Token is a required field
-	Token *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreatePlatformEndpointInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreatePlatformEndpointInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreatePlatformEndpointInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CreatePlatformEndpointInput"}
-	if s.PlatformApplicationArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn"))
-	}
-	if s.Token == nil {
-		invalidParams.Add(request.NewErrParamRequired("Token"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *CreatePlatformEndpointInput) SetAttributes(v map[string]*string) *CreatePlatformEndpointInput {
-	s.Attributes = v
-	return s
-}
-
-// SetCustomUserData sets the CustomUserData field's value.
-func (s *CreatePlatformEndpointInput) SetCustomUserData(v string) *CreatePlatformEndpointInput {
-	s.CustomUserData = &v
-	return s
-}
-
-// SetPlatformApplicationArn sets the PlatformApplicationArn field's value.
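// Editor's note: the Set* methods above allow fluent construction of the
// input. A sketch, assuming aws-sdk-go v1 with imports as in the earlier
// sketch and placeholder application ARN and device token:
//
//	input := (&sns.CreatePlatformEndpointInput{}).
//		SetPlatformApplicationArn("arn:aws:sns:us-east-1:123456789012:app/GCM/my-app"). // placeholder
//		SetToken("device-registration-id").                                             // placeholder
//		SetCustomUserData("user-42")
//	if err := input.Validate(); err != nil {
//		log.Fatal(err)
//	}
//	out, err := sns.New(session.Must(session.NewSession())).CreatePlatformEndpoint(input)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(aws.StringValue(out.EndpointArn))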
-func (s *CreatePlatformEndpointInput) SetPlatformApplicationArn(v string) *CreatePlatformEndpointInput { - s.PlatformApplicationArn = &v - return s -} - -// SetToken sets the Token field's value. -func (s *CreatePlatformEndpointInput) SetToken(v string) *CreatePlatformEndpointInput { - s.Token = &v - return s -} - -// Response from CreateEndpoint action. -type CreatePlatformEndpointOutput struct { - _ struct{} `type:"structure"` - - // EndpointArn returned from CreateEndpoint action. - EndpointArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePlatformEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePlatformEndpointOutput) GoString() string { - return s.String() -} - -// SetEndpointArn sets the EndpointArn field's value. -func (s *CreatePlatformEndpointOutput) SetEndpointArn(v string) *CreatePlatformEndpointOutput { - s.EndpointArn = &v - return s -} - -type CreateSMSSandboxPhoneNumberInput struct { - _ struct{} `type:"structure"` - - // The language to use for sending the OTP. The default value is en-US. - LanguageCode *string `type:"string" enum:"LanguageCodeString"` - - // The destination phone number to verify. On verification, Amazon SNS adds - // this phone number to the list of verified phone numbers that you can send - // SMS messages to. - // - // PhoneNumber is a required field - PhoneNumber *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateSMSSandboxPhoneNumberInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateSMSSandboxPhoneNumberInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateSMSSandboxPhoneNumberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSMSSandboxPhoneNumberInput"} - if s.PhoneNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLanguageCode sets the LanguageCode field's value. -func (s *CreateSMSSandboxPhoneNumberInput) SetLanguageCode(v string) *CreateSMSSandboxPhoneNumberInput { - s.LanguageCode = &v - return s -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *CreateSMSSandboxPhoneNumberInput) SetPhoneNumber(v string) *CreateSMSSandboxPhoneNumberInput { - s.PhoneNumber = &v - return s -} - -type CreateSMSSandboxPhoneNumberOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateSMSSandboxPhoneNumberOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateSMSSandboxPhoneNumberOutput) GoString() string { - return s.String() -} - -// Input for CreateTopic action. -type CreateTopicInput struct { - _ struct{} `type:"structure"` - - // A map of attributes with their corresponding values. - // - // The following lists the names, descriptions, and values of the special request - // parameters that the CreateTopic action uses: - // - // * DeliveryPolicy – The policy that defines how Amazon SNS retries failed - // deliveries to HTTP/S endpoints. - // - // * DisplayName – The display name to use for a topic with SMS subscriptions. - // - // * FifoTopic – Set to true to create a FIFO topic. - // - // * Policy – The policy that defines who can access your topic. By default, - // only the topic owner can publish or subscribe to the topic. - // - // The following attribute applies only to server-side encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): - // - // * KmsMasterKeyId – The ID of an Amazon Web Services managed customer - // master key (CMK) for Amazon SNS or a custom CMK. For more information, - // see Key Terms (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms). - // For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the Key Management Service API Reference. - // - // The following attributes apply only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): - // - // * FifoTopic – When this is set to true, a FIFO topic is created. - // - // * ContentBasedDeduplication – Enables content-based deduplication for - // FIFO topics. By default, ContentBasedDeduplication is set to false. If - // you create a FIFO topic and this attribute is false, you must specify - // a value for the MessageDeduplicationId parameter for the Publish (https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) - // action. When you set ContentBasedDeduplication to true, Amazon SNS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). (Optional) To override - // the generated value, you can specify a value for the MessageDeduplicationId - // parameter for the Publish action. - Attributes map[string]*string `type:"map"` - - // The name of the topic you want to create. - // - // Constraints: Topic names must be made up of only uppercase and lowercase - // ASCII letters, numbers, underscores, and hyphens, and must be between 1 and - // 256 characters long. - // - // For a FIFO (first-in-first-out) topic, the name must end with the .fifo suffix. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // The list of tags to add to a new topic. - // - // To be able to tag a topic on creation, you must have the sns:CreateTopic - // and sns:TagResource permissions. 
- Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateTopicInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateTopicInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateTopicInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTopicInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributes sets the Attributes field's value. -func (s *CreateTopicInput) SetAttributes(v map[string]*string) *CreateTopicInput { - s.Attributes = v - return s -} - -// SetName sets the Name field's value. -func (s *CreateTopicInput) SetName(v string) *CreateTopicInput { - s.Name = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateTopicInput) SetTags(v []*Tag) *CreateTopicInput { - s.Tags = v - return s -} - -// Response from CreateTopic action. -type CreateTopicOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) assigned to the created topic. - TopicArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateTopicOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateTopicOutput) GoString() string { - return s.String() -} - -// SetTopicArn sets the TopicArn field's value. -func (s *CreateTopicOutput) SetTopicArn(v string) *CreateTopicOutput { - s.TopicArn = &v - return s -} - -// Input for DeleteEndpoint action. -type DeleteEndpointInput struct { - _ struct{} `type:"structure"` - - // EndpointArn of endpoint to delete. - // - // EndpointArn is a required field - EndpointArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteEndpointInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteEndpointInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteEndpointInput"} - if s.EndpointArn == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointArn sets the EndpointArn field's value. -func (s *DeleteEndpointInput) SetEndpointArn(v string) *DeleteEndpointInput { - s.EndpointArn = &v - return s -} - -type DeleteEndpointOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteEndpointOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteEndpointOutput) GoString() string { - return s.String() -} - -// Input for DeletePlatformApplication action. -type DeletePlatformApplicationInput struct { - _ struct{} `type:"structure"` - - // PlatformApplicationArn of platform application object to delete. - // - // PlatformApplicationArn is a required field - PlatformApplicationArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePlatformApplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePlatformApplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePlatformApplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePlatformApplicationInput"} - if s.PlatformApplicationArn == nil { - invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPlatformApplicationArn sets the PlatformApplicationArn field's value. -func (s *DeletePlatformApplicationInput) SetPlatformApplicationArn(v string) *DeletePlatformApplicationInput { - s.PlatformApplicationArn = &v - return s -} - -type DeletePlatformApplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
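// Editor's note: stepping back to the CreateTopicInput type above, a sketch
// that ties together its Name, Attributes, and Tags fields for a FIFO topic,
// assuming aws-sdk-go v1 with imports as in the earlier sketch; the name and
// tag values are placeholders:
//
//	svc := sns.New(session.Must(session.NewSession()))
//	out, err := svc.CreateTopic(&sns.CreateTopicInput{
//		Name: aws.String("orders.fifo"), // FIFO topic names must end with .fifo
//		Attributes: map[string]*string{
//			"FifoTopic":                 aws.String("true"),
//			"ContentBasedDeduplication": aws.String("true"),
//		},
//		Tags: []*sns.Tag{{Key: aws.String("team"), Value: aws.String("payments")}}, // placeholder tag
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(aws.StringValue(out.TopicArn))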
-func (s DeletePlatformApplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePlatformApplicationOutput) GoString() string { - return s.String() -} - -type DeleteSMSSandboxPhoneNumberInput struct { - _ struct{} `type:"structure"` - - // The destination phone number to delete. - // - // PhoneNumber is a required field - PhoneNumber *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteSMSSandboxPhoneNumberInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteSMSSandboxPhoneNumberInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSMSSandboxPhoneNumberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSMSSandboxPhoneNumberInput"} - if s.PhoneNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *DeleteSMSSandboxPhoneNumberInput) SetPhoneNumber(v string) *DeleteSMSSandboxPhoneNumberInput { - s.PhoneNumber = &v - return s -} - -type DeleteSMSSandboxPhoneNumberOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteSMSSandboxPhoneNumberOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteSMSSandboxPhoneNumberOutput) GoString() string { - return s.String() -} - -type DeleteTopicInput struct { - _ struct{} `type:"structure"` - - // The ARN of the topic you want to delete. - // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteTopicInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DeleteTopicInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTopicInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTopicInput"} - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTopicArn sets the TopicArn field's value. -func (s *DeleteTopicInput) SetTopicArn(v string) *DeleteTopicInput { - s.TopicArn = &v - return s -} - -type DeleteTopicOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteTopicOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteTopicOutput) GoString() string { - return s.String() -} - -// The endpoint for mobile app and device. -type Endpoint struct { - _ struct{} `type:"structure"` - - // Attributes for endpoint. - Attributes map[string]*string `type:"map"` - - // The EndpointArn for mobile app and device. - EndpointArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Endpoint) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Endpoint) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *Endpoint) SetAttributes(v map[string]*string) *Endpoint { - s.Attributes = v - return s -} - -// SetEndpointArn sets the EndpointArn field's value. -func (s *Endpoint) SetEndpointArn(v string) *Endpoint { - s.EndpointArn = &v - return s -} - -// Input for GetEndpointAttributes action. -type GetEndpointAttributesInput struct { - _ struct{} `type:"structure"` - - // EndpointArn for GetEndpointAttributes input. - // - // EndpointArn is a required field - EndpointArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetEndpointAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GetEndpointAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetEndpointAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetEndpointAttributesInput"} - if s.EndpointArn == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndpointArn sets the EndpointArn field's value. -func (s *GetEndpointAttributesInput) SetEndpointArn(v string) *GetEndpointAttributesInput { - s.EndpointArn = &v - return s -} - -// Response from GetEndpointAttributes of the EndpointArn. -type GetEndpointAttributesOutput struct { - _ struct{} `type:"structure"` - - // Attributes include the following: - // - // * CustomUserData – arbitrary user data to associate with the endpoint. - // Amazon SNS does not use this data. The data must be in UTF-8 format and - // less than 2KB. - // - // * Enabled – flag that enables/disables delivery to the endpoint. Amazon - // SNS will set this to false when a notification service indicates to Amazon - // SNS that the endpoint is invalid. Users can set it back to true, typically - // after updating Token. - // - // * Token – device token, also referred to as a registration id, for an - // app and mobile device. This is returned from the notification service - // when an app and mobile device are registered with the notification service. - // The device token for the iOS platform is returned in lowercase. - Attributes map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetEndpointAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetEndpointAttributesOutput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *GetEndpointAttributesOutput) SetAttributes(v map[string]*string) *GetEndpointAttributesOutput { - s.Attributes = v - return s -} - -// Input for GetPlatformApplicationAttributes action. -type GetPlatformApplicationAttributesInput struct { - _ struct{} `type:"structure"` - - // PlatformApplicationArn for GetPlatformApplicationAttributesInput. - // - // PlatformApplicationArn is a required field - PlatformApplicationArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPlatformApplicationAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
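// Editor's note: a sketch of reading the endpoint attribute keys documented
// above; Enabled is commonly checked before re-publishing to an endpoint.
// Assumes aws-sdk-go v1 with imports as in the earlier sketch and a
// placeholder endpoint ARN:
//
//	svc := sns.New(session.Must(session.NewSession()))
//	out, err := svc.GetEndpointAttributes(&sns.GetEndpointAttributesInput{
//		EndpointArn: aws.String("arn:aws:sns:us-east-1:123456789012:endpoint/GCM/my-app/endpoint-id"), // placeholder
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if aws.StringValue(out.Attributes["Enabled"]) == "false" {
//		// refresh Token via SetEndpointAttributes, then set Enabled back to "true"
//	}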
-func (s GetPlatformApplicationAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPlatformApplicationAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPlatformApplicationAttributesInput"} - if s.PlatformApplicationArn == nil { - invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPlatformApplicationArn sets the PlatformApplicationArn field's value. -func (s *GetPlatformApplicationAttributesInput) SetPlatformApplicationArn(v string) *GetPlatformApplicationAttributesInput { - s.PlatformApplicationArn = &v - return s -} - -// Response for GetPlatformApplicationAttributes action. -type GetPlatformApplicationAttributesOutput struct { - _ struct{} `type:"structure"` - - // Attributes include the following: - // - // * AppleCertificateExpiryDate – The expiry date of the SSL certificate - // used to configure certificate-based authentication. - // - // * ApplePlatformTeamID – The Apple developer account ID used to configure - // token-based authentication. - // - // * ApplePlatformBundleID – The app identifier used to configure token-based - // authentication. - // - // * EventEndpointCreated – Topic ARN to which EndpointCreated event notifications - // should be sent. - // - // * EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications - // should be sent. - // - // * EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications - // should be sent. - // - // * EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications - // should be sent upon Direct Publish delivery failure (permanent) to one - // of the application's endpoints. - Attributes map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPlatformApplicationAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPlatformApplicationAttributesOutput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *GetPlatformApplicationAttributesOutput) SetAttributes(v map[string]*string) *GetPlatformApplicationAttributesOutput { - s.Attributes = v - return s -} - -// The input for the GetSMSAttributes request. -type GetSMSAttributesInput struct { - _ struct{} `type:"structure"` - - // A list of the individual attribute names, such as MonthlySpendLimit, for - // which you want values. - // - // For all attribute names, see SetSMSAttributes (https://docs.aws.amazon.com/sns/latest/api/API_SetSMSAttributes.html). - // - // If you don't use this parameter, Amazon SNS returns all SMS attributes. - Attributes []*string `locationName:"attributes" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSMSAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSMSAttributesInput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *GetSMSAttributesInput) SetAttributes(v []*string) *GetSMSAttributesInput { - s.Attributes = v - return s -} - -// The response from the GetSMSAttributes request. -type GetSMSAttributesOutput struct { - _ struct{} `type:"structure"` - - // The SMS attribute names and their values. - Attributes map[string]*string `locationName:"attributes" type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSMSAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSMSAttributesOutput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *GetSMSAttributesOutput) SetAttributes(v map[string]*string) *GetSMSAttributesOutput { - s.Attributes = v - return s -} - -type GetSMSSandboxAccountStatusInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSMSSandboxAccountStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSMSSandboxAccountStatusInput) GoString() string { - return s.String() -} - -type GetSMSSandboxAccountStatusOutput struct { - _ struct{} `type:"structure"` - - // Indicates whether the calling Amazon Web Services account is in the SMS sandbox. - // - // IsInSandbox is a required field - IsInSandbox *bool `type:"boolean" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSMSSandboxAccountStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
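// Editor's note: a sketch of the sandbox-status check above, which takes no
// parameters, assuming aws-sdk-go v1 with imports as in the earlier sketch:
//
//	svc := sns.New(session.Must(session.NewSession()))
//	status, err := svc.GetSMSSandboxAccountStatus(&sns.GetSMSSandboxAccountStatusInput{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if aws.BoolValue(status.IsInSandbox) {
//		// SMS can only reach verified sandbox destination numbers here
//	}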
-func (s GetSMSSandboxAccountStatusOutput) GoString() string { - return s.String() -} - -// SetIsInSandbox sets the IsInSandbox field's value. -func (s *GetSMSSandboxAccountStatusOutput) SetIsInSandbox(v bool) *GetSMSSandboxAccountStatusOutput { - s.IsInSandbox = &v - return s -} - -// Input for GetSubscriptionAttributes. -type GetSubscriptionAttributesInput struct { - _ struct{} `type:"structure"` - - // The ARN of the subscription whose properties you want to get. - // - // SubscriptionArn is a required field - SubscriptionArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSubscriptionAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSubscriptionAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSubscriptionAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSubscriptionAttributesInput"} - if s.SubscriptionArn == nil { - invalidParams.Add(request.NewErrParamRequired("SubscriptionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSubscriptionArn sets the SubscriptionArn field's value. -func (s *GetSubscriptionAttributesInput) SetSubscriptionArn(v string) *GetSubscriptionAttributesInput { - s.SubscriptionArn = &v - return s -} - -// Response for GetSubscriptionAttributes action. -type GetSubscriptionAttributesOutput struct { - _ struct{} `type:"structure"` - - // A map of the subscription's attributes. Attributes in this map include the - // following: - // - // * ConfirmationWasAuthenticated – true if the subscription confirmation - // request was authenticated. - // - // * DeliveryPolicy – The JSON serialization of the subscription's delivery - // policy. - // - // * EffectiveDeliveryPolicy – The JSON serialization of the effective - // delivery policy that takes into account the topic delivery policy and - // account system defaults. - // - // * FilterPolicy – The filter policy JSON that is assigned to the subscription. - // For more information, see Amazon SNS Message Filtering (https://docs.aws.amazon.com/sns/latest/dg/sns-message-filtering.html) - // in the Amazon SNS Developer Guide. - // - // * Owner – The Amazon Web Services account ID of the subscription's owner. - // - // * PendingConfirmation – true if the subscription hasn't been confirmed. - // To confirm a pending subscription, call the ConfirmSubscription action - // with a confirmation token. - // - // * RawMessageDelivery – true if raw message delivery is enabled for the - // subscription. Raw messages are free of JSON formatting and can be sent - // to HTTP/S and Amazon SQS endpoints. - // - // * RedrivePolicy – When specified, sends undeliverable messages to the - // specified Amazon SQS dead-letter queue. 
Messages that can't be delivered - // due to client errors (for example, when the subscribed endpoint is unreachable) - // or server errors (for example, when the service that powers the subscribed - // endpoint becomes unavailable) are held in the dead-letter queue for further - // analysis or reprocessing. - // - // * SubscriptionArn – The subscription's ARN. - // - // * TopicArn – The topic ARN that the subscription is associated with. - // - // The following attribute applies only to Amazon Kinesis Data Firehose delivery - // stream subscriptions: - // - // * SubscriptionRoleArn – The ARN of the IAM role that has the following: - // Permission to write to the Kinesis Data Firehose delivery stream Amazon - // SNS listed as a trusted entity Specifying a valid ARN for this attribute - // is required for Kinesis Data Firehose delivery stream subscriptions. For - // more information, see Fanout to Kinesis Data Firehose delivery streams - // (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) - // in the Amazon SNS Developer Guide. - Attributes map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSubscriptionAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSubscriptionAttributesOutput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *GetSubscriptionAttributesOutput) SetAttributes(v map[string]*string) *GetSubscriptionAttributesOutput { - s.Attributes = v - return s -} - -// Input for GetTopicAttributes action. -type GetTopicAttributesInput struct { - _ struct{} `type:"structure"` - - // The ARN of the topic whose properties you want to get. - // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTopicAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTopicAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTopicAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTopicAttributesInput"} - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTopicArn sets the TopicArn field's value. -func (s *GetTopicAttributesInput) SetTopicArn(v string) *GetTopicAttributesInput { - s.TopicArn = &v - return s -} - -// Response for GetTopicAttributes action. 
-type GetTopicAttributesOutput struct { - _ struct{} `type:"structure"` - - // A map of the topic's attributes. Attributes in this map include the following: - // - // * DeliveryPolicy – The JSON serialization of the topic's delivery policy. - // - // * DisplayName – The human-readable name used in the From field for notifications - // to email and email-json endpoints. - // - // * Owner – The Amazon Web Services account ID of the topic's owner. - // - // * Policy – The JSON serialization of the topic's access control policy. - // - // * SubscriptionsConfirmed – The number of confirmed subscriptions for - // the topic. - // - // * SubscriptionsDeleted – The number of deleted subscriptions for the - // topic. - // - // * SubscriptionsPending – The number of subscriptions pending confirmation - // for the topic. - // - // * TopicArn – The topic's ARN. - // - // * EffectiveDeliveryPolicy – The JSON serialization of the effective - // delivery policy, taking system defaults into account. - // - // The following attribute applies only to server-side-encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): - // - // * KmsMasterKeyId - The ID of an Amazon Web Services managed customer master - // key (CMK) for Amazon SNS or a custom CMK. For more information, see Key - // Terms (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms). - // For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the Key Management Service API Reference. - // - // The following attributes apply only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): - // - // * FifoTopic – When this is set to true, a FIFO topic is created. - // - // * ContentBasedDeduplication – Enables content-based deduplication for - // FIFO topics. By default, ContentBasedDeduplication is set to false. If - // you create a FIFO topic and this attribute is false, you must specify - // a value for the MessageDeduplicationId parameter for the Publish (https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) - // action. When you set ContentBasedDeduplication to true, Amazon SNS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). (Optional) To override - // the generated value, you can specify a value for the MessageDeduplicationId - // parameter for the Publish action. - Attributes map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTopicAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTopicAttributesOutput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *GetTopicAttributesOutput) SetAttributes(v map[string]*string) *GetTopicAttributesOutput { - s.Attributes = v - return s -} - -// Input for ListEndpointsByPlatformApplication action. 
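// Editor's note: a sketch of fetching the topic attribute map documented
// above, assuming aws-sdk-go v1 with imports as in the earlier sketch and a
// placeholder topic ARN:
//
//	svc := sns.New(session.Must(session.NewSession()))
//	out, err := svc.GetTopicAttributes(&sns.GetTopicAttributesInput{
//		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // placeholder
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("pending subscriptions:", aws.StringValue(out.Attributes["SubscriptionsPending"]))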
-type ListEndpointsByPlatformApplicationInput struct { - _ struct{} `type:"structure"` - - // NextToken string is used when calling ListEndpointsByPlatformApplication - // action to retrieve additional records that are available after the first - // page results. - NextToken *string `type:"string"` - - // PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action. - // - // PlatformApplicationArn is a required field - PlatformApplicationArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEndpointsByPlatformApplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEndpointsByPlatformApplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListEndpointsByPlatformApplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListEndpointsByPlatformApplicationInput"} - if s.PlatformApplicationArn == nil { - invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNextToken sets the NextToken field's value. -func (s *ListEndpointsByPlatformApplicationInput) SetNextToken(v string) *ListEndpointsByPlatformApplicationInput { - s.NextToken = &v - return s -} - -// SetPlatformApplicationArn sets the PlatformApplicationArn field's value. -func (s *ListEndpointsByPlatformApplicationInput) SetPlatformApplicationArn(v string) *ListEndpointsByPlatformApplicationInput { - s.PlatformApplicationArn = &v - return s -} - -// Response for ListEndpointsByPlatformApplication action. -type ListEndpointsByPlatformApplicationOutput struct { - _ struct{} `type:"structure"` - - // Endpoints returned for ListEndpointsByPlatformApplication action. - Endpoints []*Endpoint `type:"list"` - - // NextToken string is returned when calling ListEndpointsByPlatformApplication - // action if additional records are available after the first page results. - NextToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEndpointsByPlatformApplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEndpointsByPlatformApplicationOutput) GoString() string { - return s.String() -} - -// SetEndpoints sets the Endpoints field's value. -func (s *ListEndpointsByPlatformApplicationOutput) SetEndpoints(v []*Endpoint) *ListEndpointsByPlatformApplicationOutput { - s.Endpoints = v - return s -} - -// SetNextToken sets the NextToken field's value. 
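// Editor's note: NextToken drives manual pagination for the List* calls; a
// sketch that walks every page of endpoints, assuming aws-sdk-go v1 with
// imports as in the earlier sketch and a placeholder application ARN:
//
//	svc := sns.New(session.Must(session.NewSession()))
//	input := &sns.ListEndpointsByPlatformApplicationInput{
//		PlatformApplicationArn: aws.String("arn:aws:sns:us-east-1:123456789012:app/GCM/my-app"), // placeholder
//	}
//	for {
//		page, err := svc.ListEndpointsByPlatformApplication(input)
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, e := range page.Endpoints {
//			fmt.Println(aws.StringValue(e.EndpointArn))
//		}
//		if page.NextToken == nil {
//			break // no more pages
//		}
//		input.NextToken = page.NextToken
//	}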
-func (s *ListEndpointsByPlatformApplicationOutput) SetNextToken(v string) *ListEndpointsByPlatformApplicationOutput { - s.NextToken = &v - return s -} - -type ListOriginationNumbersInput struct { - _ struct{} `type:"structure"` - - // The maximum number of origination numbers to return. - MaxResults *int64 `min:"1" type:"integer"` - - // Token that the previous ListOriginationNumbers request returns. - NextToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListOriginationNumbersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListOriginationNumbersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListOriginationNumbersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListOriginationNumbersInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListOriginationNumbersInput) SetMaxResults(v int64) *ListOriginationNumbersInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListOriginationNumbersInput) SetNextToken(v string) *ListOriginationNumbersInput { - s.NextToken = &v - return s -} - -type ListOriginationNumbersOutput struct { - _ struct{} `type:"structure"` - - // A NextToken string is returned when you call the ListOriginationNumbers operation - // if additional pages of records are available. - NextToken *string `type:"string"` - - // A list of the calling account's verified and pending origination numbers. - PhoneNumbers []*PhoneNumberInformation `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListOriginationNumbersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListOriginationNumbersOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListOriginationNumbersOutput) SetNextToken(v string) *ListOriginationNumbersOutput { - s.NextToken = &v - return s -} - -// SetPhoneNumbers sets the PhoneNumbers field's value. -func (s *ListOriginationNumbersOutput) SetPhoneNumbers(v []*PhoneNumberInformation) *ListOriginationNumbersOutput { - s.PhoneNumbers = v - return s -} - -// The input for the ListPhoneNumbersOptedOut action. 
-type ListPhoneNumbersOptedOutInput struct { - _ struct{} `type:"structure"` - - // A NextToken string is used when you call the ListPhoneNumbersOptedOut action - // to retrieve additional records that are available after the first page of - // results. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPhoneNumbersOptedOutInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPhoneNumbersOptedOutInput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPhoneNumbersOptedOutInput) SetNextToken(v string) *ListPhoneNumbersOptedOutInput { - s.NextToken = &v - return s -} - -// The response from the ListPhoneNumbersOptedOut action. -type ListPhoneNumbersOptedOutOutput struct { - _ struct{} `type:"structure"` - - // A NextToken string is returned when you call the ListPhoneNumbersOptedOut - // action if additional records are available after the first page of results. - NextToken *string `locationName:"nextToken" type:"string"` - - // A list of phone numbers that are opted out of receiving SMS messages. The - // list is paginated, and each page can contain up to 100 phone numbers. - PhoneNumbers []*string `locationName:"phoneNumbers" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPhoneNumbersOptedOutOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPhoneNumbersOptedOutOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPhoneNumbersOptedOutOutput) SetNextToken(v string) *ListPhoneNumbersOptedOutOutput { - s.NextToken = &v - return s -} - -// SetPhoneNumbers sets the PhoneNumbers field's value. -func (s *ListPhoneNumbersOptedOutOutput) SetPhoneNumbers(v []*string) *ListPhoneNumbersOptedOutOutput { - s.PhoneNumbers = v - return s -} - -// Input for ListPlatformApplications action. -type ListPlatformApplicationsInput struct { - _ struct{} `type:"structure"` - - // NextToken string is used when calling ListPlatformApplications action to - // retrieve additional records that are available after the first page results. - NextToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ListPlatformApplicationsInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListPlatformApplicationsInput) GoString() string {
-	return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListPlatformApplicationsInput) SetNextToken(v string) *ListPlatformApplicationsInput {
-	s.NextToken = &v
-	return s
-}
-
-// Response for ListPlatformApplications action.
-type ListPlatformApplicationsOutput struct {
-	_ struct{} `type:"structure"`
-
-	// NextToken string is returned when calling ListPlatformApplications action
-	// if additional records are available after the first page of results.
-	NextToken *string `type:"string"`
-
-	// Platform applications returned when calling ListPlatformApplications action.
-	PlatformApplications []*PlatformApplication `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListPlatformApplicationsOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListPlatformApplicationsOutput) GoString() string {
-	return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListPlatformApplicationsOutput) SetNextToken(v string) *ListPlatformApplicationsOutput {
-	s.NextToken = &v
-	return s
-}
-
-// SetPlatformApplications sets the PlatformApplications field's value.
-func (s *ListPlatformApplicationsOutput) SetPlatformApplications(v []*PlatformApplication) *ListPlatformApplicationsOutput {
-	s.PlatformApplications = v
-	return s
-}
-
-type ListSMSSandboxPhoneNumbersInput struct {
-	_ struct{} `type:"structure"`
-
-	// The maximum number of phone numbers to return.
-	MaxResults *int64 `min:"1" type:"integer"`
-
-	// Token that the previous ListSMSSandboxPhoneNumbers request returns.
-	NextToken *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListSMSSandboxPhoneNumbersInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListSMSSandboxPhoneNumbersInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
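// ListPlatformApplications is the degenerate case of the same loop: only
// NextToken matters. A single-page sketch with the shared client; further
// pages, when out.NextToken is non-nil, are fetched as in the loops above:
//
//	out, err := client.ListPlatformApplications(&sns.ListPlatformApplicationsInput{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, app := range out.PlatformApplications {
//		fmt.Println(aws.StringValue(app.PlatformApplicationArn))
//	}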
-func (s *ListSMSSandboxPhoneNumbersInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListSMSSandboxPhoneNumbersInput"}
-	if s.MaxResults != nil && *s.MaxResults < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListSMSSandboxPhoneNumbersInput) SetMaxResults(v int64) *ListSMSSandboxPhoneNumbersInput {
-	s.MaxResults = &v
-	return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListSMSSandboxPhoneNumbersInput) SetNextToken(v string) *ListSMSSandboxPhoneNumbersInput {
-	s.NextToken = &v
-	return s
-}
-
-type ListSMSSandboxPhoneNumbersOutput struct {
-	_ struct{} `type:"structure"`
-
-	// A NextToken string is returned when you call the ListSMSSandboxPhoneNumbers
-	// operation if additional pages of records are available.
-	NextToken *string `type:"string"`
-
-	// A list of the calling account's pending and verified phone numbers.
-	//
-	// PhoneNumbers is a required field
-	PhoneNumbers []*SMSSandboxPhoneNumber `type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListSMSSandboxPhoneNumbersOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListSMSSandboxPhoneNumbersOutput) GoString() string {
-	return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListSMSSandboxPhoneNumbersOutput) SetNextToken(v string) *ListSMSSandboxPhoneNumbersOutput {
-	s.NextToken = &v
-	return s
-}
-
-// SetPhoneNumbers sets the PhoneNumbers field's value.
-func (s *ListSMSSandboxPhoneNumbersOutput) SetPhoneNumbers(v []*SMSSandboxPhoneNumber) *ListSMSSandboxPhoneNumbersOutput {
-	s.PhoneNumbers = v
-	return s
-}
-
-// Input for ListSubscriptionsByTopic action.
-type ListSubscriptionsByTopicInput struct {
-	_ struct{} `type:"structure"`
-
-	// Token returned by the previous ListSubscriptionsByTopic request.
-	NextToken *string `type:"string"`
-
-	// The ARN of the topic for which you wish to find subscriptions.
-	//
-	// TopicArn is a required field
-	TopicArn *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListSubscriptionsByTopicInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListSubscriptionsByTopicInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
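// The sandbox listing above reports each destination number together with its
// verification status (values come from the SMSSandboxPhoneNumberVerificationStatus
// enum). A minimal single-page sketch with the shared client:
//
//	out, err := client.ListSMSSandboxPhoneNumbers(&sns.ListSMSSandboxPhoneNumbersInput{
//		MaxResults: aws.Int64(20),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, n := range out.PhoneNumbers {
//		fmt.Printf("%s is %s\n", aws.StringValue(n.PhoneNumber), aws.StringValue(n.Status))
//	}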
-func (s *ListSubscriptionsByTopicInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListSubscriptionsByTopicInput"} - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNextToken sets the NextToken field's value. -func (s *ListSubscriptionsByTopicInput) SetNextToken(v string) *ListSubscriptionsByTopicInput { - s.NextToken = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *ListSubscriptionsByTopicInput) SetTopicArn(v string) *ListSubscriptionsByTopicInput { - s.TopicArn = &v - return s -} - -// Response for ListSubscriptionsByTopic action. -type ListSubscriptionsByTopicOutput struct { - _ struct{} `type:"structure"` - - // Token to pass along to the next ListSubscriptionsByTopic request. This element - // is returned if there are more subscriptions to retrieve. - NextToken *string `type:"string"` - - // A list of subscriptions. - Subscriptions []*Subscription `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSubscriptionsByTopicOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSubscriptionsByTopicOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListSubscriptionsByTopicOutput) SetNextToken(v string) *ListSubscriptionsByTopicOutput { - s.NextToken = &v - return s -} - -// SetSubscriptions sets the Subscriptions field's value. -func (s *ListSubscriptionsByTopicOutput) SetSubscriptions(v []*Subscription) *ListSubscriptionsByTopicOutput { - s.Subscriptions = v - return s -} - -// Input for ListSubscriptions action. -type ListSubscriptionsInput struct { - _ struct{} `type:"structure"` - - // Token returned by the previous ListSubscriptions request. - NextToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSubscriptionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSubscriptionsInput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListSubscriptionsInput) SetNextToken(v string) *ListSubscriptionsInput { - s.NextToken = &v - return s -} - -// Response for ListSubscriptions action -type ListSubscriptionsOutput struct { - _ struct{} `type:"structure"` - - // Token to pass along to the next ListSubscriptions request. This element is - // returned if there are more subscriptions to retrieve. - NextToken *string `type:"string"` - - // A list of subscriptions. 
- Subscriptions []*Subscription `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSubscriptionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSubscriptionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListSubscriptionsOutput) SetNextToken(v string) *ListSubscriptionsOutput { - s.NextToken = &v - return s -} - -// SetSubscriptions sets the Subscriptions field's value. -func (s *ListSubscriptionsOutput) SetSubscriptions(v []*Subscription) *ListSubscriptionsOutput { - s.Subscriptions = v - return s -} - -type ListTagsForResourceInput struct { - _ struct{} `type:"structure"` - - // The ARN of the topic for which to list tags. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTagsForResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTagsForResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { - s.ResourceArn = &v - return s -} - -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure"` - - // The tags associated with the specified topic. - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTagsForResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
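// ListTagsForResource is the one lister here that is not paginated: it takes a
// required topic ARN (minimum length 1, enforced by Validate above) and returns
// the complete tag set. A minimal sketch with the shared client; the ARN is a
// placeholder:
//
//	out, err := client.ListTagsForResource(&sns.ListTagsForResourceInput{
//		ResourceArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, t := range out.Tags {
//		fmt.Println(aws.StringValue(t.Key), "=", aws.StringValue(t.Value))
//	}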
-func (s ListTagsForResourceOutput) GoString() string { - return s.String() -} - -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { - s.Tags = v - return s -} - -type ListTopicsInput struct { - _ struct{} `type:"structure"` - - // Token returned by the previous ListTopics request. - NextToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTopicsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTopicsInput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTopicsInput) SetNextToken(v string) *ListTopicsInput { - s.NextToken = &v - return s -} - -// Response for ListTopics action. -type ListTopicsOutput struct { - _ struct{} `type:"structure"` - - // Token to pass along to the next ListTopics request. This element is returned - // if there are additional topics to retrieve. - NextToken *string `type:"string"` - - // A list of topic ARNs. - Topics []*Topic `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTopicsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTopicsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTopicsOutput) SetNextToken(v string) *ListTopicsOutput { - s.NextToken = &v - return s -} - -// SetTopics sets the Topics field's value. -func (s *ListTopicsOutput) SetTopics(v []*Topic) *ListTopicsOutput { - s.Topics = v - return s -} - -// The user-specified message attribute value. For string data types, the value -// attribute has the same restrictions on the content as the message body. For -// more information, see Publish (https://docs.aws.amazon.com/sns/latest/api/API_Publish.html). -// -// Name, type, and value must not be empty or null. In addition, the message -// body should not be empty or null. All parts of the message attribute, including -// name, type, and value, are included in the message size restriction, which -// is currently 256 KB (262,144 bytes). For more information, see Amazon SNS -// message attributes (https://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html) -// and Publishing to a mobile phone (https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html) -// in the Amazon SNS Developer Guide. -type MessageAttributeValue struct { - _ struct{} `type:"structure"` - - // Binary type attributes can store any binary data, for example, compressed - // data, encrypted data, or images. 
- // BinaryValue is automatically base64 encoded/decoded by the SDK. - BinaryValue []byte `type:"blob"` - - // Amazon SNS supports the following logical data types: String, String.Array, - // Number, and Binary. For more information, see Message Attribute Data Types - // (https://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html#SNSMessageAttributes.DataTypes). - // - // DataType is a required field - DataType *string `type:"string" required:"true"` - - // Strings are Unicode with UTF8 binary encoding. For a list of code values, - // see ASCII Printable Characters (https://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). - StringValue *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MessageAttributeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MessageAttributeValue) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MessageAttributeValue) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MessageAttributeValue"} - if s.DataType == nil { - invalidParams.Add(request.NewErrParamRequired("DataType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBinaryValue sets the BinaryValue field's value. -func (s *MessageAttributeValue) SetBinaryValue(v []byte) *MessageAttributeValue { - s.BinaryValue = v - return s -} - -// SetDataType sets the DataType field's value. -func (s *MessageAttributeValue) SetDataType(v string) *MessageAttributeValue { - s.DataType = &v - return s -} - -// SetStringValue sets the StringValue field's value. -func (s *MessageAttributeValue) SetStringValue(v string) *MessageAttributeValue { - s.StringValue = &v - return s -} - -// Input for the OptInPhoneNumber action. -type OptInPhoneNumberInput struct { - _ struct{} `type:"structure"` - - // The phone number to opt in. Use E.164 format. - // - // PhoneNumber is a required field - PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OptInPhoneNumberInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OptInPhoneNumberInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
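// MessageAttributeValue is the one struct in this file that callers typically
// build by hand: a required DataType plus a string or binary payload (Number
// attributes are carried in StringValue). A minimal sketch of attaching
// attributes to a publish, with the shared client and a placeholder ARN:
//
//	_, err := client.Publish(&sns.PublishInput{
//		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
//		Message:  aws.String("order shipped"),
//		MessageAttributes: map[string]*sns.MessageAttributeValue{
//			"carrier": {DataType: aws.String("String"), StringValue: aws.String("UPS")},
//			"weight":  {DataType: aws.String("Number"), StringValue: aws.String("2.4")},
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}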
-func (s *OptInPhoneNumberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OptInPhoneNumberInput"} - if s.PhoneNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *OptInPhoneNumberInput) SetPhoneNumber(v string) *OptInPhoneNumberInput { - s.PhoneNumber = &v - return s -} - -// The response for the OptInPhoneNumber action. -type OptInPhoneNumberOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OptInPhoneNumberOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OptInPhoneNumberOutput) GoString() string { - return s.String() -} - -// A list of phone numbers and their metadata. -type PhoneNumberInformation struct { - _ struct{} `type:"structure"` - - // The date and time when the phone number was created. - CreatedAt *time.Time `type:"timestamp"` - - // The two-character code for the country or region, in ISO 3166-1 alpha-2 format. - Iso2CountryCode *string `type:"string"` - - // The capabilities of each phone number. - NumberCapabilities []*string `type:"list" enum:"NumberCapability"` - - // The phone number. - PhoneNumber *string `type:"string"` - - // The list of supported routes. - RouteType *string `type:"string" enum:"RouteType"` - - // The status of the phone number. - Status *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PhoneNumberInformation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PhoneNumberInformation) GoString() string { - return s.String() -} - -// SetCreatedAt sets the CreatedAt field's value. -func (s *PhoneNumberInformation) SetCreatedAt(v time.Time) *PhoneNumberInformation { - s.CreatedAt = &v - return s -} - -// SetIso2CountryCode sets the Iso2CountryCode field's value. -func (s *PhoneNumberInformation) SetIso2CountryCode(v string) *PhoneNumberInformation { - s.Iso2CountryCode = &v - return s -} - -// SetNumberCapabilities sets the NumberCapabilities field's value. -func (s *PhoneNumberInformation) SetNumberCapabilities(v []*string) *PhoneNumberInformation { - s.NumberCapabilities = v - return s -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *PhoneNumberInformation) SetPhoneNumber(v string) *PhoneNumberInformation { - s.PhoneNumber = &v - return s -} - -// SetRouteType sets the RouteType field's value. 
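// OptInPhoneNumber reverses an earlier opt-out; per the SNS limits, a given
// number can be opted back in only once every 30 days. A minimal sketch with
// the shared client; the E.164 number is a placeholder:
//
//	_, err := client.OptInPhoneNumber(&sns.OptInPhoneNumberInput{
//		PhoneNumber: aws.String("+15555550100"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}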
-func (s *PhoneNumberInformation) SetRouteType(v string) *PhoneNumberInformation { - s.RouteType = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *PhoneNumberInformation) SetStatus(v string) *PhoneNumberInformation { - s.Status = &v - return s -} - -// Platform application object. -type PlatformApplication struct { - _ struct{} `type:"structure"` - - // Attributes for platform application object. - Attributes map[string]*string `type:"map"` - - // PlatformApplicationArn for platform application object. - PlatformApplicationArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PlatformApplication) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PlatformApplication) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *PlatformApplication) SetAttributes(v map[string]*string) *PlatformApplication { - s.Attributes = v - return s -} - -// SetPlatformApplicationArn sets the PlatformApplicationArn field's value. -func (s *PlatformApplication) SetPlatformApplicationArn(v string) *PlatformApplication { - s.PlatformApplicationArn = &v - return s -} - -type PublishBatchInput struct { - _ struct{} `type:"structure"` - - // A list of PublishBatch request entries to be sent to the SNS topic. - // - // PublishBatchRequestEntries is a required field - PublishBatchRequestEntries []*PublishBatchRequestEntry `type:"list" required:"true"` - - // The Amazon resource name (ARN) of the topic you want to batch publish to. - // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
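// PublishBatch sends up to ten entries to one topic per request; each entry
// needs a batch-unique Id and a Message, and the outcome is reported as
// Successful and Failed slices rather than a single error. A minimal sketch
// with the shared client and a placeholder ARN:
//
//	out, err := client.PublishBatch(&sns.PublishBatchInput{
//		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
//		PublishBatchRequestEntries: []*sns.PublishBatchRequestEntry{
//			{Id: aws.String("m1"), Message: aws.String("first")},
//			{Id: aws.String("m2"), Message: aws.String("second")},
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, f := range out.Failed {
//		fmt.Println("failed:", aws.StringValue(f.Id), aws.StringValue(f.Code))
//	}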
-func (s *PublishBatchInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PublishBatchInput"} - if s.PublishBatchRequestEntries == nil { - invalidParams.Add(request.NewErrParamRequired("PublishBatchRequestEntries")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - if s.PublishBatchRequestEntries != nil { - for i, v := range s.PublishBatchRequestEntries { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PublishBatchRequestEntries", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPublishBatchRequestEntries sets the PublishBatchRequestEntries field's value. -func (s *PublishBatchInput) SetPublishBatchRequestEntries(v []*PublishBatchRequestEntry) *PublishBatchInput { - s.PublishBatchRequestEntries = v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *PublishBatchInput) SetTopicArn(v string) *PublishBatchInput { - s.TopicArn = &v - return s -} - -type PublishBatchOutput struct { - _ struct{} `type:"structure"` - - // A list of failed PublishBatch responses. - Failed []*BatchResultErrorEntry `type:"list"` - - // A list of successful PublishBatch responses. - Successful []*PublishBatchResultEntry `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchOutput) GoString() string { - return s.String() -} - -// SetFailed sets the Failed field's value. -func (s *PublishBatchOutput) SetFailed(v []*BatchResultErrorEntry) *PublishBatchOutput { - s.Failed = v - return s -} - -// SetSuccessful sets the Successful field's value. -func (s *PublishBatchOutput) SetSuccessful(v []*PublishBatchResultEntry) *PublishBatchOutput { - s.Successful = v - return s -} - -// Contains the details of a single Amazon SNS message along with an Id that -// identifies a message within the batch. -type PublishBatchRequestEntry struct { - _ struct{} `type:"structure"` - - // An identifier for the message in this batch. - // - // The Ids of a batch request must be unique within a request. - // - // This identifier can have up to 80 characters. The following characters are - // accepted: alphanumeric characters, hyphens(-), and underscores (_). - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // The body of the message. - // - // Message is a required field - Message *string `type:"string" required:"true"` - - // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SNS message attributes (https://docs.aws.amazon.com/sns/latest/dg/sns-message-attributes.html) - // in the Amazon SNS Developer Guide. - MessageAttributes map[string]*MessageAttributeValue `locationNameKey:"Name" locationNameValue:"Value" type:"map"` - - // This parameter applies only to FIFO (first-in-first-out) topics. 
- // - // The token used for deduplication of messages within a 5-minute minimum deduplication - // interval. If a message with a particular MessageDeduplicationId is sent successfully, - // subsequent messages with the same MessageDeduplicationId are accepted successfully - // but aren't delivered. - // - // * Every message must have a unique MessageDeduplicationId. You may provide - // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId - // and you enable ContentBasedDeduplication for your topic, Amazon SNS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). If you don't provide - // a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication - // set, the action fails with an error. If the topic has a ContentBasedDeduplication - // set, your MessageDeduplicationId overrides the generated one. - // - // * When ContentBasedDeduplication is in effect, messages with identical - // content sent within the deduplication interval are treated as duplicates - // and only one copy of the message is delivered. - // - // * If you send one message with ContentBasedDeduplication enabled, and - // then another message with a MessageDeduplicationId that is the same as - // the one generated for the first MessageDeduplicationId, the two messages - // are treated as duplicates and only one copy of the message is delivered. - // - // The MessageDeduplicationId is available to the consumer of the message (this - // can be useful for troubleshooting delivery issues). - // - // If a message is sent successfully but the acknowledgement is lost and the - // message is resent with the same MessageDeduplicationId after the deduplication - // interval, Amazon SNS can't detect duplicate messages. - // - // Amazon SNS continues to keep track of the message deduplication ID even after - // the message is received and deleted. - // - // The length of MessageDeduplicationId is 128 characters. - // - // MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) - // and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - MessageDeduplicationId *string `type:"string"` - - // This parameter applies only to FIFO (first-in-first-out) topics. - // - // The tag that specifies that a message belongs to a specific message group. - // Messages that belong to the same message group are processed in a FIFO manner - // (however, messages in different message groups might be processed out of - // order). To interleave multiple ordered streams within a single topic, use - // MessageGroupId values (for example, session data for multiple users). In - // this scenario, multiple consumers can process the topic, but the session - // data of each user is processed in a FIFO fashion. - // - // You must associate a non-empty MessageGroupId with a message. If you don't - // provide a MessageGroupId, the action fails. - // - // The length of MessageGroupId is 128 characters. - // - // MessageGroupId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation - // (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - // - // MessageGroupId is required for FIFO topics. You can't use it for standard - // topics. - MessageGroupId *string `type:"string"` - - // Set MessageStructure to json if you want to send a different message for - // each protocol. For example, using one publish action, you can send a short - // message to your SMS subscribers and a longer message to your email subscribers. 
- // If you set MessageStructure to json, the value of the Message parameter must: - // - // * be a syntactically valid JSON object; and - // - // * contain at least a top-level JSON key of "default" with a value that - // is a string. - // - // You can define other top-level keys that define the message you want to send - // to a specific transport protocol (e.g. http). - MessageStructure *string `type:"string"` - - // The subject of the batch message. - Subject *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchRequestEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchRequestEntry) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PublishBatchRequestEntry) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PublishBatchRequestEntry"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.MessageAttributes != nil { - for i, v := range s.MessageAttributes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetId sets the Id field's value. -func (s *PublishBatchRequestEntry) SetId(v string) *PublishBatchRequestEntry { - s.Id = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *PublishBatchRequestEntry) SetMessage(v string) *PublishBatchRequestEntry { - s.Message = &v - return s -} - -// SetMessageAttributes sets the MessageAttributes field's value. -func (s *PublishBatchRequestEntry) SetMessageAttributes(v map[string]*MessageAttributeValue) *PublishBatchRequestEntry { - s.MessageAttributes = v - return s -} - -// SetMessageDeduplicationId sets the MessageDeduplicationId field's value. -func (s *PublishBatchRequestEntry) SetMessageDeduplicationId(v string) *PublishBatchRequestEntry { - s.MessageDeduplicationId = &v - return s -} - -// SetMessageGroupId sets the MessageGroupId field's value. -func (s *PublishBatchRequestEntry) SetMessageGroupId(v string) *PublishBatchRequestEntry { - s.MessageGroupId = &v - return s -} - -// SetMessageStructure sets the MessageStructure field's value. -func (s *PublishBatchRequestEntry) SetMessageStructure(v string) *PublishBatchRequestEntry { - s.MessageStructure = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *PublishBatchRequestEntry) SetSubject(v string) *PublishBatchRequestEntry { - s.Subject = &v - return s -} - -// Encloses data related to a successful message in a batch request for topic. -type PublishBatchResultEntry struct { - _ struct{} `type:"structure"` - - // The Id of an entry in a batch request. - Id *string `type:"string"` - - // An identifier for the message. 
- MessageId *string `type:"string"` - - // This parameter applies only to FIFO (first-in-first-out) topics. - // - // The large, non-consecutive number that Amazon SNS assigns to each message. - // - // The length of SequenceNumber is 128 bits. SequenceNumber continues to increase - // for a particular MessageGroupId. - SequenceNumber *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchResultEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishBatchResultEntry) GoString() string { - return s.String() -} - -// SetId sets the Id field's value. -func (s *PublishBatchResultEntry) SetId(v string) *PublishBatchResultEntry { - s.Id = &v - return s -} - -// SetMessageId sets the MessageId field's value. -func (s *PublishBatchResultEntry) SetMessageId(v string) *PublishBatchResultEntry { - s.MessageId = &v - return s -} - -// SetSequenceNumber sets the SequenceNumber field's value. -func (s *PublishBatchResultEntry) SetSequenceNumber(v string) *PublishBatchResultEntry { - s.SequenceNumber = &v - return s -} - -// Input for Publish action. -type PublishInput struct { - _ struct{} `type:"structure"` - - // The message you want to send. - // - // If you are publishing to a topic and you want to send the same message to - // all transport protocols, include the text of the message as a String value. - // If you want to send different messages for each transport protocol, set the - // value of the MessageStructure parameter to json and use a JSON object for - // the Message parameter. - // - // Constraints: - // - // * With the exception of SMS, messages must be UTF-8 encoded strings and - // at most 256 KB in size (262,144 bytes, not 262,144 characters). - // - // * For SMS, each message can contain up to 140 characters. This character - // limit depends on the encoding schema. For example, an SMS message can - // contain 160 GSM characters, 140 ASCII characters, or 70 UCS-2 characters. - // If you publish a message that exceeds this size limit, Amazon SNS sends - // the message as multiple messages, each fitting within the size limit. - // Messages aren't truncated mid-word but are cut off at whole-word boundaries. - // The total size limit for a single SMS Publish action is 1,600 characters. - // - // JSON-specific constraints: - // - // * Keys in the JSON object that correspond to supported transport protocols - // must have simple JSON string values. - // - // * The values will be parsed (unescaped) before they are used in outgoing - // messages. - // - // * Outbound notifications are JSON encoded (meaning that the characters - // will be reescaped for sending). - // - // * Values have a minimum length of 0 (the empty string, "", is allowed). - // - // * Values have a maximum length bounded by the overall message size (so, - // including multiple protocols may limit message sizes). - // - // * Non-string values will cause the key to be ignored. - // - // * Keys that do not correspond to supported transport protocols are ignored. - // - // * Duplicate keys are not allowed. 
- // - // * Failure to parse or validate any key or value in the message will cause - // the Publish call to return an error (no partial delivery). - // - // Message is a required field - Message *string `type:"string" required:"true"` - - // Message attributes for Publish action. - MessageAttributes map[string]*MessageAttributeValue `locationNameKey:"Name" locationNameValue:"Value" type:"map"` - - // This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId - // can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation - // (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - // - // Every message must have a unique MessageDeduplicationId, which is a token - // used for deduplication of sent messages. If a message with a particular MessageDeduplicationId - // is sent successfully, any message sent with the same MessageDeduplicationId - // during the 5-minute deduplication interval is treated as a duplicate. - // - // If the topic has ContentBasedDeduplication set, the system generates a MessageDeduplicationId - // based on the contents of the message. Your MessageDeduplicationId overrides - // the generated one. - MessageDeduplicationId *string `type:"string"` - - // This parameter applies only to FIFO (first-in-first-out) topics. The MessageGroupId - // can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation - // (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). - // - // The MessageGroupId is a tag that specifies that a message belongs to a specific - // message group. Messages that belong to the same message group are processed - // in a FIFO manner (however, messages in different message groups might be - // processed out of order). Every message must include a MessageGroupId. - MessageGroupId *string `type:"string"` - - // Set MessageStructure to json if you want to send a different message for - // each protocol. For example, using one publish action, you can send a short - // message to your SMS subscribers and a longer message to your email subscribers. - // If you set MessageStructure to json, the value of the Message parameter must: - // - // * be a syntactically valid JSON object; and - // - // * contain at least a top-level JSON key of "default" with a value that - // is a string. - // - // You can define other top-level keys that define the message you want to send - // to a specific transport protocol (e.g., "http"). - // - // Valid value: json - MessageStructure *string `type:"string"` - - // The phone number to which you want to deliver an SMS message. Use E.164 format. - // - // If you don't specify a value for the PhoneNumber parameter, you must specify - // a value for the TargetArn or TopicArn parameters. - PhoneNumber *string `type:"string"` - - // Optional parameter to be used as the "Subject" line when the message is delivered - // to email endpoints. This field will also be included, if present, in the - // standard JSON messages delivered to other endpoints. - // - // Constraints: Subjects must be ASCII text that begins with a letter, number, - // or punctuation mark; must not include line breaks or control characters; - // and must be less than 100 characters long. - Subject *string `type:"string"` - - // If you don't specify a value for the TargetArn parameter, you must specify - // a value for the PhoneNumber or TopicArn parameters. - TargetArn *string `type:"string"` - - // The topic you want to publish to. 
- // - // If you don't specify a value for the TopicArn parameter, you must specify - // a value for the PhoneNumber or TargetArn parameters. - TopicArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PublishInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PublishInput"} - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.MessageAttributes != nil { - for i, v := range s.MessageAttributes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMessage sets the Message field's value. -func (s *PublishInput) SetMessage(v string) *PublishInput { - s.Message = &v - return s -} - -// SetMessageAttributes sets the MessageAttributes field's value. -func (s *PublishInput) SetMessageAttributes(v map[string]*MessageAttributeValue) *PublishInput { - s.MessageAttributes = v - return s -} - -// SetMessageDeduplicationId sets the MessageDeduplicationId field's value. -func (s *PublishInput) SetMessageDeduplicationId(v string) *PublishInput { - s.MessageDeduplicationId = &v - return s -} - -// SetMessageGroupId sets the MessageGroupId field's value. -func (s *PublishInput) SetMessageGroupId(v string) *PublishInput { - s.MessageGroupId = &v - return s -} - -// SetMessageStructure sets the MessageStructure field's value. -func (s *PublishInput) SetMessageStructure(v string) *PublishInput { - s.MessageStructure = &v - return s -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *PublishInput) SetPhoneNumber(v string) *PublishInput { - s.PhoneNumber = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *PublishInput) SetSubject(v string) *PublishInput { - s.Subject = &v - return s -} - -// SetTargetArn sets the TargetArn field's value. -func (s *PublishInput) SetTargetArn(v string) *PublishInput { - s.TargetArn = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *PublishInput) SetTopicArn(v string) *PublishInput { - s.TopicArn = &v - return s -} - -// Response for Publish action. -type PublishOutput struct { - _ struct{} `type:"structure"` - - // Unique identifier assigned to the published message. - // - // Length Constraint: Maximum 100 characters - MessageId *string `type:"string"` - - // This response element applies only to FIFO (first-in-first-out) topics. - // - // The sequence number is a large, non-consecutive number that Amazon SNS assigns - // to each message. The length of SequenceNumber is 128 bits. SequenceNumber - // continues to increase for each MessageGroupId. 
- SequenceNumber *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublishOutput) GoString() string { - return s.String() -} - -// SetMessageId sets the MessageId field's value. -func (s *PublishOutput) SetMessageId(v string) *PublishOutput { - s.MessageId = &v - return s -} - -// SetSequenceNumber sets the SequenceNumber field's value. -func (s *PublishOutput) SetSequenceNumber(v string) *PublishOutput { - s.SequenceNumber = &v - return s -} - -// Input for RemovePermission action. -type RemovePermissionInput struct { - _ struct{} `type:"structure"` - - // The unique label of the statement you want to remove. - // - // Label is a required field - Label *string `type:"string" required:"true"` - - // The ARN of the topic whose access control policy you wish to modify. - // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RemovePermissionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RemovePermissionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RemovePermissionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RemovePermissionInput"} - if s.Label == nil { - invalidParams.Add(request.NewErrParamRequired("Label")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLabel sets the Label field's value. -func (s *RemovePermissionInput) SetLabel(v string) *RemovePermissionInput { - s.Label = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *RemovePermissionInput) SetTopicArn(v string) *RemovePermissionInput { - s.TopicArn = &v - return s -} - -type RemovePermissionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RemovePermissionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
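// Tying the Publish pieces above together: with MessageStructure set to json,
// Message must be a JSON object whose "default" key covers any protocol that is
// not given its own key. A minimal sketch of that per-protocol fan-out with the
// shared client (placeholder ARN):
//
//	out, err := client.Publish(&sns.PublishInput{
//		TopicArn:         aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
//		MessageStructure: aws.String("json"),
//		Message:          aws.String(`{"default":"build finished","sms":"build OK","email":"The nightly build finished cleanly."}`),
//		Subject:          aws.String("Nightly build"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("message id:", aws.StringValue(out.MessageId))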
-func (s RemovePermissionOutput) GoString() string { - return s.String() -} - -// A verified or pending destination phone number in the SMS sandbox. -// -// When you start using Amazon SNS to send SMS messages, your Amazon Web Services -// account is in the SMS sandbox. The SMS sandbox provides a safe environment -// for you to try Amazon SNS features without risking your reputation as an -// SMS sender. While your Amazon Web Services account is in the SMS sandbox, -// you can use all of the features of Amazon SNS. However, you can send SMS -// messages only to verified destination phone numbers. For more information, -// including how to move out of the sandbox to send messages without restrictions, -// see SMS sandbox (https://docs.aws.amazon.com/sns/latest/dg/sns-sms-sandbox.html) -// in the Amazon SNS Developer Guide. -type SMSSandboxPhoneNumber struct { - _ struct{} `type:"structure"` - - // The destination phone number. - PhoneNumber *string `type:"string"` - - // The destination phone number's verification status. - Status *string `type:"string" enum:"SMSSandboxPhoneNumberVerificationStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SMSSandboxPhoneNumber) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SMSSandboxPhoneNumber) GoString() string { - return s.String() -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *SMSSandboxPhoneNumber) SetPhoneNumber(v string) *SMSSandboxPhoneNumber { - s.PhoneNumber = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *SMSSandboxPhoneNumber) SetStatus(v string) *SMSSandboxPhoneNumber { - s.Status = &v - return s -} - -// Input for SetEndpointAttributes action. -type SetEndpointAttributesInput struct { - _ struct{} `type:"structure"` - - // A map of the endpoint attributes. Attributes in this map include the following: - // - // * CustomUserData – arbitrary user data to associate with the endpoint. - // Amazon SNS does not use this data. The data must be in UTF-8 format and - // less than 2KB. - // - // * Enabled – flag that enables/disables delivery to the endpoint. Amazon - // SNS will set this to false when a notification service indicates to Amazon - // SNS that the endpoint is invalid. Users can set it back to true, typically - // after updating Token. - // - // * Token – device token, also referred to as a registration id, for an - // app and mobile device. This is returned from the notification service - // when an app and mobile device are registered with the notification service. - // - // Attributes is a required field - Attributes map[string]*string `type:"map" required:"true"` - - // EndpointArn used for SetEndpointAttributes action. - // - // EndpointArn is a required field - EndpointArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
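// The Enabled and Token attributes documented above are the usual repair path
// for a mobile endpoint that a push service has flagged invalid. A minimal
// sketch of re-enabling one after refreshing its device token, with the shared
// client; both the ARN and the token are placeholders:
//
//	_, err := client.SetEndpointAttributes(&sns.SetEndpointAttributesInput{
//		EndpointArn: aws.String("arn:aws:sns:us-east-1:123456789012:endpoint/GCM/example-app/0000"),
//		Attributes: map[string]*string{
//			"Token":   aws.String("fresh-device-token"),
//			"Enabled": aws.String("true"),
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}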
-func (s SetEndpointAttributesInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SetEndpointAttributesInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *SetEndpointAttributesInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "SetEndpointAttributesInput"}
-	if s.Attributes == nil {
-		invalidParams.Add(request.NewErrParamRequired("Attributes"))
-	}
-	if s.EndpointArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("EndpointArn"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *SetEndpointAttributesInput) SetAttributes(v map[string]*string) *SetEndpointAttributesInput {
-	s.Attributes = v
-	return s
-}
-
-// SetEndpointArn sets the EndpointArn field's value.
-func (s *SetEndpointAttributesInput) SetEndpointArn(v string) *SetEndpointAttributesInput {
-	s.EndpointArn = &v
-	return s
-}
-
-type SetEndpointAttributesOutput struct {
-	_ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SetEndpointAttributesOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SetEndpointAttributesOutput) GoString() string {
-	return s.String()
-}
-
-// Input for SetPlatformApplicationAttributes action.
-type SetPlatformApplicationAttributesInput struct {
-	_ struct{} `type:"structure"`
-
-	// A map of the platform application attributes. Attributes in this map include
-	// the following:
-	//
-	// * PlatformCredential – The credential received from the notification
-	// service. For ADM, PlatformCredential is the client secret. For Apple Services
-	// using certificate credentials, PlatformCredential is private key. For
-	// Apple Services using token credentials, PlatformCredential is signing
-	// key. For GCM (Firebase Cloud Messaging), PlatformCredential is API key.
-	//
-	// * PlatformPrincipal – The principal received from the notification service.
-	// For ADM, PlatformPrincipal is the client id. For Apple Services using certificate
-	// credentials, PlatformPrincipal is SSL certificate. For Apple Services
-	// using token credentials, PlatformPrincipal is signing key ID. For GCM
-	// (Firebase Cloud Messaging), there is no PlatformPrincipal.
-	//
-	// * EventEndpointCreated – Topic ARN to which EndpointCreated event notifications
-	// are sent.
-	//
-	// * EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications
-	// are sent.
-	//
-	// * EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications
-	// are sent.
- // - // * EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications - // are sent upon Direct Publish delivery failure (permanent) to one of the - // application's endpoints. - // - // * SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write - // access to use CloudWatch Logs on your behalf. - // - // * FailureFeedbackRoleArn – IAM role ARN used to give Amazon SNS write - // access to use CloudWatch Logs on your behalf. - // - // * SuccessFeedbackSampleRate – Sample rate percentage (0-100) of successfully - // delivered messages. - // - // The following attributes only apply to APNs token-based authentication: - // - // * ApplePlatformTeamID – The identifier that's assigned to your Apple - // developer account team. - // - // * ApplePlatformBundleID – The bundle identifier that's assigned to your - // iOS app. - // - // Attributes is a required field - Attributes map[string]*string `type:"map" required:"true"` - - // PlatformApplicationArn for SetPlatformApplicationAttributes action. - // - // PlatformApplicationArn is a required field - PlatformApplicationArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetPlatformApplicationAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetPlatformApplicationAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SetPlatformApplicationAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SetPlatformApplicationAttributesInput"} - if s.Attributes == nil { - invalidParams.Add(request.NewErrParamRequired("Attributes")) - } - if s.PlatformApplicationArn == nil { - invalidParams.Add(request.NewErrParamRequired("PlatformApplicationArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributes sets the Attributes field's value. -func (s *SetPlatformApplicationAttributesInput) SetAttributes(v map[string]*string) *SetPlatformApplicationAttributesInput { - s.Attributes = v - return s -} - -// SetPlatformApplicationArn sets the PlatformApplicationArn field's value. -func (s *SetPlatformApplicationAttributesInput) SetPlatformApplicationArn(v string) *SetPlatformApplicationAttributesInput { - s.PlatformApplicationArn = &v - return s -} - -type SetPlatformApplicationAttributesOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetPlatformApplicationAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s SetPlatformApplicationAttributesOutput) GoString() string { - return s.String() -} - -// The input for the SetSMSAttributes action. -type SetSMSAttributesInput struct { - _ struct{} `type:"structure"` - - // The default settings for sending SMS messages from your Amazon Web Services - // account. You can set values for the following attribute names: - // - // MonthlySpendLimit – The maximum amount in USD that you are willing to spend - // each month to send SMS messages. When Amazon SNS determines that sending - // an SMS message would incur a cost that exceeds this limit, it stops sending - // SMS messages within minutes. - // - // Amazon SNS stops sending SMS messages within minutes of the limit being crossed. - // During that interval, if you continue to send SMS messages, you will incur - // costs that exceed your limit. - // - // By default, the spend limit is set to the maximum allowed by Amazon SNS. - // If you want to raise the limit, submit an SNS Limit Increase case (https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-sns). - // For New limit value, enter your desired monthly spend limit. In the Use Case - // Description field, explain that you are requesting an SMS monthly spend limit - // increase. - // - // DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS - // to write logs about SMS deliveries in CloudWatch Logs. For each SMS message - // that you send, Amazon SNS writes a log that includes the message price, the - // success or failure status, the reason for failure (if the message failed), - // the message dwell time, and other information. - // - // DeliveryStatusSuccessSamplingRate – The percentage of successful SMS deliveries - // for which Amazon SNS will write logs in CloudWatch Logs. The value can be - // an integer from 0 - 100. For example, to write logs only for failed deliveries, - // set this value to 0. To write logs for 10% of your successful deliveries, - // set it to 10. - // - // DefaultSenderID – A string, such as your business brand, that is displayed - // as the sender on the receiving device. Support for sender IDs varies by country. - // The sender ID can be 1 - 11 alphanumeric characters, and it must contain - // at least one letter. - // - // DefaultSMSType – The type of SMS message that you will send by default. - // You can assign the following values: - // - // * Promotional – (Default) Noncritical messages, such as marketing messages. - // Amazon SNS optimizes the message delivery to incur the lowest cost. - // - // * Transactional – Critical messages that support customer transactions, - // such as one-time passcodes for multi-factor authentication. Amazon SNS - // optimizes the message delivery to achieve the highest reliability. - // - // UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily - // SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage - // report as a CSV file to the bucket. 
The report includes the following information - // for each SMS message that was successfully delivered by your Amazon Web Services - // account: - // - // * Time that the message was published (in UTC) - // - // * Message ID - // - // * Destination phone number - // - // * Message type - // - // * Delivery status - // - // * Message price (in USD) - // - // * Part number (a message is split into multiple parts if it is too long - // for a single message) - // - // * Total number of parts - // - // To receive the report, the bucket must have a policy that allows the Amazon - // SNS service principal to perform the s3:PutObject and s3:GetBucketLocation - // actions. - // - // For an example bucket policy and usage report, see Monitoring SMS Activity - // (https://docs.aws.amazon.com/sns/latest/dg/sms_stats.html) in the Amazon - // SNS Developer Guide. - // - // Attributes is a required field - Attributes map[string]*string `locationName:"attributes" type:"map" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSMSAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSMSAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SetSMSAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SetSMSAttributesInput"} - if s.Attributes == nil { - invalidParams.Add(request.NewErrParamRequired("Attributes")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributes sets the Attributes field's value. -func (s *SetSMSAttributesInput) SetAttributes(v map[string]*string) *SetSMSAttributesInput { - s.Attributes = v - return s -} - -// The response for the SetSMSAttributes action. -type SetSMSAttributesOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSMSAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSMSAttributesOutput) GoString() string { - return s.String() -} - -// Input for SetSubscriptionAttributes action. -type SetSubscriptionAttributesInput struct { - _ struct{} `type:"structure"` - - // A map of attributes with their corresponding values. - // - // The following lists the names, descriptions, and values of the special request - // parameters that this action uses: - // - // * DeliveryPolicy – The policy that defines how Amazon SNS retries failed - // deliveries to HTTP/S endpoints. 
- // - // * FilterPolicy – The simple JSON object that lets your subscriber receive - // only a subset of messages, rather than receiving every message published - // to the topic. - // - // * RawMessageDelivery – When set to true, enables raw message delivery - // to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints - // to process JSON formatting, which is otherwise created for Amazon SNS - // metadata. - // - // * RedrivePolicy – When specified, sends undeliverable messages to the - // specified Amazon SQS dead-letter queue. Messages that can't be delivered - // due to client errors (for example, when the subscribed endpoint is unreachable) - // or server errors (for example, when the service that powers the subscribed - // endpoint becomes unavailable) are held in the dead-letter queue for further - // analysis or reprocessing. - // - // The following attribute applies only to Amazon Kinesis Data Firehose delivery - // stream subscriptions: - // - // * SubscriptionRoleArn – The ARN of the IAM role that has the following: - // Permission to write to the Kinesis Data Firehose delivery stream Amazon - // SNS listed as a trusted entity Specifying a valid ARN for this attribute - // is required for Kinesis Data Firehose delivery stream subscriptions. For - // more information, see Fanout to Kinesis Data Firehose delivery streams - // (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) - // in the Amazon SNS Developer Guide. - // - // AttributeName is a required field - AttributeName *string `type:"string" required:"true"` - - // The new value for the attribute in JSON format. - AttributeValue *string `type:"string"` - - // The ARN of the subscription to modify. - // - // SubscriptionArn is a required field - SubscriptionArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSubscriptionAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSubscriptionAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SetSubscriptionAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SetSubscriptionAttributesInput"} - if s.AttributeName == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeName")) - } - if s.SubscriptionArn == nil { - invalidParams.Add(request.NewErrParamRequired("SubscriptionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeName sets the AttributeName field's value. -func (s *SetSubscriptionAttributesInput) SetAttributeName(v string) *SetSubscriptionAttributesInput { - s.AttributeName = &v - return s -} - -// SetAttributeValue sets the AttributeValue field's value. -func (s *SetSubscriptionAttributesInput) SetAttributeValue(v string) *SetSubscriptionAttributesInput { - s.AttributeValue = &v - return s -} - -// SetSubscriptionArn sets the SubscriptionArn field's value. 
-func (s *SetSubscriptionAttributesInput) SetSubscriptionArn(v string) *SetSubscriptionAttributesInput { - s.SubscriptionArn = &v - return s -} - -type SetSubscriptionAttributesOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSubscriptionAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetSubscriptionAttributesOutput) GoString() string { - return s.String() -} - -// Input for SetTopicAttributes action. -type SetTopicAttributesInput struct { - _ struct{} `type:"structure"` - - // A map of attributes with their corresponding values. - // - // The following lists the names, descriptions, and values of the special request - // parameters that the SetTopicAttributes action uses: - // - // * DeliveryPolicy – The policy that defines how Amazon SNS retries failed - // deliveries to HTTP/S endpoints. - // - // * DisplayName – The display name to use for a topic with SMS subscriptions. - // - // * Policy – The policy that defines who can access your topic. By default, - // only the topic owner can publish or subscribe to the topic. - // - // The following attribute applies only to server-side-encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): - // - // * KmsMasterKeyId – The ID of an Amazon Web Services managed customer - // master key (CMK) for Amazon SNS or a custom CMK. For more information, - // see Key Terms (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms). - // For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the Key Management Service API Reference. - // - // The following attribute applies only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): - // - // * ContentBasedDeduplication – Enables content-based deduplication for - // FIFO topics. By default, ContentBasedDeduplication is set to false. If - // you create a FIFO topic and this attribute is false, you must specify - // a value for the MessageDeduplicationId parameter for the Publish (https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) - // action. When you set ContentBasedDeduplication to true, Amazon SNS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of - // the message (but not the attributes of the message). (Optional) To override - // the generated value, you can specify a value for the MessageDeduplicationId - // parameter for the Publish action. - // - // AttributeName is a required field - AttributeName *string `type:"string" required:"true"` - - // The new value for the attribute. - AttributeValue *string `type:"string"` - - // The ARN of the topic to modify. - // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetTopicAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetTopicAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SetTopicAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SetTopicAttributesInput"} - if s.AttributeName == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeName")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeName sets the AttributeName field's value. -func (s *SetTopicAttributesInput) SetAttributeName(v string) *SetTopicAttributesInput { - s.AttributeName = &v - return s -} - -// SetAttributeValue sets the AttributeValue field's value. -func (s *SetTopicAttributesInput) SetAttributeValue(v string) *SetTopicAttributesInput { - s.AttributeValue = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *SetTopicAttributesInput) SetTopicArn(v string) *SetTopicAttributesInput { - s.TopicArn = &v - return s -} - -type SetTopicAttributesOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetTopicAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SetTopicAttributesOutput) GoString() string { - return s.String() -} - -// Input for Subscribe action. -type SubscribeInput struct { - _ struct{} `type:"structure"` - - // A map of attributes with their corresponding values. - // - // The following lists the names, descriptions, and values of the special request - // parameters that the Subscribe action uses: - // - // * DeliveryPolicy – The policy that defines how Amazon SNS retries failed - // deliveries to HTTP/S endpoints. - // - // * FilterPolicy – The simple JSON object that lets your subscriber receive - // only a subset of messages, rather than receiving every message published - // to the topic. - // - // * RawMessageDelivery – When set to true, enables raw message delivery - // to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints - // to process JSON formatting, which is otherwise created for Amazon SNS - // metadata. - // - // * RedrivePolicy – When specified, sends undeliverable messages to the - // specified Amazon SQS dead-letter queue. 
Messages that can't be delivered - // due to client errors (for example, when the subscribed endpoint is unreachable) - // or server errors (for example, when the service that powers the subscribed - // endpoint becomes unavailable) are held in the dead-letter queue for further - // analysis or reprocessing. - // - // The following attribute applies only to Amazon Kinesis Data Firehose delivery - // stream subscriptions: - // - // * SubscriptionRoleArn – The ARN of the IAM role that has the following: - // Permission to write to the Kinesis Data Firehose delivery stream Amazon - // SNS listed as a trusted entity Specifying a valid ARN for this attribute - // is required for Kinesis Data Firehose delivery stream subscriptions. For - // more information, see Fanout to Kinesis Data Firehose delivery streams - // (https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) - // in the Amazon SNS Developer Guide. - Attributes map[string]*string `type:"map"` - - // The endpoint that you want to receive notifications. Endpoints vary by protocol: - // - // * For the http protocol, the (public) endpoint is a URL beginning with - // http://. - // - // * For the https protocol, the (public) endpoint is a URL beginning with - // https://. - // - // * For the email protocol, the endpoint is an email address. - // - // * For the email-json protocol, the endpoint is an email address. - // - // * For the sms protocol, the endpoint is a phone number of an SMS-enabled - // device. - // - // * For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue. - // - // * For the application protocol, the endpoint is the EndpointArn of a mobile - // app and device. - // - // * For the lambda protocol, the endpoint is the ARN of an Lambda function. - // - // * For the firehose protocol, the endpoint is the ARN of an Amazon Kinesis - // Data Firehose delivery stream. - Endpoint *string `type:"string"` - - // The protocol that you want to use. Supported protocols include: - // - // * http – delivery of JSON-encoded message via HTTP POST - // - // * https – delivery of JSON-encoded message via HTTPS POST - // - // * email – delivery of message via SMTP - // - // * email-json – delivery of JSON-encoded message via SMTP - // - // * sms – delivery of message via SMS - // - // * sqs – delivery of JSON-encoded message to an Amazon SQS queue - // - // * application – delivery of JSON-encoded message to an EndpointArn for - // a mobile app and device - // - // * lambda – delivery of JSON-encoded message to an Lambda function - // - // * firehose – delivery of JSON-encoded message to an Amazon Kinesis Data - // Firehose delivery stream. - // - // Protocol is a required field - Protocol *string `type:"string" required:"true"` - - // Sets whether the response from the Subscribe request includes the subscription - // ARN, even if the subscription is not yet confirmed. - // - // If you set this parameter to true, the response includes the ARN in all cases, - // even if the subscription is not yet confirmed. In addition to the ARN for - // confirmed subscriptions, the response also includes the pending subscription - // ARN value for subscriptions that aren't yet confirmed. A subscription becomes - // confirmed when the subscriber calls the ConfirmSubscription action with a - // confirmation token. - // - // The default value is false. - ReturnSubscriptionArn *bool `type:"boolean"` - - // The ARN of the topic you want to subscribe to. 
- // - // TopicArn is a required field - TopicArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SubscribeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SubscribeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SubscribeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SubscribeInput"} - if s.Protocol == nil { - invalidParams.Add(request.NewErrParamRequired("Protocol")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributes sets the Attributes field's value. -func (s *SubscribeInput) SetAttributes(v map[string]*string) *SubscribeInput { - s.Attributes = v - return s -} - -// SetEndpoint sets the Endpoint field's value. -func (s *SubscribeInput) SetEndpoint(v string) *SubscribeInput { - s.Endpoint = &v - return s -} - -// SetProtocol sets the Protocol field's value. -func (s *SubscribeInput) SetProtocol(v string) *SubscribeInput { - s.Protocol = &v - return s -} - -// SetReturnSubscriptionArn sets the ReturnSubscriptionArn field's value. -func (s *SubscribeInput) SetReturnSubscriptionArn(v bool) *SubscribeInput { - s.ReturnSubscriptionArn = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *SubscribeInput) SetTopicArn(v string) *SubscribeInput { - s.TopicArn = &v - return s -} - -// Response for Subscribe action. -type SubscribeOutput struct { - _ struct{} `type:"structure"` - - // The ARN of the subscription if it is confirmed, or the string "pending confirmation" - // if the subscription requires confirmation. However, if the API request parameter - // ReturnSubscriptionArn is true, then the value is always the subscription - // ARN, even if the subscription requires confirmation. - SubscriptionArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SubscribeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SubscribeOutput) GoString() string { - return s.String() -} - -// SetSubscriptionArn sets the SubscriptionArn field's value. -func (s *SubscribeOutput) SetSubscriptionArn(v string) *SubscribeOutput { - s.SubscriptionArn = &v - return s -} - -// A wrapper type for the attributes of an Amazon SNS subscription. -type Subscription struct { - _ struct{} `type:"structure"` - - // The subscription's endpoint (format depends on the protocol). 
- Endpoint *string `type:"string"` - - // The subscription's owner. - Owner *string `type:"string"` - - // The subscription's protocol. - Protocol *string `type:"string"` - - // The subscription's ARN. - SubscriptionArn *string `type:"string"` - - // The ARN of the subscription's topic. - TopicArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Subscription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Subscription) GoString() string { - return s.String() -} - -// SetEndpoint sets the Endpoint field's value. -func (s *Subscription) SetEndpoint(v string) *Subscription { - s.Endpoint = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *Subscription) SetOwner(v string) *Subscription { - s.Owner = &v - return s -} - -// SetProtocol sets the Protocol field's value. -func (s *Subscription) SetProtocol(v string) *Subscription { - s.Protocol = &v - return s -} - -// SetSubscriptionArn sets the SubscriptionArn field's value. -func (s *Subscription) SetSubscriptionArn(v string) *Subscription { - s.SubscriptionArn = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *Subscription) SetTopicArn(v string) *Subscription { - s.TopicArn = &v - return s -} - -// The list of tags to be added to the specified topic. -type Tag struct { - _ struct{} `type:"structure"` - - // The required key portion of the tag. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The optional value portion of the tag. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. 
-func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -type TagResourceInput struct { - _ struct{} `type:"structure"` - - // The ARN of the topic to which to add tags. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` - - // The tags to be added to the specified topic. A tag consists of a required - // key and an optional value. - // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { - s.ResourceArn = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v - return s -} - -type TagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) GoString() string { - return s.String() -} - -// A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a -// topic's attributes, use GetTopicAttributes. -type Topic struct { - _ struct{} `type:"structure"` - - // The topic's ARN. - TopicArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s Topic) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Topic) GoString() string { - return s.String() -} - -// SetTopicArn sets the TopicArn field's value. -func (s *Topic) SetTopicArn(v string) *Topic { - s.TopicArn = &v - return s -} - -// Input for Unsubscribe action. -type UnsubscribeInput struct { - _ struct{} `type:"structure"` - - // The ARN of the subscription to be deleted. - // - // SubscriptionArn is a required field - SubscriptionArn *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsubscribeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsubscribeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UnsubscribeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UnsubscribeInput"} - if s.SubscriptionArn == nil { - invalidParams.Add(request.NewErrParamRequired("SubscriptionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSubscriptionArn sets the SubscriptionArn field's value. -func (s *UnsubscribeInput) SetSubscriptionArn(v string) *UnsubscribeInput { - s.SubscriptionArn = &v - return s -} - -type UnsubscribeOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsubscribeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsubscribeOutput) GoString() string { - return s.String() -} - -type UntagResourceInput struct { - _ struct{} `type:"structure"` - - // The ARN of the topic from which to remove tags. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` - - // The list of tag keys to remove from the specified topic. - // - // TagKeys is a required field - TagKeys []*string `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { - s.ResourceArn = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v - return s -} - -type UntagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) GoString() string { - return s.String() -} - -type VerifySMSSandboxPhoneNumberInput struct { - _ struct{} `type:"structure"` - - // The OTP sent to the destination number from the CreateSMSSandBoxPhoneNumber - // call. - // - // OneTimePassword is a required field - OneTimePassword *string `min:"5" type:"string" required:"true"` - - // The destination phone number to verify. - // - // PhoneNumber is a required field - PhoneNumber *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifySMSSandboxPhoneNumberInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifySMSSandboxPhoneNumberInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *VerifySMSSandboxPhoneNumberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "VerifySMSSandboxPhoneNumberInput"} - if s.OneTimePassword == nil { - invalidParams.Add(request.NewErrParamRequired("OneTimePassword")) - } - if s.OneTimePassword != nil && len(*s.OneTimePassword) < 5 { - invalidParams.Add(request.NewErrParamMinLen("OneTimePassword", 5)) - } - if s.PhoneNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetOneTimePassword sets the OneTimePassword field's value. -func (s *VerifySMSSandboxPhoneNumberInput) SetOneTimePassword(v string) *VerifySMSSandboxPhoneNumberInput { - s.OneTimePassword = &v - return s -} - -// SetPhoneNumber sets the PhoneNumber field's value. -func (s *VerifySMSSandboxPhoneNumberInput) SetPhoneNumber(v string) *VerifySMSSandboxPhoneNumberInput { - s.PhoneNumber = &v - return s -} - -// The destination phone number's verification status. -type VerifySMSSandboxPhoneNumberOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifySMSSandboxPhoneNumberOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifySMSSandboxPhoneNumberOutput) GoString() string { - return s.String() -} - -// Supported language code for sending OTP message -const ( - // LanguageCodeStringEnUs is a LanguageCodeString enum value - LanguageCodeStringEnUs = "en-US" - - // LanguageCodeStringEnGb is a LanguageCodeString enum value - LanguageCodeStringEnGb = "en-GB" - - // LanguageCodeStringEs419 is a LanguageCodeString enum value - LanguageCodeStringEs419 = "es-419" - - // LanguageCodeStringEsEs is a LanguageCodeString enum value - LanguageCodeStringEsEs = "es-ES" - - // LanguageCodeStringDeDe is a LanguageCodeString enum value - LanguageCodeStringDeDe = "de-DE" - - // LanguageCodeStringFrCa is a LanguageCodeString enum value - LanguageCodeStringFrCa = "fr-CA" - - // LanguageCodeStringFrFr is a LanguageCodeString enum value - LanguageCodeStringFrFr = "fr-FR" - - // LanguageCodeStringItIt is a LanguageCodeString enum value - LanguageCodeStringItIt = "it-IT" - - // LanguageCodeStringJaJp is a LanguageCodeString enum value - LanguageCodeStringJaJp = "ja-JP" - - // LanguageCodeStringPtBr is a LanguageCodeString enum value - LanguageCodeStringPtBr = "pt-BR" - - // LanguageCodeStringKrKr is a LanguageCodeString enum value - LanguageCodeStringKrKr = "kr-KR" - - // LanguageCodeStringZhCn is a LanguageCodeString enum value - LanguageCodeStringZhCn = "zh-CN" - - // LanguageCodeStringZhTw is a LanguageCodeString enum value - LanguageCodeStringZhTw = "zh-TW" -) - -// LanguageCodeString_Values returns all elements of the LanguageCodeString enum -func LanguageCodeString_Values() []string { - return []string{ - LanguageCodeStringEnUs, - LanguageCodeStringEnGb, - LanguageCodeStringEs419, - LanguageCodeStringEsEs, - LanguageCodeStringDeDe, - LanguageCodeStringFrCa, - LanguageCodeStringFrFr, - LanguageCodeStringItIt, - 
LanguageCodeStringJaJp, - LanguageCodeStringPtBr, - LanguageCodeStringKrKr, - LanguageCodeStringZhCn, - LanguageCodeStringZhTw, - } -} - -// Enum listing out all supported number capabilities. -const ( - // NumberCapabilitySms is a NumberCapability enum value - NumberCapabilitySms = "SMS" - - // NumberCapabilityMms is a NumberCapability enum value - NumberCapabilityMms = "MMS" - - // NumberCapabilityVoice is a NumberCapability enum value - NumberCapabilityVoice = "VOICE" -) - -// NumberCapability_Values returns all elements of the NumberCapability enum -func NumberCapability_Values() []string { - return []string{ - NumberCapabilitySms, - NumberCapabilityMms, - NumberCapabilityVoice, - } -} - -// Enum listing out all supported route types. The following enum values are -// supported. 1. Transactional : Non-marketing traffic 2. Promotional : Marketing -// 3. Premium : Premium routes for OTP delivery to the carriers -const ( - // RouteTypeTransactional is a RouteType enum value - RouteTypeTransactional = "Transactional" - - // RouteTypePromotional is a RouteType enum value - RouteTypePromotional = "Promotional" - - // RouteTypePremium is a RouteType enum value - RouteTypePremium = "Premium" -) - -// RouteType_Values returns all elements of the RouteType enum -func RouteType_Values() []string { - return []string{ - RouteTypeTransactional, - RouteTypePromotional, - RouteTypePremium, - } -} - -// Enum listing out all supported destination phone number verification statuses. -// The following enum values are supported. 1. PENDING : The destination phone -// number is pending verification. 2. VERIFIED : The destination phone number -// is verified. -const ( - // SMSSandboxPhoneNumberVerificationStatusPending is a SMSSandboxPhoneNumberVerificationStatus enum value - SMSSandboxPhoneNumberVerificationStatusPending = "Pending" - - // SMSSandboxPhoneNumberVerificationStatusVerified is a SMSSandboxPhoneNumberVerificationStatus enum value - SMSSandboxPhoneNumberVerificationStatusVerified = "Verified" -) - -// SMSSandboxPhoneNumberVerificationStatus_Values returns all elements of the SMSSandboxPhoneNumberVerificationStatus enum -func SMSSandboxPhoneNumberVerificationStatus_Values() []string { - return []string{ - SMSSandboxPhoneNumberVerificationStatusPending, - SMSSandboxPhoneNumberVerificationStatusVerified, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sns/doc.go deleted file mode 100644 index 714e3b87f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sns provides the client and types for making API -// requests to Amazon Simple Notification Service. -// -// Amazon Simple Notification Service (Amazon SNS) is a web service that enables -// you to build distributed web-enabled applications. Applications can use Amazon -// SNS to easily push real-time notification messages to interested subscribers -// over multiple delivery protocols. For more information about this product -// see the Amazon SNS product page (http://aws.amazon.com/sns/). For detailed -// information about Amazon SNS features and their associated API calls, see -// the Amazon SNS Developer Guide (https://docs.aws.amazon.com/sns/latest/dg/). 
-// -// For information on the permissions you need to use this API, see Identity -// and access management in Amazon SNS (https://docs.aws.amazon.com/sns/latest/dg/sns-authentication-and-access-control.html) -// in the Amazon SNS Developer Guide. -// -// We also provide SDKs that enable you to access Amazon SNS from your preferred -// programming language. The SDKs contain functionality that automatically takes -// care of tasks such as: cryptographically signing your service requests, retrying -// requests, and handling error responses. For a list of available SDKs, go -// to Tools for Amazon Web Services (http://aws.amazon.com/tools/). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31 for more information on this service. -// -// See sns package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sns/ -// -// Using the Client -// -// To contact Amazon Simple Notification Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon Simple Notification Service client SNS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sns/#New -package sns diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sns/errors.go deleted file mode 100644 index 979eee05e..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/errors.go +++ /dev/null @@ -1,216 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sns - -const ( - - // ErrCodeAuthorizationErrorException for service response error code - // "AuthorizationError". - // - // Indicates that the user has been denied access to the requested resource. - ErrCodeAuthorizationErrorException = "AuthorizationError" - - // ErrCodeBatchEntryIdsNotDistinctException for service response error code - // "BatchEntryIdsNotDistinct". - // - // Two or more batch entries in the request have the same Id. - ErrCodeBatchEntryIdsNotDistinctException = "BatchEntryIdsNotDistinct" - - // ErrCodeBatchRequestTooLongException for service response error code - // "BatchRequestTooLong". - // - // The length of all the batch messages put together is more than the limit. - ErrCodeBatchRequestTooLongException = "BatchRequestTooLong" - - // ErrCodeConcurrentAccessException for service response error code - // "ConcurrentAccess". - // - // Can't perform multiple operations on a tag simultaneously. Perform the operations - // sequentially. - ErrCodeConcurrentAccessException = "ConcurrentAccess" - - // ErrCodeEmptyBatchRequestException for service response error code - // "EmptyBatchRequest". - // - // The batch request doesn't contain any entries. - ErrCodeEmptyBatchRequestException = "EmptyBatchRequest" - - // ErrCodeEndpointDisabledException for service response error code - // "EndpointDisabled". - // - // Exception error indicating endpoint disabled. - ErrCodeEndpointDisabledException = "EndpointDisabled" - - // ErrCodeFilterPolicyLimitExceededException for service response error code - // "FilterPolicyLimitExceeded". 
- // - // Indicates that the number of filter polices in your Amazon Web Services account - // exceeds the limit. To add more filter polices, submit an Amazon SNS Limit - // Increase case in the Amazon Web Services Support Center. - ErrCodeFilterPolicyLimitExceededException = "FilterPolicyLimitExceeded" - - // ErrCodeInternalErrorException for service response error code - // "InternalError". - // - // Indicates an internal service error. - ErrCodeInternalErrorException = "InternalError" - - // ErrCodeInvalidBatchEntryIdException for service response error code - // "InvalidBatchEntryId". - // - // The Id of a batch entry in a batch request doesn't abide by the specification. - ErrCodeInvalidBatchEntryIdException = "InvalidBatchEntryId" - - // ErrCodeInvalidParameterException for service response error code - // "InvalidParameter". - // - // Indicates that a request parameter does not comply with the associated constraints. - ErrCodeInvalidParameterException = "InvalidParameter" - - // ErrCodeInvalidParameterValueException for service response error code - // "ParameterValueInvalid". - // - // Indicates that a request parameter does not comply with the associated constraints. - ErrCodeInvalidParameterValueException = "ParameterValueInvalid" - - // ErrCodeInvalidSecurityException for service response error code - // "InvalidSecurity". - // - // The credential signature isn't valid. You must use an HTTPS endpoint and - // sign your request using Signature Version 4. - ErrCodeInvalidSecurityException = "InvalidSecurity" - - // ErrCodeKMSAccessDeniedException for service response error code - // "KMSAccessDenied". - // - // The ciphertext references a key that doesn't exist or that you don't have - // access to. - ErrCodeKMSAccessDeniedException = "KMSAccessDenied" - - // ErrCodeKMSDisabledException for service response error code - // "KMSDisabled". - // - // The request was rejected because the specified customer master key (CMK) - // isn't enabled. - ErrCodeKMSDisabledException = "KMSDisabled" - - // ErrCodeKMSInvalidStateException for service response error code - // "KMSInvalidState". - // - // The request was rejected because the state of the specified resource isn't - // valid for this request. For more information, see How Key State Affects Use - // of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) - // in the Key Management Service Developer Guide. - ErrCodeKMSInvalidStateException = "KMSInvalidState" - - // ErrCodeKMSNotFoundException for service response error code - // "KMSNotFound". - // - // The request was rejected because the specified entity or resource can't be - // found. - ErrCodeKMSNotFoundException = "KMSNotFound" - - // ErrCodeKMSOptInRequired for service response error code - // "KMSOptInRequired". - // - // The Amazon Web Services access key ID needs a subscription for the service. - ErrCodeKMSOptInRequired = "KMSOptInRequired" - - // ErrCodeKMSThrottlingException for service response error code - // "KMSThrottling". - // - // The request was denied due to request throttling. For more information about - // throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) - // in the Key Management Service Developer Guide. - ErrCodeKMSThrottlingException = "KMSThrottling" - - // ErrCodeNotFoundException for service response error code - // "NotFound". - // - // Indicates that the requested resource does not exist. 
- ErrCodeNotFoundException = "NotFound" - - // ErrCodeOptedOutException for service response error code - // "OptedOut". - // - // Indicates that the specified phone number opted out of receiving SMS messages - // from your Amazon Web Services account. You can't send SMS messages to phone - // numbers that opt out. - ErrCodeOptedOutException = "OptedOut" - - // ErrCodePlatformApplicationDisabledException for service response error code - // "PlatformApplicationDisabled". - // - // Exception error indicating platform application disabled. - ErrCodePlatformApplicationDisabledException = "PlatformApplicationDisabled" - - // ErrCodeResourceNotFoundException for service response error code - // "ResourceNotFound". - // - // Can’t perform the action on the specified resource. Make sure that the - // resource exists. - ErrCodeResourceNotFoundException = "ResourceNotFound" - - // ErrCodeStaleTagException for service response error code - // "StaleTag". - // - // A tag has been added to a resource with the same ARN as a deleted resource. - // Wait a short while and then retry the operation. - ErrCodeStaleTagException = "StaleTag" - - // ErrCodeSubscriptionLimitExceededException for service response error code - // "SubscriptionLimitExceeded". - // - // Indicates that the customer already owns the maximum allowed number of subscriptions. - ErrCodeSubscriptionLimitExceededException = "SubscriptionLimitExceeded" - - // ErrCodeTagLimitExceededException for service response error code - // "TagLimitExceeded". - // - // Can't add more than 50 tags to a topic. - ErrCodeTagLimitExceededException = "TagLimitExceeded" - - // ErrCodeTagPolicyException for service response error code - // "TagPolicy". - // - // The request doesn't comply with the IAM tag policy. Correct your request - // and then retry it. - ErrCodeTagPolicyException = "TagPolicy" - - // ErrCodeThrottledException for service response error code - // "Throttled". - // - // Indicates that the rate at which requests have been submitted for this action - // exceeds the limit for your Amazon Web Services account. - ErrCodeThrottledException = "Throttled" - - // ErrCodeTooManyEntriesInBatchRequestException for service response error code - // "TooManyEntriesInBatchRequest". - // - // The batch request contains more entries than permissible. - ErrCodeTooManyEntriesInBatchRequestException = "TooManyEntriesInBatchRequest" - - // ErrCodeTopicLimitExceededException for service response error code - // "TopicLimitExceeded". - // - // Indicates that the customer already owns the maximum allowed number of topics. - ErrCodeTopicLimitExceededException = "TopicLimitExceeded" - - // ErrCodeUserErrorException for service response error code - // "UserError". - // - // Indicates that a request parameter does not comply with the associated constraints. - ErrCodeUserErrorException = "UserError" - - // ErrCodeValidationException for service response error code - // "ValidationException". - // - // Indicates that a parameter in the request is invalid. - ErrCodeValidationException = "ValidationException" - - // ErrCodeVerificationException for service response error code - // "VerificationException". - // - // Indicates that the one-time password (OTP) used for verification is invalid. 
- ErrCodeVerificationException = "VerificationException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go deleted file mode 100644 index b56ae0bc9..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sns - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// SNS provides the API operation methods for making requests to -// Amazon Simple Notification Service. See this package's package overview docs -// for details on the service. -// -// SNS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type SNS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sns" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "SNS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the SNS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a SNS client from just a session. -// svc := sns.New(mySession) -// -// // Create a SNS client with additional configuration -// svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS { - c := p.ClientConfig(EndpointsID, cfgs...) - if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = EndpointsID - // No Fallback - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SNS { - svc := &SNS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2010-03-31", - ResolvedRegion: resolvedRegion, - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a SNS operation and runs any -// custom request initialization. 
-func (c *SNS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore deleted file mode 100644 index 50d95c548..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# IDEs -.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml deleted file mode 100644 index c79105c2f..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.13 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md deleted file mode 100644 index 16abdfc08..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. 
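[A minimal usage sketch, added for illustration and not part of the vendored README. It assumes an operation that fails transiently; Retry and NewExponentialBackOff are defined in this package, below.]

    operation := func() error {
        // Work that may fail transiently, e.g. a network call.
        return nil
    }

    // Retry runs operation, sleeping between attempts as dictated by the
    // policy, until it succeeds, the policy returns Stop, or the error is
    // marked permanent via backoff.Permanent.
    if err := backoff.Retry(operation, backoff.NewExponentialBackOff()); err != nil {
        // Retries exhausted (or a permanent error was returned).
    }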
- -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. -* If proposed change is not a common use case, I will probably not accept it. - -[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go deleted file mode 100644 index 3676ee405..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. 
-type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 48482330e..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index 3d3453215..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,158 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. 
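[An illustrative sketch of driving this policy by hand, per the contract described above; an editor's addition, not part of the upstream comment.]

    b := NewExponentialBackOff()
    for {
        next := b.NextBackOff()
        if next == Stop {
            break // MaxElapsedTime exceeded; give up.
        }
        time.Sleep(next)
        // Retry the operation here; on success, stop looping.
    }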
-*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. 
- // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index 1ce2507eb..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,112 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. -// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - var err error - var next time.Duration - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return cerr - } - - return err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go deleted file mode 100644 index df9d68bce..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ /dev/null @@ -1,97 +0,0 @@ -package backoff - -import ( - "context" - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. 
-// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOff - ctx context.Context - timer Timer - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. Ticker is -// guaranteed to tick at least once. The channel is closed when Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: b, - ctx: getContext(b), - timer: timer, - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. - return - case <-t.ctx.Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - t.timer.Start(next) - return t.timer.C() -} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go deleted file mode 100644 index 8120d0213..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/timer.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -type Timer interface { - Start(duration time.Duration) - Stop() - C() <-chan time.Time -} - -// defaultTimer implements Timer interface using time.Timer -type defaultTimer struct { - timer *time.Timer -} - -// C returns the timers channel which receives the current time when the timer fires. -func (t *defaultTimer) C() <-chan time.Time { - return t.timer.C -} - -// Start starts the timer to fire after the given duration -func (t *defaultTimer) Start(duration time.Duration) { - if t.timer == nil { - t.timer = time.NewTimer(duration) - } else { - t.timer.Reset(duration) - } -} - -// Stop is called when the timer is not used anymore and resources may be freed. -func (t *defaultTimer) Stop() { - if t.timer != nil { - t.timer.Stop() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca37..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. 
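[For example, an editor's sketch that is not part of the upstream comment: giving up after five retries of the default exponential policy.]

    b := WithMaxRetries(NewExponentialBackOff(), 5)
    err := Retry(operation, b)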
-*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go deleted file mode 100644 index 5917d4c8c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go +++ /dev/null @@ -1,734 +0,0 @@ -package alertmanager - -import ( - "context" - "crypto/md5" - "encoding/binary" - "fmt" - "net/http" - "net/url" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/alertmanager/api" - "github.com/prometheus/alertmanager/cluster" - "github.com/prometheus/alertmanager/cluster/clusterpb" - "github.com/prometheus/alertmanager/config" - "github.com/prometheus/alertmanager/dispatch" - "github.com/prometheus/alertmanager/inhibit" - "github.com/prometheus/alertmanager/nflog" - "github.com/prometheus/alertmanager/notify" - "github.com/prometheus/alertmanager/notify/email" - "github.com/prometheus/alertmanager/notify/opsgenie" - "github.com/prometheus/alertmanager/notify/pagerduty" - "github.com/prometheus/alertmanager/notify/pushover" - "github.com/prometheus/alertmanager/notify/slack" - "github.com/prometheus/alertmanager/notify/sns" - "github.com/prometheus/alertmanager/notify/victorops" - "github.com/prometheus/alertmanager/notify/webhook" - "github.com/prometheus/alertmanager/notify/wechat" - "github.com/prometheus/alertmanager/provider/mem" - "github.com/prometheus/alertmanager/silence" - "github.com/prometheus/alertmanager/template" - "github.com/prometheus/alertmanager/timeinterval" - "github.com/prometheus/alertmanager/types" - "github.com/prometheus/alertmanager/ui" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - commoncfg "github.com/prometheus/common/config" - "github.com/prometheus/common/model" - "github.com/prometheus/common/route" - "golang.org/x/time/rate" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" - "github.com/cortexproject/cortex/pkg/util/flagext" - util_net "github.com/cortexproject/cortex/pkg/util/net" - "github.com/cortexproject/cortex/pkg/util/services" -) - -const ( - // MaintenancePeriod is used for periodic storing of silences and notifications to local file. - maintenancePeriod = 15 * time.Minute - - // Filenames used within tenant-directory - notificationLogSnapshot = "notifications" - silencesSnapshot = "silences" - templatesDir = "templates" -) - -// Config configures an Alertmanager. -type Config struct { - UserID string - Logger log.Logger - Peer *cluster.Peer - PeerTimeout time.Duration - Retention time.Duration - ExternalURL *url.URL - Limits Limits - - // Tenant-specific local directory where AM can store its state (notifications, silences, templates). When AM is stopped, entire dir is removed. 
- TenantDataDir string - - ShardingEnabled bool - ReplicationFactor int - Replicator Replicator - Store alertstore.AlertStore - PersisterConfig PersisterConfig -} - -// An Alertmanager manages the alerts for one user. -type Alertmanager struct { - cfg *Config - api *api.API - logger log.Logger - state State - persister *statePersister - nflog *nflog.Log - silences *silence.Silences - marker types.Marker - alerts *mem.Alerts - dispatcher *dispatch.Dispatcher - inhibitor *inhibit.Inhibitor - pipelineBuilder *notify.PipelineBuilder - stop chan struct{} - wg sync.WaitGroup - mux *http.ServeMux - registry *prometheus.Registry - - // Pipeline created during last ApplyConfig call. Used for testing only. - lastPipeline notify.Stage - - // The Dispatcher is the only component we need to recreate when we call ApplyConfig. - // Given its metrics don't have any variable labels we need to re-use the same metrics. - dispatcherMetrics *dispatch.DispatcherMetrics - // This needs to be set to the hash of the config. All the hashes need to be same - // for deduping of alerts to work, hence we need this metric. See https://github.com/prometheus/alertmanager/issues/596 - // Further, in upstream AM, this metric is handled using the config coordinator which we don't use - // hence we need to generate the metric ourselves. - configHashMetric prometheus.Gauge - - rateLimitedNotifications *prometheus.CounterVec -} - -var ( - webReload = make(chan chan error) -) - -func init() { - go func() { - // Since this is not a "normal" Alertmanager which reads its config - // from disk, we just accept and ignore web-based reload signals. Config - // updates are only applied externally via ApplyConfig(). - for range webReload { - } - }() -} - -// State helps with replication and synchronization of notifications and silences across several alertmanager replicas. -type State interface { - AddState(string, cluster.State, prometheus.Registerer) cluster.ClusterChannel - Position() int - WaitReady(context.Context) error -} - -// Replicator is used to exchange state with peers via the ring when sharding is enabled. -type Replicator interface { - // ReplicateStateForUser writes the given partial state to the necessary replicas. - ReplicateStateForUser(ctx context.Context, userID string, part *clusterpb.Part) error - // The alertmanager replication protocol relies on a position related to other replicas. - // This position is then used to identify who should notify about the alert first. - GetPositionForUser(userID string) int - // ReadFullStateForUser obtains the full state from other replicas in the cluster. - ReadFullStateForUser(context.Context, string) ([]*clusterpb.FullState, error) -} - -// New creates a new Alertmanager. -func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) { - if cfg.TenantDataDir == "" { - return nil, fmt.Errorf("directory for tenant-specific AlertManager is not configured") - } - - am := &Alertmanager{ - cfg: cfg, - logger: log.With(cfg.Logger, "user", cfg.UserID), - stop: make(chan struct{}), - configHashMetric: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "alertmanager_config_hash", - Help: "Hash of the currently loaded alertmanager configuration.", - }), - - rateLimitedNotifications: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "alertmanager_notification_rate_limited_total", - Help: "Number of rate-limited notifications per integration.", - }, []string{"integration"}), // "integration" is consistent with other alertmanager metrics. 
- - } - - am.registry = reg - - // We currently have 3 operational modes: - // 1) Alertmanager clustering with upstream Gossip - // 2) Alertmanager sharding and ring-based replication - // 3) Alertmanager no replication - // These are covered in order. - if cfg.Peer != nil { - level.Debug(am.logger).Log("msg", "starting tenant alertmanager with gossip-based replication") - am.state = cfg.Peer - } else if cfg.ShardingEnabled { - level.Debug(am.logger).Log("msg", "starting tenant alertmanager with ring-based replication") - state := newReplicatedStates(cfg.UserID, cfg.ReplicationFactor, cfg.Replicator, cfg.Store, am.logger, am.registry) - am.state = state - am.persister = newStatePersister(cfg.PersisterConfig, cfg.UserID, state, cfg.Store, am.logger, am.registry) - } else { - level.Debug(am.logger).Log("msg", "starting tenant alertmanager without replication") - am.state = &NilPeer{} - } - - am.wg.Add(1) - var err error - am.nflog, err = nflog.New( - nflog.WithRetention(cfg.Retention), - nflog.WithSnapshot(filepath.Join(cfg.TenantDataDir, notificationLogSnapshot)), - nflog.WithMaintenance(maintenancePeriod, am.stop, am.wg.Done, nil), - nflog.WithMetrics(am.registry), - nflog.WithLogger(log.With(am.logger, "component", "nflog")), - ) - if err != nil { - return nil, fmt.Errorf("failed to create notification log: %v", err) - } - - c := am.state.AddState("nfl:"+cfg.UserID, am.nflog, am.registry) - am.nflog.SetBroadcast(c.Broadcast) - - am.marker = types.NewMarker(am.registry) - - silencesFile := filepath.Join(cfg.TenantDataDir, silencesSnapshot) - am.silences, err = silence.New(silence.Options{ - SnapshotFile: silencesFile, - Retention: cfg.Retention, - Logger: log.With(am.logger, "component", "silences"), - Metrics: am.registry, - }) - if err != nil { - return nil, fmt.Errorf("failed to create silences: %v", err) - } - - c = am.state.AddState("sil:"+cfg.UserID, am.silences, am.registry) - am.silences.SetBroadcast(c.Broadcast) - - // State replication needs to be started after the state keys are defined. - if service, ok := am.state.(services.Service); ok { - if err := service.StartAsync(context.Background()); err != nil { - return nil, errors.Wrap(err, "failed to start ring-based replication service") - } - } - - if am.persister != nil { - if err := am.persister.StartAsync(context.Background()); err != nil { - return nil, errors.Wrap(err, "failed to start state persister service") - } - } - - am.pipelineBuilder = notify.NewPipelineBuilder(am.registry) - - am.wg.Add(1) - go func() { - am.silences.Maintenance(maintenancePeriod, silencesFile, am.stop, nil) - am.wg.Done() - }() - - var callback mem.AlertStoreCallback - if am.cfg.Limits != nil { - callback = newAlertsLimiter(am.cfg.UserID, am.cfg.Limits, reg) - } - - am.alerts, err = mem.NewAlerts(context.Background(), am.marker, 30*time.Minute, callback, am.logger) - if err != nil { - return nil, fmt.Errorf("failed to create alerts: %v", err) - } - - am.api, err = api.New(api.Options{ - Alerts: am.alerts, - Silences: am.silences, - StatusFunc: am.marker.Status, - // Cortex should not expose cluster information back to its tenants. 
- Peer: &NilPeer{}, - Registry: am.registry, - Logger: log.With(am.logger, "component", "api"), - GroupFunc: func(f1 func(*dispatch.Route) bool, f2 func(*types.Alert, time.Time) bool) (dispatch.AlertGroups, map[model.Fingerprint][]string) { - return am.dispatcher.Groups(f1, f2) - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to create api: %v", err) - } - - router := route.New().WithPrefix(am.cfg.ExternalURL.Path) - - ui.Register(router, webReload, log.With(am.logger, "component", "ui")) - am.mux = am.api.Register(router, am.cfg.ExternalURL.Path) - - // Override some extra paths registered in the router (eg. /metrics which by default exposes prometheus.DefaultRegisterer). - // Entire router is registered in Mux to "/" path, so there is no conflict with overwriting specific paths. - for _, p := range []string{"/metrics", "/-/reload", "/debug/"} { - a := path.Join(am.cfg.ExternalURL.Path, p) - // Preserve end slash, as for Mux it means entire subtree. - if strings.HasSuffix(p, "/") { - a = a + "/" - } - am.mux.Handle(a, http.NotFoundHandler()) - } - - am.dispatcherMetrics = dispatch.NewDispatcherMetrics(true, am.registry) - - //TODO: From this point onward, the alertmanager _might_ receive requests - we need to make sure we've settled and are ready. - return am, nil -} - -func (am *Alertmanager) WaitInitialStateSync(ctx context.Context) error { - if service, ok := am.state.(services.Service); ok { - if err := service.AwaitRunning(ctx); err != nil { - return errors.Wrap(err, "failed to wait for ring-based replication service") - } - } - return nil -} - -// clusterWait returns a function that inspects the current peer state and returns -// a duration of one base timeout for each peer with a higher ID than ourselves. -func clusterWait(position func() int, timeout time.Duration) func() time.Duration { - return func() time.Duration { - return time.Duration(position()) * timeout - } -} - -// ApplyConfig applies a new configuration to an Alertmanager. -func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config, rawCfg string) error { - templateFiles := make([]string, len(conf.Templates)) - for i, t := range conf.Templates { - templateFilepath, err := safeTemplateFilepath(filepath.Join(am.cfg.TenantDataDir, templatesDir), t) - if err != nil { - return err - } - - templateFiles[i] = templateFilepath - } - - tmpl, err := template.FromGlobs(templateFiles...) - if err != nil { - return err - } - tmpl.ExternalURL = am.cfg.ExternalURL - - am.api.Update(conf, func(_ model.LabelSet) {}) - - // Ensure inhibitor is set before being called - if am.inhibitor != nil { - am.inhibitor.Stop() - } - - // Ensure dispatcher is set before being called - if am.dispatcher != nil { - am.dispatcher.Stop() - } - - am.inhibitor = inhibit.NewInhibitor(am.alerts, conf.InhibitRules, am.marker, log.With(am.logger, "component", "inhibitor")) - - waitFunc := clusterWait(am.state.Position, am.cfg.PeerTimeout) - - timeoutFunc := func(d time.Duration) time.Duration { - if d < notify.MinTimeout { - d = notify.MinTimeout - } - return d + waitFunc() - } - - // Create a firewall binded to the per-tenant config. 
- firewallDialer := util_net.NewFirewallDialer(newFirewallDialerConfigProvider(userID, am.cfg.Limits)) - - integrationsMap, err := buildIntegrationsMap(conf.Receivers, tmpl, firewallDialer, am.logger, func(integrationName string, notifier notify.Notifier) notify.Notifier { - if am.cfg.Limits != nil { - rl := &tenantRateLimits{ - tenant: userID, - limits: am.cfg.Limits, - integration: integrationName, - } - - return newRateLimitedNotifier(notifier, rl, 10*time.Second, am.rateLimitedNotifications.WithLabelValues(integrationName)) - } - return notifier - }) - if err != nil { - return nil - } - - muteTimes := make(map[string][]timeinterval.TimeInterval, len(conf.MuteTimeIntervals)) - for _, ti := range conf.MuteTimeIntervals { - muteTimes[ti.Name] = ti.TimeIntervals - } - - pipeline := am.pipelineBuilder.New( - integrationsMap, - waitFunc, - am.inhibitor, - silence.NewSilencer(am.silences, am.marker, am.logger), - muteTimes, - am.nflog, - am.state, - ) - am.lastPipeline = pipeline - am.dispatcher = dispatch.NewDispatcher( - am.alerts, - dispatch.NewRoute(conf.Route, nil), - pipeline, - am.marker, - timeoutFunc, - &dispatcherLimits{tenant: am.cfg.UserID, limits: am.cfg.Limits}, - log.With(am.logger, "component", "dispatcher"), - am.dispatcherMetrics, - ) - - go am.dispatcher.Run() - go am.inhibitor.Run() - - am.configHashMetric.Set(md5HashAsMetricValue([]byte(rawCfg))) - return nil -} - -// Stop stops the Alertmanager. -func (am *Alertmanager) Stop() { - if am.inhibitor != nil { - am.inhibitor.Stop() - } - - if am.dispatcher != nil { - am.dispatcher.Stop() - } - - if am.persister != nil { - am.persister.StopAsync() - } - - if service, ok := am.state.(services.Service); ok { - service.StopAsync() - } - - am.alerts.Close() - close(am.stop) -} - -func (am *Alertmanager) StopAndWait() { - am.Stop() - - if am.persister != nil { - if err := am.persister.AwaitTerminated(context.Background()); err != nil { - level.Warn(am.logger).Log("msg", "error while stopping state persister service", "err", err) - } - } - - if service, ok := am.state.(services.Service); ok { - if err := service.AwaitTerminated(context.Background()); err != nil { - level.Warn(am.logger).Log("msg", "error while stopping ring-based replication service", "err", err) - } - } - - am.wg.Wait() -} - -func (am *Alertmanager) mergePartialExternalState(part *clusterpb.Part) error { - if state, ok := am.state.(*state); ok { - return state.MergePartialState(part) - } - return errors.New("ring-based sharding not enabled") -} - -func (am *Alertmanager) getFullState() (*clusterpb.FullState, error) { - if state, ok := am.state.(*state); ok { - return state.GetFullState() - } - return nil, errors.New("ring-based sharding not enabled") -} - -// buildIntegrationsMap builds a map of name to the list of integration notifiers off of a -// list of receiver config. -func buildIntegrationsMap(nc []*config.Receiver, tmpl *template.Template, firewallDialer *util_net.FirewallDialer, logger log.Logger, notifierWrapper func(string, notify.Notifier) notify.Notifier) (map[string][]notify.Integration, error) { - integrationsMap := make(map[string][]notify.Integration, len(nc)) - for _, rcv := range nc { - integrations, err := buildReceiverIntegrations(rcv, tmpl, firewallDialer, logger, notifierWrapper) - if err != nil { - return nil, err - } - integrationsMap[rcv.Name] = integrations - } - return integrationsMap, nil -} - -// buildReceiverIntegrations builds a list of integration notifiers off of a -// receiver config. 
-// Taken from https://github.com/prometheus/alertmanager/blob/94d875f1227b29abece661db1a68c001122d1da5/cmd/alertmanager/main.go#L112-L159. -func buildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, firewallDialer *util_net.FirewallDialer, logger log.Logger, wrapper func(string, notify.Notifier) notify.Notifier) ([]notify.Integration, error) { - var ( - errs types.MultiError - integrations []notify.Integration - add = func(name string, i int, rs notify.ResolvedSender, f func(l log.Logger) (notify.Notifier, error)) { - n, err := f(log.With(logger, "integration", name)) - if err != nil { - errs.Add(err) - return - } - n = wrapper(name, n) - integrations = append(integrations, notify.NewIntegration(n, rs, name, i)) - } - ) - - // Inject the firewall to any receiver integration supporting it. - httpOps := []commoncfg.HTTPClientOption{ - commoncfg.WithDialContextFunc(firewallDialer.DialContext), - } - - for i, c := range nc.WebhookConfigs { - add("webhook", i, c, func(l log.Logger) (notify.Notifier, error) { return webhook.New(c, tmpl, l, httpOps...) }) - } - for i, c := range nc.EmailConfigs { - add("email", i, c, func(l log.Logger) (notify.Notifier, error) { return email.New(c, tmpl, l), nil }) - } - for i, c := range nc.PagerdutyConfigs { - add("pagerduty", i, c, func(l log.Logger) (notify.Notifier, error) { return pagerduty.New(c, tmpl, l, httpOps...) }) - } - for i, c := range nc.OpsGenieConfigs { - add("opsgenie", i, c, func(l log.Logger) (notify.Notifier, error) { return opsgenie.New(c, tmpl, l, httpOps...) }) - } - for i, c := range nc.WechatConfigs { - add("wechat", i, c, func(l log.Logger) (notify.Notifier, error) { return wechat.New(c, tmpl, l, httpOps...) }) - } - for i, c := range nc.SlackConfigs { - add("slack", i, c, func(l log.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l, httpOps...) }) - } - for i, c := range nc.VictorOpsConfigs { - add("victorops", i, c, func(l log.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l, httpOps...) }) - } - for i, c := range nc.PushoverConfigs { - add("pushover", i, c, func(l log.Logger) (notify.Notifier, error) { return pushover.New(c, tmpl, l, httpOps...) }) - } - for i, c := range nc.SNSConfigs { - add("sns", i, c, func(l log.Logger) (notify.Notifier, error) { return sns.New(c, tmpl, l, httpOps...) }) - } - // If we add support for more integrations, we need to add them to validation as well. See validation.allowedIntegrationNames field. - if errs.Len() > 0 { - return nil, &errs - } - return integrations, nil -} - -func md5HashAsMetricValue(data []byte) float64 { - sum := md5.Sum(data) - // We only want 48 bits as a float64 only has a 53 bit mantissa. - smallSum := sum[0:6] - var bytes = make([]byte, 8) - copy(bytes, smallSum) - return float64(binary.LittleEndian.Uint64(bytes)) -} - -// NilPeer and NilChannel implements the Alertmanager clustering interface used by the API to expose cluster information. -// In a multi-tenant environment, we choose not to expose these to tenants and thus are not implemented. 
-type NilPeer struct{} - -func (p *NilPeer) Name() string { return "" } -func (p *NilPeer) Status() string { return "ready" } -func (p *NilPeer) Peers() []cluster.ClusterMember { return nil } -func (p *NilPeer) Position() int { return 0 } -func (p *NilPeer) WaitReady(context.Context) error { return nil } -func (p *NilPeer) AddState(string, cluster.State, prometheus.Registerer) cluster.ClusterChannel { - return &NilChannel{} -} - -type NilChannel struct{} - -func (c *NilChannel) Broadcast([]byte) {} - -type firewallDialerConfigProvider struct { - userID string - limits Limits -} - -func newFirewallDialerConfigProvider(userID string, limits Limits) firewallDialerConfigProvider { - return firewallDialerConfigProvider{ - userID: userID, - limits: limits, - } -} - -func (p firewallDialerConfigProvider) BlockCIDRNetworks() []flagext.CIDR { - return p.limits.AlertmanagerReceiversBlockCIDRNetworks(p.userID) -} - -func (p firewallDialerConfigProvider) BlockPrivateAddresses() bool { - return p.limits.AlertmanagerReceiversBlockPrivateAddresses(p.userID) -} - -type tenantRateLimits struct { - tenant string - integration string - limits Limits -} - -func (t *tenantRateLimits) RateLimit() rate.Limit { - return t.limits.NotificationRateLimit(t.tenant, t.integration) -} - -func (t *tenantRateLimits) Burst() int { - return t.limits.NotificationBurstSize(t.tenant, t.integration) -} - -type dispatcherLimits struct { - tenant string - limits Limits -} - -func (g *dispatcherLimits) MaxNumberOfAggregationGroups() int { - return g.limits.AlertmanagerMaxDispatcherAggregationGroups(g.tenant) -} - -var ( - errTooManyAlerts = "too many alerts, limit: %d" - errAlertsTooBig = "alerts too big, total size limit: %d bytes" -) - -// alertsLimiter limits the number and size of alerts being received by the Alertmanager. -// We consider an alert unique based on its fingerprint (a hash of its labels) and -// its size it's determined by the sum of bytes of its labels, annotations, and generator URL. 
-type alertsLimiter struct { - tenant string - limits Limits - - failureCounter prometheus.Counter - - mx sync.Mutex - sizes map[model.Fingerprint]int - count int - totalSize int -} - -func newAlertsLimiter(tenant string, limits Limits, reg prometheus.Registerer) *alertsLimiter { - limiter := &alertsLimiter{ - tenant: tenant, - limits: limits, - sizes: map[model.Fingerprint]int{}, - failureCounter: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "alertmanager_alerts_insert_limited_total", - Help: "Number of failures to insert new alerts to in-memory alert store.", - }), - } - - promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "alertmanager_alerts_limiter_current_alerts", - Help: "Number of alerts tracked by alerts limiter.", - }, func() float64 { - c, _ := limiter.currentStats() - return float64(c) - }) - - promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "alertmanager_alerts_limiter_current_alerts_size_bytes", - Help: "Total size of alerts tracked by alerts limiter.", - }, func() float64 { - _, s := limiter.currentStats() - return float64(s) - }) - - return limiter -} - -func (a *alertsLimiter) PreStore(alert *types.Alert, existing bool) error { - if alert == nil { - return nil - } - - fp := alert.Fingerprint() - - countLimit := a.limits.AlertmanagerMaxAlertsCount(a.tenant) - sizeLimit := a.limits.AlertmanagerMaxAlertsSizeBytes(a.tenant) - - sizeDiff := alertSize(alert.Alert) - - a.mx.Lock() - defer a.mx.Unlock() - - if !existing && countLimit > 0 && (a.count+1) > countLimit { - a.failureCounter.Inc() - return fmt.Errorf(errTooManyAlerts, countLimit) - } - - if existing { - sizeDiff -= a.sizes[fp] - } - - if sizeLimit > 0 && (a.totalSize+sizeDiff) > sizeLimit { - a.failureCounter.Inc() - return fmt.Errorf(errAlertsTooBig, sizeLimit) - } - - return nil -} - -func (a *alertsLimiter) PostStore(alert *types.Alert, existing bool) { - if alert == nil { - return - } - - newSize := alertSize(alert.Alert) - fp := alert.Fingerprint() - - a.mx.Lock() - defer a.mx.Unlock() - - if existing { - a.totalSize -= a.sizes[fp] - } else { - a.count++ - } - a.sizes[fp] = newSize - a.totalSize += newSize -} - -func (a *alertsLimiter) PostDelete(alert *types.Alert) { - if alert == nil { - return - } - - fp := alert.Fingerprint() - - a.mx.Lock() - defer a.mx.Unlock() - - a.totalSize -= a.sizes[fp] - delete(a.sizes, fp) - a.count-- -} - -func (a *alertsLimiter) currentStats() (count, totalSize int) { - a.mx.Lock() - defer a.mx.Unlock() - - return a.count, a.totalSize -} - -func alertSize(alert model.Alert) int { - size := 0 - for l, v := range alert.Labels { - size += len(l) - size += len(v) - } - for l, v := range alert.Annotations { - size += len(l) - size += len(v) - } - size += len(alert.GeneratorURL) - return size -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_client.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_client.go deleted file mode 100644 index e95d8708a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_client.go +++ /dev/null @@ -1,132 +0,0 @@ -package alertmanager - -import ( - "flag" - "time" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb" - "github.com/cortexproject/cortex/pkg/ring/client" - 
"github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/tls" -) - -// ClientsPool is the interface used to get the client from the pool for a specified address. -type ClientsPool interface { - // GetClientFor returns the alertmanager client for the given address. - GetClientFor(addr string) (Client, error) -} - -// Client is the interface that should be implemented by any client used to read/write data to an alertmanager via GRPC. -type Client interface { - alertmanagerpb.AlertmanagerClient - - // RemoteAddress returns the address of the remote alertmanager and is used to uniquely - // identify an alertmanager instance. - RemoteAddress() string -} - -// ClientConfig is the configuration struct for the alertmanager client. -type ClientConfig struct { - RemoteTimeout time.Duration `yaml:"remote_timeout"` - TLSEnabled bool `yaml:"tls_enabled"` - TLS tls.ClientConfig `yaml:",inline"` -} - -// RegisterFlagsWithPrefix registers flags with prefix. -func (cfg *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.BoolVar(&cfg.TLSEnabled, prefix+".tls-enabled", cfg.TLSEnabled, "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.") - f.DurationVar(&cfg.RemoteTimeout, prefix+".remote-timeout", 2*time.Second, "Timeout for downstream alertmanagers.") - cfg.TLS.RegisterFlagsWithPrefix(prefix, f) -} - -type alertmanagerClientsPool struct { - pool *client.Pool -} - -func newAlertmanagerClientsPool(discovery client.PoolServiceDiscovery, amClientCfg ClientConfig, logger log.Logger, reg prometheus.Registerer) ClientsPool { - // We prefer sane defaults instead of exposing further config options. - grpcCfg := grpcclient.Config{ - MaxRecvMsgSize: 16 * 1024 * 1024, - MaxSendMsgSize: 4 * 1024 * 1024, - GRPCCompression: "", - RateLimit: 0, - RateLimitBurst: 0, - BackoffOnRatelimits: false, - TLSEnabled: amClientCfg.TLSEnabled, - TLS: amClientCfg.TLS, - } - - requestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Name: "cortex_alertmanager_distributor_client_request_duration_seconds", - Help: "Time spent executing requests from an alertmanager to another alertmanager.", - Buckets: prometheus.ExponentialBuckets(0.008, 4, 7), - }, []string{"operation", "status_code"}) - - factory := func(addr string) (client.PoolClient, error) { - return dialAlertmanagerClient(grpcCfg, addr, requestDuration) - } - - poolCfg := client.PoolConfig{ - CheckInterval: time.Minute, - HealthCheckEnabled: true, - HealthCheckTimeout: 10 * time.Second, - } - - clientsCount := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "alertmanager_distributor_clients", - Help: "The current number of alertmanager distributor clients in the pool.", - }) - - return &alertmanagerClientsPool{pool: client.NewPool("alertmanager", poolCfg, discovery, factory, clientsCount, logger)} -} - -func (f *alertmanagerClientsPool) GetClientFor(addr string) (Client, error) { - c, err := f.pool.GetClientFor(addr) - if err != nil { - return nil, err - } - return c.(Client), nil -} - -func dialAlertmanagerClient(cfg grpcclient.Config, addr string, requestDuration *prometheus.HistogramVec) (*alertmanagerClient, error) { - opts, err := cfg.DialOption(grpcclient.Instrument(requestDuration)) - if err != nil { - return nil, err - } - conn, err := grpc.Dial(addr, opts...) 
- if err != nil {
- return nil, errors.Wrapf(err, "failed to dial alertmanager %s", addr)
- }
-
- return &alertmanagerClient{
- AlertmanagerClient: alertmanagerpb.NewAlertmanagerClient(conn),
- HealthClient: grpc_health_v1.NewHealthClient(conn),
- conn: conn,
- }, nil
-}
-
-type alertmanagerClient struct {
- alertmanagerpb.AlertmanagerClient
- grpc_health_v1.HealthClient
- conn *grpc.ClientConn
-}
-
-func (c *alertmanagerClient) Close() error {
- return c.conn.Close()
-}
-
-func (c *alertmanagerClient) String() string {
- return c.RemoteAddress()
-}
-
-func (c *alertmanagerClient) RemoteAddress() string {
- return c.conn.Target()
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go
deleted file mode 100644
index 1b27ef7b9..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package alertmanager
-
-import (
- "net/http"
- "text/template"
-
- "github.com/go-kit/log/level"
-
- util_log "github.com/cortexproject/cortex/pkg/util/log"
- "github.com/cortexproject/cortex/pkg/util/services"
-)
-
-var (
- ringStatusPageTemplate = template.Must(template.New("ringStatusPage").Parse(`
- <!DOCTYPE html>
- <html>
- <head>
- <meta charset="UTF-8">
- <title>Cortex Alertmanager Ring</title>
- </head>
- <body>
- <h1>Cortex Alertmanager Ring</h1>
- <p>{{ .Message }}</p>
- </body>
- </html>`))
-
- statusTemplate = template.Must(template.New("statusPage").Parse(`
- <!doctype html>
- <html>
- <head><title>Cortex Alertmanager Status</title></head>
- <body>
- <h1>Cortex Alertmanager Status</h1>
- {{ if not .ClusterInfo }}
- <p>Alertmanager gossip-based clustering is disabled.</p>
- {{ else }}
- <h2>Node</h2>
- <dl>
- <dt>Name</dt><dd>{{.ClusterInfo.self.Name}}</dd>
- <dt>Addr</dt><dd>{{.ClusterInfo.self.Addr}}</dd>
- <dt>Port</dt><dd>{{.ClusterInfo.self.Port}}</dd>
- </dl>
- <h3>Members</h3>
- {{ with .ClusterInfo.members }}
- <table>
- <tr><th>Name</th><th>Addr</th></tr>
- {{ range . }}
- <tr><td>{{ .Name }}</td><td>{{ .Addr }}</td></tr>
- {{ end }}
- </table>
- {{ else }}
- <p>No peers</p>
- {{ end }}
- {{ end }}
- </body>
- </html>`))
-)
-
-func writeRingStatusMessage(w http.ResponseWriter, message string) {
- w.WriteHeader(http.StatusOK)
- err := ringStatusPageTemplate.Execute(w, struct {
- Message string
- }{Message: message})
- if err != nil {
- level.Error(util_log.Logger).Log("msg", "unable to serve alertmanager ring page", "err", err)
- }
-}
-
-func (am *MultitenantAlertmanager) RingHandler(w http.ResponseWriter, req *http.Request) {
- if !am.cfg.ShardingEnabled {
- writeRingStatusMessage(w, "Alertmanager has no ring because sharding is disabled.")
- return
- }
-
- if am.State() != services.Running {
- // we cannot read the ring before the alertmanager is in Running state,
- // because that would lead to race condition.
- writeRingStatusMessage(w, "Alertmanager is not running yet.")
- return
- }
-
- am.ring.ServeHTTP(w, req)
-}
-
-// GetStatusHandler returns the status handler for this multi-tenant
-// alertmanager.
-func (am *MultitenantAlertmanager) GetStatusHandler() StatusHandler {
- return StatusHandler{
- am: am,
- }
-}
-
-// StatusHandler shows the status of the alertmanager.
-type StatusHandler struct {
- am *MultitenantAlertmanager
-}
-
-// ServeHTTP serves the status of the alertmanager.
-func (s StatusHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
- var clusterInfo map[string]interface{}
- if s.am.peer != nil {
- clusterInfo = s.am.peer.Info()
- }
- err := statusTemplate.Execute(w, struct {
- ClusterInfo map[string]interface{}
- }{
- ClusterInfo: clusterInfo,
- })
- if err != nil {
- level.Error(util_log.Logger).Log("msg", "unable to serve alertmanager status page", "err", err)
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
deleted file mode 100644
index a5371f6c9..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package alertmanager
-
-import (
- "github.com/prometheus/client_golang/prometheus"
-
- "github.com/cortexproject/cortex/pkg/util"
-)
-
-// This struct aggregates metrics exported by Alertmanager
-// and re-exports those aggregates as Cortex metrics.
-type alertmanagerMetrics struct { - regs *util.UserRegistries - - // exported metrics, gathered from Alertmanager API - alertsReceived *prometheus.Desc - alertsInvalid *prometheus.Desc - - // exported metrics, gathered from Alertmanager PipelineBuilder - numNotifications *prometheus.Desc - numFailedNotifications *prometheus.Desc - numNotificationRequestsTotal *prometheus.Desc - numNotificationRequestsFailedTotal *prometheus.Desc - notificationLatencySeconds *prometheus.Desc - - // exported metrics, gathered from Alertmanager nflog - nflogGCDuration *prometheus.Desc - nflogSnapshotDuration *prometheus.Desc - nflogSnapshotSize *prometheus.Desc - nflogQueriesTotal *prometheus.Desc - nflogQueryErrorsTotal *prometheus.Desc - nflogQueryDuration *prometheus.Desc - nflogPropagatedMessagesTotal *prometheus.Desc - - // exported metrics, gathered from Alertmanager Marker - markerAlerts *prometheus.Desc - - // exported metrics, gathered from Alertmanager Silences - silencesGCDuration *prometheus.Desc - silencesSnapshotDuration *prometheus.Desc - silencesSnapshotSize *prometheus.Desc - silencesQueriesTotal *prometheus.Desc - silencesQueryErrorsTotal *prometheus.Desc - silencesQueryDuration *prometheus.Desc - silences *prometheus.Desc - silencesPropagatedMessagesTotal *prometheus.Desc - - // The alertmanager config hash. - configHashValue *prometheus.Desc - - partialMerges *prometheus.Desc - partialMergesFailed *prometheus.Desc - replicationTotal *prometheus.Desc - replicationFailed *prometheus.Desc - fetchReplicaStateTotal *prometheus.Desc - fetchReplicaStateFailed *prometheus.Desc - initialSyncTotal *prometheus.Desc - initialSyncCompleted *prometheus.Desc - initialSyncDuration *prometheus.Desc - persistTotal *prometheus.Desc - persistFailed *prometheus.Desc - - notificationRateLimited *prometheus.Desc - dispatcherAggregationGroupsLimitReached *prometheus.Desc - insertAlertFailures *prometheus.Desc - alertsLimiterAlertsCount *prometheus.Desc - alertsLimiterAlertsSize *prometheus.Desc -} - -func newAlertmanagerMetrics() *alertmanagerMetrics { - return &alertmanagerMetrics{ - regs: util.NewUserRegistries(), - alertsReceived: prometheus.NewDesc( - "cortex_alertmanager_alerts_received_total", - "The total number of received alerts.", - []string{"user"}, nil), - alertsInvalid: prometheus.NewDesc( - "cortex_alertmanager_alerts_invalid_total", - "The total number of received alerts that were invalid.", - []string{"user"}, nil), - numNotifications: prometheus.NewDesc( - "cortex_alertmanager_notifications_total", - "The total number of attempted notifications.", - []string{"user", "integration"}, nil), - numFailedNotifications: prometheus.NewDesc( - "cortex_alertmanager_notifications_failed_total", - "The total number of failed notifications.", - []string{"user", "integration"}, nil), - numNotificationRequestsTotal: prometheus.NewDesc( - "cortex_alertmanager_notification_requests_total", - "The total number of attempted notification requests.", - []string{"user", "integration"}, nil), - numNotificationRequestsFailedTotal: prometheus.NewDesc( - "cortex_alertmanager_notification_requests_failed_total", - "The total number of failed notification requests.", - []string{"user", "integration"}, nil), - notificationLatencySeconds: prometheus.NewDesc( - "cortex_alertmanager_notification_latency_seconds", - "The latency of notifications in seconds.", - nil, nil), - nflogGCDuration: prometheus.NewDesc( - "cortex_alertmanager_nflog_gc_duration_seconds", - "Duration of the last notification log garbage collection cycle.", 
- nil, nil), - nflogSnapshotDuration: prometheus.NewDesc( - "cortex_alertmanager_nflog_snapshot_duration_seconds", - "Duration of the last notification log snapshot.", - nil, nil), - nflogSnapshotSize: prometheus.NewDesc( - "cortex_alertmanager_nflog_snapshot_size_bytes", - "Size of the last notification log snapshot in bytes.", - nil, nil), - nflogQueriesTotal: prometheus.NewDesc( - "cortex_alertmanager_nflog_queries_total", - "Number of notification log queries were received.", - nil, nil), - nflogQueryErrorsTotal: prometheus.NewDesc( - "cortex_alertmanager_nflog_query_errors_total", - "Number notification log received queries that failed.", - nil, nil), - nflogQueryDuration: prometheus.NewDesc( - "cortex_alertmanager_nflog_query_duration_seconds", - "Duration of notification log query evaluation.", - nil, nil), - nflogPropagatedMessagesTotal: prometheus.NewDesc( - "cortex_alertmanager_nflog_gossip_messages_propagated_total", - "Number of received gossip messages that have been further gossiped.", - nil, nil), - markerAlerts: prometheus.NewDesc( - "cortex_alertmanager_alerts", - "How many alerts by state.", - []string{"user", "state"}, nil), - silencesGCDuration: prometheus.NewDesc( - "cortex_alertmanager_silences_gc_duration_seconds", - "Duration of the last silence garbage collection cycle.", - nil, nil), - silencesSnapshotDuration: prometheus.NewDesc( - "cortex_alertmanager_silences_snapshot_duration_seconds", - "Duration of the last silence snapshot.", - nil, nil), - silencesSnapshotSize: prometheus.NewDesc( - "cortex_alertmanager_silences_snapshot_size_bytes", - "Size of the last silence snapshot in bytes.", - nil, nil), - silencesQueriesTotal: prometheus.NewDesc( - "cortex_alertmanager_silences_queries_total", - "How many silence queries were received.", - nil, nil), - silencesQueryErrorsTotal: prometheus.NewDesc( - "cortex_alertmanager_silences_query_errors_total", - "How many silence received queries did not succeed.", - nil, nil), - silencesQueryDuration: prometheus.NewDesc( - "cortex_alertmanager_silences_query_duration_seconds", - "Duration of silence query evaluation.", - nil, nil), - silencesPropagatedMessagesTotal: prometheus.NewDesc( - "cortex_alertmanager_silences_gossip_messages_propagated_total", - "Number of received gossip messages that have been further gossiped.", - nil, nil), - silences: prometheus.NewDesc( - "cortex_alertmanager_silences", - "How many silences by state.", - []string{"user", "state"}, nil), - configHashValue: prometheus.NewDesc( - "cortex_alertmanager_config_hash", - "Hash of the currently loaded alertmanager configuration.", - []string{"user"}, nil), - partialMerges: prometheus.NewDesc( - "cortex_alertmanager_partial_state_merges_total", - "Number of times we have received a partial state to merge for a key.", - []string{"user"}, nil), - partialMergesFailed: prometheus.NewDesc( - "cortex_alertmanager_partial_state_merges_failed_total", - "Number of times we have failed to merge a partial state received for a key.", - []string{"user"}, nil), - replicationTotal: prometheus.NewDesc( - "cortex_alertmanager_state_replication_total", - "Number of times we have tried to replicate a state to other alertmanagers", - []string{"user"}, nil), - replicationFailed: prometheus.NewDesc( - "cortex_alertmanager_state_replication_failed_total", - "Number of times we have failed to replicate a state to other alertmanagers", - []string{"user"}, nil), - fetchReplicaStateTotal: prometheus.NewDesc( - "cortex_alertmanager_state_fetch_replica_state_total", - "Number of 
times we have tried to read and merge the full state from another replica.", - nil, nil), - fetchReplicaStateFailed: prometheus.NewDesc( - "cortex_alertmanager_state_fetch_replica_state_failed_total", - "Number of times we have failed to read and merge the full state from another replica.", - nil, nil), - initialSyncTotal: prometheus.NewDesc( - "cortex_alertmanager_state_initial_sync_total", - "Number of times we have tried to sync initial state from peers or storage.", - nil, nil), - initialSyncCompleted: prometheus.NewDesc( - "cortex_alertmanager_state_initial_sync_completed_total", - "Number of times we have completed syncing initial state for each possible outcome.", - []string{"outcome"}, nil), - initialSyncDuration: prometheus.NewDesc( - "cortex_alertmanager_state_initial_sync_duration_seconds", - "Time spent syncing initial state from peers or storage.", - nil, nil), - persistTotal: prometheus.NewDesc( - "cortex_alertmanager_state_persist_total", - "Number of times we have tried to persist the running state to storage.", - nil, nil), - persistFailed: prometheus.NewDesc( - "cortex_alertmanager_state_persist_failed_total", - "Number of times we have failed to persist the running state to storage.", - nil, nil), - notificationRateLimited: prometheus.NewDesc( - "cortex_alertmanager_notification_rate_limited_total", - "Total number of rate-limited notifications per integration.", - []string{"user", "integration"}, nil), - dispatcherAggregationGroupsLimitReached: prometheus.NewDesc( - "cortex_alertmanager_dispatcher_aggregation_group_limit_reached_total", - "Number of times when dispatcher failed to create new aggregation group due to limit.", - []string{"user"}, nil), - insertAlertFailures: prometheus.NewDesc( - "cortex_alertmanager_alerts_insert_limited_total", - "Total number of failures to store alert due to hitting alertmanager limits.", - []string{"user"}, nil), - alertsLimiterAlertsCount: prometheus.NewDesc( - "cortex_alertmanager_alerts_limiter_current_alerts", - "Number of alerts tracked by alerts limiter.", - []string{"user"}, nil), - alertsLimiterAlertsSize: prometheus.NewDesc( - "cortex_alertmanager_alerts_limiter_current_alerts_size_bytes", - "Total size of alerts tracked by alerts limiter.", - []string{"user"}, nil), - } -} - -func (m *alertmanagerMetrics) addUserRegistry(user string, reg *prometheus.Registry) { - m.regs.AddUserRegistry(user, reg) -} - -func (m *alertmanagerMetrics) removeUserRegistry(user string) { - // We need to go for a soft deletion here, as hard deletion requires - // that _all_ metrics except gauges are per-user. 
- m.regs.RemoveUserRegistry(user, false) -} - -func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) { - out <- m.alertsReceived - out <- m.alertsInvalid - out <- m.numNotifications - out <- m.numFailedNotifications - out <- m.numNotificationRequestsTotal - out <- m.numNotificationRequestsFailedTotal - out <- m.notificationLatencySeconds - out <- m.markerAlerts - out <- m.nflogGCDuration - out <- m.nflogSnapshotDuration - out <- m.nflogSnapshotSize - out <- m.nflogQueriesTotal - out <- m.nflogQueryErrorsTotal - out <- m.nflogQueryDuration - out <- m.nflogPropagatedMessagesTotal - out <- m.silencesGCDuration - out <- m.silencesSnapshotDuration - out <- m.silencesSnapshotSize - out <- m.silencesQueriesTotal - out <- m.silencesQueryErrorsTotal - out <- m.silencesQueryDuration - out <- m.silencesPropagatedMessagesTotal - out <- m.silences - out <- m.configHashValue - out <- m.partialMerges - out <- m.partialMergesFailed - out <- m.replicationTotal - out <- m.replicationFailed - out <- m.fetchReplicaStateTotal - out <- m.fetchReplicaStateFailed - out <- m.initialSyncTotal - out <- m.initialSyncCompleted - out <- m.initialSyncDuration - out <- m.persistTotal - out <- m.persistFailed - out <- m.notificationRateLimited - out <- m.dispatcherAggregationGroupsLimitReached - out <- m.insertAlertFailures - out <- m.alertsLimiterAlertsCount - out <- m.alertsLimiterAlertsSize -} - -func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) { - data := m.regs.BuildMetricFamiliesPerUser() - - data.SendSumOfCountersPerUser(out, m.alertsReceived, "alertmanager_alerts_received_total") - data.SendSumOfCountersPerUser(out, m.alertsInvalid, "alertmanager_alerts_invalid_total") - - data.SendSumOfCountersPerUserWithLabels(out, m.numNotifications, "alertmanager_notifications_total", "integration") - data.SendSumOfCountersPerUserWithLabels(out, m.numFailedNotifications, "alertmanager_notifications_failed_total", "integration") - data.SendSumOfCountersPerUserWithLabels(out, m.numNotificationRequestsTotal, "alertmanager_notification_requests_total", "integration") - data.SendSumOfCountersPerUserWithLabels(out, m.numNotificationRequestsFailedTotal, "alertmanager_notification_requests_failed_total", "integration") - data.SendSumOfHistograms(out, m.notificationLatencySeconds, "alertmanager_notification_latency_seconds") - data.SendSumOfGaugesPerUserWithLabels(out, m.markerAlerts, "alertmanager_alerts", "state") - - data.SendSumOfSummaries(out, m.nflogGCDuration, "alertmanager_nflog_gc_duration_seconds") - data.SendSumOfSummaries(out, m.nflogSnapshotDuration, "alertmanager_nflog_snapshot_duration_seconds") - data.SendSumOfGauges(out, m.nflogSnapshotSize, "alertmanager_nflog_snapshot_size_bytes") - data.SendSumOfCounters(out, m.nflogQueriesTotal, "alertmanager_nflog_queries_total") - data.SendSumOfCounters(out, m.nflogQueryErrorsTotal, "alertmanager_nflog_query_errors_total") - data.SendSumOfHistograms(out, m.nflogQueryDuration, "alertmanager_nflog_query_duration_seconds") - data.SendSumOfCounters(out, m.nflogPropagatedMessagesTotal, "alertmanager_nflog_gossip_messages_propagated_total") - - data.SendSumOfSummaries(out, m.silencesGCDuration, "alertmanager_silences_gc_duration_seconds") - data.SendSumOfSummaries(out, m.silencesSnapshotDuration, "alertmanager_silences_snapshot_duration_seconds") - data.SendSumOfGauges(out, m.silencesSnapshotSize, "alertmanager_silences_snapshot_size_bytes") - data.SendSumOfCounters(out, m.silencesQueriesTotal, "alertmanager_silences_queries_total") - 
data.SendSumOfCounters(out, m.silencesQueryErrorsTotal, "alertmanager_silences_query_errors_total") - data.SendSumOfHistograms(out, m.silencesQueryDuration, "alertmanager_silences_query_duration_seconds") - data.SendSumOfCounters(out, m.silencesPropagatedMessagesTotal, "alertmanager_silences_gossip_messages_propagated_total") - data.SendSumOfGaugesPerUserWithLabels(out, m.silences, "alertmanager_silences", "state") - - data.SendMaxOfGaugesPerUser(out, m.configHashValue, "alertmanager_config_hash") - - data.SendSumOfCountersPerUser(out, m.partialMerges, "alertmanager_partial_state_merges_total") - data.SendSumOfCountersPerUser(out, m.partialMergesFailed, "alertmanager_partial_state_merges_failed_total") - data.SendSumOfCountersPerUser(out, m.replicationTotal, "alertmanager_state_replication_total") - data.SendSumOfCountersPerUser(out, m.replicationFailed, "alertmanager_state_replication_failed_total") - data.SendSumOfCounters(out, m.fetchReplicaStateTotal, "alertmanager_state_fetch_replica_state_total") - data.SendSumOfCounters(out, m.fetchReplicaStateFailed, "alertmanager_state_fetch_replica_state_failed_total") - data.SendSumOfCounters(out, m.initialSyncTotal, "alertmanager_state_initial_sync_total") - data.SendSumOfCountersWithLabels(out, m.initialSyncCompleted, "alertmanager_state_initial_sync_completed_total", "outcome") - data.SendSumOfHistograms(out, m.initialSyncDuration, "alertmanager_state_initial_sync_duration_seconds") - data.SendSumOfCounters(out, m.persistTotal, "alertmanager_state_persist_total") - data.SendSumOfCounters(out, m.persistFailed, "alertmanager_state_persist_failed_total") - - data.SendSumOfCountersPerUserWithLabels(out, m.notificationRateLimited, "alertmanager_notification_rate_limited_total", "integration") - data.SendSumOfCountersPerUser(out, m.dispatcherAggregationGroupsLimitReached, "alertmanager_dispatcher_aggregation_group_limit_reached_total") - data.SendSumOfCountersPerUser(out, m.insertAlertFailures, "alertmanager_alerts_insert_limited_total") - data.SendSumOfGaugesPerUser(out, m.alertsLimiterAlertsCount, "alertmanager_alerts_limiter_current_alerts") - data.SendSumOfGaugesPerUser(out, m.alertsLimiterAlertsSize, "alertmanager_alerts_limiter_current_alerts_size_bytes") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go deleted file mode 100644 index 66532a374..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go +++ /dev/null @@ -1,126 +0,0 @@ -package alertmanager - -import ( - "flag" - "fmt" - "os" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util/flagext" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -const ( - // RingKey is the key under which we store the alertmanager ring in the KVStore. - RingKey = "alertmanager" - - // RingNameForServer is the name of the ring used by the alertmanager server. - RingNameForServer = "alertmanager" - - // RingNumTokens is a safe default instead of exposing to config option to the user - // in order to simplify the config. - RingNumTokens = 128 -) - -// RingOp is the operation used for reading/writing to the alertmanagers. -var RingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, func(s ring.InstanceState) bool { - // Only ACTIVE Alertmanager get requests. 
If instance is not ACTIVE, we need to find another Alertmanager. - return s != ring.ACTIVE -}) - -// SyncRingOp is the operation used for checking if a user is owned by an alertmanager. -var SyncRingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE, ring.JOINING}, func(s ring.InstanceState) bool { - return s != ring.ACTIVE -}) - -// RingConfig masks the ring lifecycler config which contains -// many options not really required by the alertmanager ring. This config -// is used to strip down the config to the minimum, and avoid confusion -// to the user. -type RingConfig struct { - KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances."` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - ReplicationFactor int `yaml:"replication_factor"` - ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - - // Instance details - InstanceID string `yaml:"instance_id" doc:"hidden"` - InstanceInterfaceNames []string `yaml:"instance_interface_names"` - InstancePort int `yaml:"instance_port" doc:"hidden"` - InstanceAddr string `yaml:"instance_addr" doc:"hidden"` - InstanceZone string `yaml:"instance_availability_zone"` - - // Injected internally - ListenPort int `yaml:"-"` - RingCheckPeriod time.Duration `yaml:"-"` - - // Used for testing - SkipUnregister bool `yaml:"-"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { - hostname, err := os.Hostname() - if err != nil { - level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) - os.Exit(1) - } - - // Prefix used by all the ring flags - rfprefix := "alertmanager.sharding-ring." - - // Ring flags - cfg.KVStore.RegisterFlagsWithPrefix(rfprefix, "alertmanagers/", f) - f.DurationVar(&cfg.HeartbeatPeriod, rfprefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") - f.DurationVar(&cfg.HeartbeatTimeout, rfprefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which alertmanagers are considered unhealthy within the ring. 0 = never (timeout disabled).") - f.IntVar(&cfg.ReplicationFactor, rfprefix+"replication-factor", 3, "The replication factor to use when sharding the alertmanager.") - f.BoolVar(&cfg.ZoneAwarenessEnabled, rfprefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate alerts across different availability zones.") - - // Instance flags - cfg.InstanceInterfaceNames = []string{"eth0", "en0"} - f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), rfprefix+"instance-interface-names", "Name of network interface to read address from.") - f.StringVar(&cfg.InstanceAddr, rfprefix+"instance-addr", "", "IP address to advertise in the ring.") - f.IntVar(&cfg.InstancePort, rfprefix+"instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") - f.StringVar(&cfg.InstanceID, rfprefix+"instance-id", hostname, "Instance ID to register in the ring.") - f.StringVar(&cfg.InstanceZone, rfprefix+"instance-availability-zone", "", "The availability zone where this instance is running. Required if zone-awareness is enabled.") - - cfg.RingCheckPeriod = 5 * time.Second -} - -// ToLifecyclerConfig returns a LifecyclerConfig based on the alertmanager -// ring config. 
-func (cfg *RingConfig) ToLifecyclerConfig(logger log.Logger) (ring.BasicLifecyclerConfig, error) { - instanceAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, cfg.InstanceInterfaceNames, logger) - if err != nil { - return ring.BasicLifecyclerConfig{}, err - } - - instancePort := ring.GetInstancePort(cfg.InstancePort, cfg.ListenPort) - - return ring.BasicLifecyclerConfig{ - ID: cfg.InstanceID, - Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), - HeartbeatPeriod: cfg.HeartbeatPeriod, - TokensObservePeriod: 0, - Zone: cfg.InstanceZone, - NumTokens: RingNumTokens, - }, nil -} - -func (cfg *RingConfig) ToRingConfig() ring.Config { - rc := ring.Config{} - flagext.DefaultValues(&rc) - - rc.KVStore = cfg.KVStore - rc.HeartbeatTimeout = cfg.HeartbeatTimeout - rc.ReplicationFactor = cfg.ReplicationFactor - rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled - - return rc -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.pb.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.pb.go deleted file mode 100644 index ecb21512f..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.pb.go +++ /dev/null @@ -1,1147 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: alertmanager.proto - -package alertmanagerpb - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - clusterpb "github.com/prometheus/alertmanager/cluster/clusterpb" - httpgrpc "github.com/weaveworks/common/httpgrpc" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
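Most of the generated code that follows (encodeVarintAlertmanager, sovAlertmanager, and the shift loops inside Unmarshal) is mechanical protobuf base-128 varint handling. Here is a minimal standalone sketch of that scheme, mine and for orientation only; the generated version writes buffers back-to-front via MarshalToSizedBuffer but encodes identically:

    // Minimal sketch of protobuf base-128 varint encoding, the scheme
    // behind encodeVarintAlertmanager and the Unmarshal shift loops.
    package main

    import "fmt"

    // putUvarint writes v low 7 bits first, setting the high bit of
    // every byte except the last as a continuation marker.
    func putUvarint(buf []byte, v uint64) int {
        i := 0
        for v >= 0x80 {
            buf[i] = byte(v) | 0x80
            v >>= 7
            i++
        }
        buf[i] = byte(v)
        return i + 1
    }

    // uvarint reverses the process, accumulating 7 bits per byte until
    // it sees a byte with the continuation bit clear.
    func uvarint(buf []byte) (v uint64, n int) {
        var shift uint
        for i, b := range buf {
            v |= uint64(b&0x7f) << shift
            if b < 0x80 {
                return v, i + 1
            }
            shift += 7
        }
        return 0, 0 // truncated input
    }

    func main() {
        buf := make([]byte, 10)
        n := putUvarint(buf, 300)
        fmt.Printf("% x\n", buf[:n]) // ac 02
        v, _ := uvarint(buf[:n])
        fmt.Println(v) // 300
    }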
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type UpdateStateStatus int32 - -const ( - OK UpdateStateStatus = 0 - MERGE_ERROR UpdateStateStatus = 2 - USER_NOT_FOUND UpdateStateStatus = 3 -) - -var UpdateStateStatus_name = map[int32]string{ - 0: "OK", - 2: "MERGE_ERROR", - 3: "USER_NOT_FOUND", -} - -var UpdateStateStatus_value = map[string]int32{ - "OK": 0, - "MERGE_ERROR": 2, - "USER_NOT_FOUND": 3, -} - -func (UpdateStateStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e60437b6e0c74c9a, []int{0} -} - -type ReadStateStatus int32 - -const ( - READ_UNSPECIFIED ReadStateStatus = 0 - READ_OK ReadStateStatus = 1 - READ_ERROR ReadStateStatus = 2 - READ_USER_NOT_FOUND ReadStateStatus = 3 -) - -var ReadStateStatus_name = map[int32]string{ - 0: "READ_UNSPECIFIED", - 1: "READ_OK", - 2: "READ_ERROR", - 3: "READ_USER_NOT_FOUND", -} - -var ReadStateStatus_value = map[string]int32{ - "READ_UNSPECIFIED": 0, - "READ_OK": 1, - "READ_ERROR": 2, - "READ_USER_NOT_FOUND": 3, -} - -func (ReadStateStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e60437b6e0c74c9a, []int{1} -} - -type UpdateStateResponse struct { - Status UpdateStateStatus `protobuf:"varint,1,opt,name=status,proto3,enum=alertmanagerpb.UpdateStateStatus" json:"status,omitempty"` - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` -} - -func (m *UpdateStateResponse) Reset() { *m = UpdateStateResponse{} } -func (*UpdateStateResponse) ProtoMessage() {} -func (*UpdateStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e60437b6e0c74c9a, []int{0} -} -func (m *UpdateStateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateStateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UpdateStateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateStateResponse.Merge(m, src) -} -func (m *UpdateStateResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateStateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateStateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateStateResponse proto.InternalMessageInfo - -func (m *UpdateStateResponse) GetStatus() UpdateStateStatus { - if m != nil { - return m.Status - } - return OK -} - -func (m *UpdateStateResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type ReadStateRequest struct { -} - -func (m *ReadStateRequest) Reset() { *m = ReadStateRequest{} } -func (*ReadStateRequest) ProtoMessage() {} -func (*ReadStateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e60437b6e0c74c9a, []int{1} -} -func (m *ReadStateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadStateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadStateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadStateRequest.Merge(m, src) -} -func (m *ReadStateRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadStateRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_ReadStateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadStateRequest proto.InternalMessageInfo - -type ReadStateResponse struct { - Status ReadStateStatus `protobuf:"varint,1,opt,name=status,proto3,enum=alertmanagerpb.ReadStateStatus" json:"status,omitempty"` - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - State *clusterpb.FullState `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` -} - -func (m *ReadStateResponse) Reset() { *m = ReadStateResponse{} } -func (*ReadStateResponse) ProtoMessage() {} -func (*ReadStateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e60437b6e0c74c9a, []int{2} -} -func (m *ReadStateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadStateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadStateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadStateResponse.Merge(m, src) -} -func (m *ReadStateResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadStateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadStateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadStateResponse proto.InternalMessageInfo - -func (m *ReadStateResponse) GetStatus() ReadStateStatus { - if m != nil { - return m.Status - } - return READ_UNSPECIFIED -} - -func (m *ReadStateResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *ReadStateResponse) GetState() *clusterpb.FullState { - if m != nil { - return m.State - } - return nil -} - -func init() { - proto.RegisterEnum("alertmanagerpb.UpdateStateStatus", UpdateStateStatus_name, UpdateStateStatus_value) - proto.RegisterEnum("alertmanagerpb.ReadStateStatus", ReadStateStatus_name, ReadStateStatus_value) - proto.RegisterType((*UpdateStateResponse)(nil), "alertmanagerpb.UpdateStateResponse") - proto.RegisterType((*ReadStateRequest)(nil), "alertmanagerpb.ReadStateRequest") - proto.RegisterType((*ReadStateResponse)(nil), "alertmanagerpb.ReadStateResponse") -} - -func init() { proto.RegisterFile("alertmanager.proto", fileDescriptor_e60437b6e0c74c9a) } - -var fileDescriptor_e60437b6e0c74c9a = []byte{ - // 509 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x41, 0x6f, 0x12, 0x41, - 0x18, 0x9d, 0xa1, 0x16, 0xd3, 0x0f, 0x85, 0xed, 0x14, 0x95, 0x70, 0x98, 0x52, 0xbc, 0x10, 0x0e, - 0xbb, 0x09, 0x9a, 0x18, 0x3d, 0xb5, 0x95, 0xc5, 0x36, 0x8d, 0x40, 0x06, 0xb8, 0x98, 0x18, 0x32, - 0xc0, 0x08, 0x46, 0x60, 0xd6, 0xd9, 0x59, 0x7b, 0xf5, 0x27, 0x78, 0xf0, 0x07, 0x78, 0xf4, 0xa7, - 0x78, 0xe4, 0xd8, 0xa3, 0x2c, 0x97, 0x26, 0x5e, 0xfa, 0x13, 0x4c, 0x59, 0x76, 0x5d, 0xd7, 0xd8, - 0xf4, 0xb4, 0xdf, 0xbc, 0xf9, 0xde, 0x7b, 0xf3, 0xbd, 0x99, 0x05, 0xc2, 0xa7, 0x42, 0xe9, 0x19, - 0x9f, 0xf3, 0xb1, 0x50, 0xa6, 0xa3, 0xa4, 0x96, 0x24, 0x1b, 0xc7, 0x9c, 0x41, 0x31, 0x3f, 0x96, - 0x63, 0xb9, 0xde, 0xb2, 0xae, 0xab, 0xa0, 0xab, 0xf8, 0x74, 0xfc, 0x5e, 0x4f, 0xbc, 0x81, 0x39, - 0x94, 0x33, 0xeb, 0x5c, 0xf0, 0x4f, 0xe2, 0x5c, 0xaa, 0x0f, 0xae, 0x35, 0x94, 0xb3, 0x99, 0x9c, - 0x5b, 0x13, 0xad, 0x9d, 0xb1, 0x72, 0x86, 0x51, 0xb1, 0x61, 0x1d, 0xc7, 0x58, 0x8e, 0x92, 0x33, - 0xa1, 0x27, 0xc2, 0x73, 0xad, 0xb8, 0xa3, 0x35, 0x9c, 0x7a, 0xae, 0xfe, 0xf3, 0x75, 0x06, 0x61, - 0x15, 
0x68, 0x94, 0xdf, 0xc1, 0x5e, 0xcf, 0x19, 0x71, 0x2d, 0x3a, 0x9a, 0x6b, 0xc1, 0x84, 0xeb, - 0xc8, 0xb9, 0x2b, 0xc8, 0x73, 0x48, 0xbb, 0x9a, 0x6b, 0xcf, 0x2d, 0xe0, 0x12, 0xae, 0x64, 0x6b, - 0x07, 0xe6, 0xdf, 0x73, 0x98, 0x31, 0x52, 0x67, 0xdd, 0xc8, 0x36, 0x04, 0x92, 0x87, 0x6d, 0xa1, - 0x94, 0x54, 0x85, 0x54, 0x09, 0x57, 0x76, 0x58, 0xb0, 0x28, 0x13, 0x30, 0x98, 0xe0, 0xa3, 0x8d, - 0xcb, 0x47, 0x4f, 0xb8, 0xba, 0xfc, 0x15, 0xc3, 0x6e, 0x0c, 0xdc, 0x58, 0x3f, 0x4b, 0x58, 0xef, - 0x27, 0xad, 0x23, 0xca, 0x6d, 0x8c, 0x49, 0x15, 0xb6, 0xaf, 0xf7, 0x45, 0x61, 0xab, 0x84, 0x2b, - 0x99, 0x5a, 0xde, 0x8c, 0x92, 0x30, 0x1b, 0xde, 0x74, 0x1a, 0x78, 0x07, 0x2d, 0x2f, 0xee, 0x5c, - 0x7e, 0xdb, 0x47, 0xd5, 0x43, 0xd8, 0xfd, 0x67, 0x3a, 0x92, 0x86, 0x54, 0xeb, 0xcc, 0x40, 0x24, - 0x07, 0x99, 0xd7, 0x36, 0x7b, 0x65, 0xf7, 0x6d, 0xc6, 0x5a, 0xcc, 0x48, 0x11, 0x02, 0xd9, 0x5e, - 0xc7, 0x66, 0xfd, 0x66, 0xab, 0xdb, 0x6f, 0xb4, 0x7a, 0xcd, 0xba, 0xb1, 0x55, 0x7d, 0x0b, 0xb9, - 0xc4, 0x21, 0x49, 0x1e, 0x0c, 0x66, 0x1f, 0xd5, 0xfb, 0xbd, 0x66, 0xa7, 0x6d, 0xbf, 0x3c, 0x6d, - 0x9c, 0xda, 0x75, 0x03, 0x91, 0x0c, 0xdc, 0x5d, 0xa3, 0xad, 0x33, 0x03, 0x93, 0x2c, 0xc0, 0x7a, - 0x11, 0x2a, 0x3f, 0x82, 0xbd, 0x80, 0x92, 0x90, 0xaf, 0xfd, 0xc2, 0x70, 0xef, 0x28, 0x96, 0x09, - 0x39, 0x84, 0xfb, 0x27, 0x7c, 0x3e, 0x9a, 0x86, 0xc9, 0x92, 0x07, 0x66, 0xf4, 0x54, 0x4e, 0xba, - 0xdd, 0xf6, 0x06, 0x2e, 0x3e, 0x4c, 0xc2, 0x41, 0xe4, 0x65, 0x44, 0x6c, 0xc8, 0xc4, 0x66, 0x26, - 0xb9, 0x58, 0x4a, 0x6d, 0xae, 0x74, 0xf1, 0xf1, 0x0d, 0xf7, 0x1f, 0x93, 0x61, 0xb0, 0x13, 0x0d, - 0x4e, 0x4a, 0xff, 0xbd, 0xb8, 0xf0, 0x3c, 0x07, 0x37, 0x74, 0x84, 0x9a, 0xc7, 0xf5, 0xc5, 0x92, - 0xa2, 0x8b, 0x25, 0x45, 0x57, 0x4b, 0x8a, 0x3f, 0xfb, 0x14, 0x7f, 0xf7, 0x29, 0xfe, 0xe1, 0x53, - 0xbc, 0xf0, 0x29, 0xfe, 0xe9, 0x53, 0x7c, 0xe9, 0x53, 0x74, 0xe5, 0x53, 0xfc, 0x65, 0x45, 0xd1, - 0x62, 0x45, 0xd1, 0xc5, 0x8a, 0xa2, 0x37, 0x89, 0xff, 0x6e, 0x90, 0x5e, 0x3f, 0xf7, 0x27, 0xbf, - 0x03, 0x00, 0x00, 0xff, 0xff, 0x81, 0x5b, 0x6b, 0x33, 0xa4, 0x03, 0x00, 0x00, -} - -func (x UpdateStateStatus) String() string { - s, ok := UpdateStateStatus_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (x ReadStateStatus) String() string { - s, ok := ReadStateStatus_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *UpdateStateResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UpdateStateResponse) - if !ok { - that2, ok := that.(UpdateStateResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Status != that1.Status { - return false - } - if this.Error != that1.Error { - return false - } - return true -} -func (this *ReadStateRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadStateRequest) - if !ok { - that2, ok := that.(ReadStateRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *UpdateStateResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&alertmanagerpb.UpdateStateResponse{") - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this 
*ReadStateRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&alertmanagerpb.ReadStateRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ReadStateResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&alertmanagerpb.ReadStateResponse{") - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") - if this.State != nil { - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringAlertmanager(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// AlertmanagerClient is the client API for Alertmanager service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type AlertmanagerClient interface { - HandleRequest(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) - UpdateState(ctx context.Context, in *clusterpb.Part, opts ...grpc.CallOption) (*UpdateStateResponse, error) - ReadState(ctx context.Context, in *ReadStateRequest, opts ...grpc.CallOption) (*ReadStateResponse, error) -} - -type alertmanagerClient struct { - cc *grpc.ClientConn -} - -func NewAlertmanagerClient(cc *grpc.ClientConn) AlertmanagerClient { - return &alertmanagerClient{cc} -} - -func (c *alertmanagerClient) HandleRequest(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { - out := new(httpgrpc.HTTPResponse) - err := c.cc.Invoke(ctx, "/alertmanagerpb.Alertmanager/HandleRequest", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *alertmanagerClient) UpdateState(ctx context.Context, in *clusterpb.Part, opts ...grpc.CallOption) (*UpdateStateResponse, error) { - out := new(UpdateStateResponse) - err := c.cc.Invoke(ctx, "/alertmanagerpb.Alertmanager/UpdateState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *alertmanagerClient) ReadState(ctx context.Context, in *ReadStateRequest, opts ...grpc.CallOption) (*ReadStateResponse, error) { - out := new(ReadStateResponse) - err := c.cc.Invoke(ctx, "/alertmanagerpb.Alertmanager/ReadState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AlertmanagerServer is the server API for Alertmanager service. -type AlertmanagerServer interface { - HandleRequest(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) - UpdateState(context.Context, *clusterpb.Part) (*UpdateStateResponse, error) - ReadState(context.Context, *ReadStateRequest) (*ReadStateResponse, error) -} - -// UnimplementedAlertmanagerServer can be embedded to have forward compatible implementations. 
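The forward compatibility mentioned in the comment above comes from struct embedding: a concrete server embeds UnimplementedAlertmanagerServer, overrides only the RPCs it implements, and every method it does not override falls through to the stub's codes.Unimplemented response, so adding a new RPC to the service never breaks the build. A hypothetical example, assumed to sit in the same package (stubServer and its ReadState body are illustrative, not part of the original):

    // Hypothetical server embedding the generated stub (illustrative only;
    // needs the same context import as the generated code).
    type stubServer struct {
        UnimplementedAlertmanagerServer // default Unimplemented for every RPC
    }

    // Only ReadState is overridden. HandleRequest and UpdateState still
    // compile, and callers of them get codes.Unimplemented from the stub.
    func (s *stubServer) ReadState(ctx context.Context, _ *ReadStateRequest) (*ReadStateResponse, error) {
        return &ReadStateResponse{Status: READ_OK}, nil
    }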
-type UnimplementedAlertmanagerServer struct { -} - -func (*UnimplementedAlertmanagerServer) HandleRequest(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method HandleRequest not implemented") -} -func (*UnimplementedAlertmanagerServer) UpdateState(ctx context.Context, req *clusterpb.Part) (*UpdateStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateState not implemented") -} -func (*UnimplementedAlertmanagerServer) ReadState(ctx context.Context, req *ReadStateRequest) (*ReadStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadState not implemented") -} - -func RegisterAlertmanagerServer(s *grpc.Server, srv AlertmanagerServer) { - s.RegisterService(&_Alertmanager_serviceDesc, srv) -} - -func _Alertmanager_HandleRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(httpgrpc.HTTPRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertmanagerServer).HandleRequest(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/alertmanagerpb.Alertmanager/HandleRequest", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertmanagerServer).HandleRequest(ctx, req.(*httpgrpc.HTTPRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Alertmanager_UpdateState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(clusterpb.Part) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertmanagerServer).UpdateState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/alertmanagerpb.Alertmanager/UpdateState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertmanagerServer).UpdateState(ctx, req.(*clusterpb.Part)) - } - return interceptor(ctx, in, info, handler) -} - -func _Alertmanager_ReadState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AlertmanagerServer).ReadState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/alertmanagerpb.Alertmanager/ReadState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AlertmanagerServer).ReadState(ctx, req.(*ReadStateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Alertmanager_serviceDesc = grpc.ServiceDesc{ - ServiceName: "alertmanagerpb.Alertmanager", - HandlerType: (*AlertmanagerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "HandleRequest", - Handler: _Alertmanager_HandleRequest_Handler, - }, - { - MethodName: "UpdateState", - Handler: _Alertmanager_UpdateState_Handler, - }, - { - MethodName: "ReadState", - Handler: _Alertmanager_ReadState_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "alertmanager.proto", -} - -func (m *UpdateStateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], 
nil -} - -func (m *UpdateStateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintAlertmanager(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x12 - } - if m.Status != 0 { - i = encodeVarintAlertmanager(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ReadStateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadStateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ReadStateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadStateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.State != nil { - { - size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAlertmanager(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintAlertmanager(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x12 - } - if m.Status != 0 { - i = encodeVarintAlertmanager(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintAlertmanager(dAtA []byte, offset int, v uint64) int { - offset -= sovAlertmanager(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *UpdateStateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != 0 { - n += 1 + sovAlertmanager(uint64(m.Status)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovAlertmanager(uint64(l)) - } - return n -} - -func (m *ReadStateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ReadStateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != 0 { - n += 1 + sovAlertmanager(uint64(m.Status)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovAlertmanager(uint64(l)) - } - if m.State != nil { - l = m.State.Size() - n += 1 + l + sovAlertmanager(uint64(l)) - } - return n -} - -func sovAlertmanager(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAlertmanager(x uint64) (n int) { - return sovAlertmanager(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *UpdateStateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateStateResponse{`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this 
*ReadStateRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadStateRequest{`, - `}`, - }, "") - return s -} -func (this *ReadStateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadStateResponse{`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `State:` + strings.Replace(fmt.Sprintf("%v", this.State), "FullState", "clusterpb.FullState", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringAlertmanager(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *UpdateStateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateStateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= UpdateStateStatus(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAlertmanager - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAlertmanager - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAlertmanager(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAlertmanager - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAlertmanager - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadStateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadStateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadStateRequest: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAlertmanager(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAlertmanager - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAlertmanager - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadStateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadStateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= ReadStateStatus(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAlertmanager - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAlertmanager - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAlertmanager - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAlertmanager - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.State == nil { - m.State = &clusterpb.FullState{} - } - if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAlertmanager(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAlertmanager - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAlertmanager - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAlertmanager(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return 0, ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAlertmanager - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthAlertmanager - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAlertmanager - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipAlertmanager(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthAlertmanager - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthAlertmanager = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAlertmanager = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.proto b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.proto deleted file mode 100644 index 2ff154ddf..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb/alertmanager.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package alertmanagerpb; -import "gogoproto/gogo.proto"; - -option go_package = "alertmanagerpb"; - -import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; -import "github.com/prometheus/alertmanager/cluster/clusterpb/cluster.proto"; - -// Alertmanager interface exposed to the Alertmanager Distributor and other Alertmanagers -service Alertmanager { - rpc HandleRequest(httpgrpc.HTTPRequest) returns(httpgrpc.HTTPResponse) {}; - rpc UpdateState(clusterpb.Part) returns (UpdateStateResponse) {}; - rpc ReadState(ReadStateRequest) returns (ReadStateResponse) {}; -} -enum UpdateStateStatus { - OK = 0; - MERGE_ERROR = 2; - USER_NOT_FOUND = 3; -} - -message UpdateStateResponse { - UpdateStateStatus status = 1; - string error = 2; -} - -message ReadStateRequest { -} - -enum ReadStateStatus { - READ_UNSPECIFIED = 0; - READ_OK = 1; - READ_ERROR = 2; - READ_USER_NOT_FOUND = 3; -} - -message ReadStateResponse { - // Alertmanager (clusterpb) types do not have Equal methods. 
- option (gogoproto.equal) = false; - - ReadStateStatus status = 1; - string error = 2; - clusterpb.FullState state = 3; -} - diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient/bucket_client.go deleted file mode 100644 index dfab30338..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient/bucket_client.go +++ /dev/null @@ -1,209 +0,0 @@ -package bucketclient - -import ( - "bytes" - "context" - "io/ioutil" - "strings" - "sync" - - "github.com/go-kit/log" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/util/concurrency" - "github.com/cortexproject/cortex/pkg/util/runutil" -) - -const ( - // The bucket prefix under which all tenants alertmanager configs are stored. - // Note that objects stored under this prefix follow the pattern: - // alerts/ - alertsPrefix = "alerts" - - // The bucket prefix under which other alertmanager state is stored. - // Note that objects stored under this prefix follow the pattern: - // alertmanager// - alertmanagerPrefix = "alertmanager" - - // The name of alertmanager full state objects (notification log + silences). - fullStateName = "fullstate" - - // How many users to load concurrently. - fetchConcurrency = 16 -) - -// BucketAlertStore is used to support the AlertStore interface against an object storage backend. It is implemented -// using the Thanos objstore.Bucket interface -type BucketAlertStore struct { - alertsBucket objstore.Bucket - amBucket objstore.Bucket - cfgProvider bucket.TenantConfigProvider - logger log.Logger -} - -func NewBucketAlertStore(bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *BucketAlertStore { - return &BucketAlertStore{ - alertsBucket: bucket.NewPrefixedBucketClient(bkt, alertsPrefix), - amBucket: bucket.NewPrefixedBucketClient(bkt, alertmanagerPrefix), - cfgProvider: cfgProvider, - logger: logger, - } -} - -// ListAllUsers implements alertstore.AlertStore. -func (s *BucketAlertStore) ListAllUsers(ctx context.Context) ([]string, error) { - var userIDs []string - - err := s.alertsBucket.Iter(ctx, "", func(key string) error { - userIDs = append(userIDs, key) - return nil - }) - - return userIDs, err -} - -// GetAlertConfigs implements alertstore.AlertStore. -func (s *BucketAlertStore) GetAlertConfigs(ctx context.Context, userIDs []string) (map[string]alertspb.AlertConfigDesc, error) { - var ( - cfgsMx = sync.Mutex{} - cfgs = make(map[string]alertspb.AlertConfigDesc, len(userIDs)) - ) - - err := concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(userIDs), fetchConcurrency, func(ctx context.Context, job interface{}) error { - userID := job.(string) - - cfg, err := s.getAlertConfig(ctx, userID) - if s.alertsBucket.IsObjNotFoundErr(err) { - return nil - } else if err != nil { - return errors.Wrapf(err, "failed to fetch alertmanager config for user %s", userID) - } - - cfgsMx.Lock() - cfgs[userID] = cfg - cfgsMx.Unlock() - - return nil - }) - - return cfgs, err -} - -// GetAlertConfig implements alertstore.AlertStore. 
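The GetAlertConfigs fan-out above caps parallelism at fetchConcurrency (16) and guards the shared result map with a mutex. The same shape in standalone form, sketched with errgroup's SetLimit; this is an assumption for illustration (SetLimit needs a newer golang.org/x/sync than the one pinned in this go.mod, and the deleted code uses cortex's concurrency.ForEach instead):

    // Bounded fan-out over tenants, analogous to concurrency.ForEach with
    // fetchConcurrency = 16 (illustrative sketch, not the original code).
    package fanout

    import (
        "context"
        "sync"

        "golang.org/x/sync/errgroup"
    )

    func fetchAll(ctx context.Context, userIDs []string,
        fetch func(context.Context, string) (string, error)) (map[string]string, error) {

        var mu sync.Mutex
        out := make(map[string]string, len(userIDs))

        g, gctx := errgroup.WithContext(ctx)
        g.SetLimit(16) // at most 16 fetches in flight at once
        for _, id := range userIDs {
            id := id // capture loop variable (needed before Go 1.22)
            g.Go(func() error {
                cfg, err := fetch(gctx, id)
                if err != nil {
                    return err // cancels gctx, failing fast
                }
                mu.Lock()
                out[id] = cfg
                mu.Unlock()
                return nil
            })
        }
        return out, g.Wait()
    }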
-func (s *BucketAlertStore) GetAlertConfig(ctx context.Context, userID string) (alertspb.AlertConfigDesc, error) { - cfg, err := s.getAlertConfig(ctx, userID) - if s.alertsBucket.IsObjNotFoundErr(err) { - return cfg, alertspb.ErrNotFound - } - - return cfg, err -} - -// SetAlertConfig implements alertstore.AlertStore. -func (s *BucketAlertStore) SetAlertConfig(ctx context.Context, cfg alertspb.AlertConfigDesc) error { - cfgBytes, err := cfg.Marshal() - if err != nil { - return err - } - - return s.getUserBucket(cfg.User).Upload(ctx, cfg.User, bytes.NewBuffer(cfgBytes)) -} - -// DeleteAlertConfig implements alertstore.AlertStore. -func (s *BucketAlertStore) DeleteAlertConfig(ctx context.Context, userID string) error { - userBkt := s.getUserBucket(userID) - - err := userBkt.Delete(ctx, userID) - if userBkt.IsObjNotFoundErr(err) { - return nil - } - return err -} - -// ListUsersWithFullState implements alertstore.AlertStore. -func (s *BucketAlertStore) ListUsersWithFullState(ctx context.Context) ([]string, error) { - var userIDs []string - - err := s.amBucket.Iter(ctx, "", func(key string) error { - userIDs = append(userIDs, strings.TrimRight(key, "/")) - return nil - }) - - return userIDs, err -} - -// GetFullState implements alertstore.AlertStore. -func (s *BucketAlertStore) GetFullState(ctx context.Context, userID string) (alertspb.FullStateDesc, error) { - bkt := s.getAlertmanagerUserBucket(userID) - fs := alertspb.FullStateDesc{} - - err := s.get(ctx, bkt, fullStateName, &fs) - if s.amBucket.IsObjNotFoundErr(err) { - return fs, alertspb.ErrNotFound - } - - return fs, err -} - -// SetFullState implements alertstore.AlertStore. -func (s *BucketAlertStore) SetFullState(ctx context.Context, userID string, fs alertspb.FullStateDesc) error { - bkt := s.getAlertmanagerUserBucket(userID) - - fsBytes, err := fs.Marshal() - if err != nil { - return err - } - - return bkt.Upload(ctx, fullStateName, bytes.NewBuffer(fsBytes)) -} - -// DeleteFullState implements alertstore.AlertStore. -func (s *BucketAlertStore) DeleteFullState(ctx context.Context, userID string) error { - userBkt := s.getAlertmanagerUserBucket(userID) - - err := userBkt.Delete(ctx, fullStateName) - if userBkt.IsObjNotFoundErr(err) { - return nil - } - return err -} - -func (s *BucketAlertStore) getAlertConfig(ctx context.Context, userID string) (alertspb.AlertConfigDesc, error) { - config := alertspb.AlertConfigDesc{} - err := s.get(ctx, s.getUserBucket(userID), userID, &config) - return config, err -} - -func (s *BucketAlertStore) get(ctx context.Context, bkt objstore.Bucket, name string, msg proto.Message) error { - readCloser, err := bkt.Get(ctx, name) - if err != nil { - return err - } - - defer runutil.CloseWithLogOnErr(s.logger, readCloser, "close bucket reader") - - buf, err := ioutil.ReadAll(readCloser) - if err != nil { - return errors.Wrapf(err, "failed to read alertmanager config for user %s", name) - } - - err = proto.Unmarshal(buf, msg) - if err != nil { - return errors.Wrapf(err, "failed to deserialize alertmanager config for user %s", name) - } - - return nil -} - -func (s *BucketAlertStore) getUserBucket(userID string) objstore.Bucket { - // Inject server-side encryption based on the tenant config. 
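// Putting the two prefixes together (assumed layout, inferred from the
// constants above): tenant configs live at "alerts/<user>" while full state
// lives at "alertmanager/<user>/fullstate"; each prefixed bucket client just
// prepends its prefix to the keys it is given. Sketch assuming import "path":
func configObjectKey(user string) string { return path.Join("alerts", user) }
func fullStateObjectKey(user string) string {
	return path.Join("alertmanager", user, "fullstate")
}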
- return bucket.NewSSEBucketClient(userID, s.alertsBucket, s.cfgProvider) -} - -func (s *BucketAlertStore) getAlertmanagerUserBucket(userID string) objstore.Bucket { - return bucket.NewUserBucketClient(userID, s.amBucket, s.cfgProvider).WithExpectedErrs(s.amBucket.IsObjNotFoundErr) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/config.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/config.go deleted file mode 100644 index 1198eb2ef..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/config.go +++ /dev/null @@ -1,82 +0,0 @@ -package alertstore - -import ( - "flag" - - "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb" - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local" - "github.com/cortexproject/cortex/pkg/chunk/aws" - "github.com/cortexproject/cortex/pkg/chunk/azure" - "github.com/cortexproject/cortex/pkg/chunk/gcp" - "github.com/cortexproject/cortex/pkg/configs/client" - "github.com/cortexproject/cortex/pkg/storage/bucket" -) - -// LegacyConfig configures the alertmanager storage backend using the legacy storage clients. -// TODO remove this legacy config in Cortex 1.11. -type LegacyConfig struct { - Type string `yaml:"type"` - ConfigDB client.Config `yaml:"configdb"` - - // Object Storage Configs - Azure azure.BlobStorageConfig `yaml:"azure"` - GCS gcp.GCSConfig `yaml:"gcs"` - S3 aws.S3Config `yaml:"s3"` - Local local.StoreConfig `yaml:"local"` -} - -// RegisterFlags registers flags. -func (cfg *LegacyConfig) RegisterFlags(f *flag.FlagSet) { - cfg.ConfigDB.RegisterFlagsWithPrefix("alertmanager.", f) - f.StringVar(&cfg.Type, "alertmanager.storage.type", configdb.Name, "Type of backend to use to store alertmanager configs. Supported values are: \"configdb\", \"gcs\", \"s3\", \"local\".") - - cfg.Azure.RegisterFlagsWithPrefix("alertmanager.storage.", f) - cfg.GCS.RegisterFlagsWithPrefix("alertmanager.storage.", f) - cfg.S3.RegisterFlagsWithPrefix("alertmanager.storage.", f) - cfg.Local.RegisterFlagsWithPrefix("alertmanager.storage.", f) -} - -// Validate config and returns error on failure -func (cfg *LegacyConfig) Validate() error { - if err := cfg.Azure.Validate(); err != nil { - return errors.Wrap(err, "invalid Azure Storage config") - } - if err := cfg.S3.Validate(); err != nil { - return errors.Wrap(err, "invalid S3 Storage config") - } - return nil -} - -// IsDefaults returns true if the storage options have not been set. -func (cfg *LegacyConfig) IsDefaults() bool { - return cfg.Type == configdb.Name && cfg.ConfigDB.ConfigsAPIURL.URL == nil -} - -// Config configures a the alertmanager storage backend. -type Config struct { - bucket.Config `yaml:",inline"` - ConfigDB client.Config `yaml:"configdb"` - Local local.StoreConfig `yaml:"local"` -} - -// RegisterFlags registers the backend storage config. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - prefix := "alertmanager-storage." - - cfg.ExtraBackends = []string{configdb.Name, local.Name} - cfg.ConfigDB.RegisterFlagsWithPrefix(prefix, f) - cfg.Local.RegisterFlagsWithPrefix(prefix, f) - cfg.RegisterFlagsWithPrefix(prefix, f) -} - -// IsFullStateSupported returns if the given configuration supports access to FullState objects. 
-func (cfg *Config) IsFullStateSupported() bool { - for _, backend := range bucket.SupportedBackends { - if cfg.Backend == backend { - return true - } - } - return false -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb/store.go deleted file mode 100644 index 880af40c0..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb/store.go +++ /dev/null @@ -1,146 +0,0 @@ -package configdb - -import ( - "context" - "errors" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/configs/client" - "github.com/cortexproject/cortex/pkg/configs/userconfig" -) - -const ( - Name = "configdb" -) - -var ( - errReadOnly = errors.New("configdb alertmanager config storage is read-only") - errState = errors.New("configdb alertmanager storage does not support state persistency") -) - -// Store is a concrete implementation of RuleStore that sources rules from the config service -type Store struct { - configClient client.Client - since userconfig.ID - alertConfigs map[string]alertspb.AlertConfigDesc -} - -// NewStore constructs a Store -func NewStore(c client.Client) *Store { - return &Store{ - configClient: c, - since: 0, - alertConfigs: make(map[string]alertspb.AlertConfigDesc), - } -} - -// ListAllUsers implements alertstore.AlertStore. -func (c *Store) ListAllUsers(ctx context.Context) ([]string, error) { - configs, err := c.reloadConfigs(ctx) - if err != nil { - return nil, err - } - - userIDs := make([]string, 0, len(configs)) - for userID := range configs { - userIDs = append(userIDs, userID) - } - - return userIDs, nil -} - -// GetAlertConfigs implements alertstore.AlertStore. -func (c *Store) GetAlertConfigs(ctx context.Context, userIDs []string) (map[string]alertspb.AlertConfigDesc, error) { - // Refresh the local state. - configs, err := c.reloadConfigs(ctx) - if err != nil { - return nil, err - } - - filtered := make(map[string]alertspb.AlertConfigDesc, len(userIDs)) - for _, userID := range userIDs { - if cfg, ok := configs[userID]; ok { - filtered[userID] = cfg - } - } - - return filtered, nil -} - -// GetAlertConfig implements alertstore.AlertStore. -func (c *Store) GetAlertConfig(ctx context.Context, user string) (alertspb.AlertConfigDesc, error) { - // Refresh the local state. - configs, err := c.reloadConfigs(ctx) - if err != nil { - return alertspb.AlertConfigDesc{}, err - } - - cfg, exists := configs[user] - if !exists { - return alertspb.AlertConfigDesc{}, alertspb.ErrNotFound - } - - return cfg, nil -} - -// SetAlertConfig implements alertstore.AlertStore. -func (c *Store) SetAlertConfig(ctx context.Context, cfg alertspb.AlertConfigDesc) error { - return errReadOnly -} - -// DeleteAlertConfig implements alertstore.AlertStore. -func (c *Store) DeleteAlertConfig(ctx context.Context, user string) error { - return errReadOnly -} - -// ListUsersWithFullState implements alertstore.AlertStore. -func (c *Store) ListUsersWithFullState(ctx context.Context) ([]string, error) { - return nil, errState -} - -// GetFullState implements alertstore.AlertStore. -func (c *Store) GetFullState(ctx context.Context, user string) (alertspb.FullStateDesc, error) { - return alertspb.FullStateDesc{}, errState -} - -// SetFullState implements alertstore.AlertStore. 
-func (c *Store) SetFullState(ctx context.Context, user string, cfg alertspb.FullStateDesc) error { - return errState -} - -// DeleteFullState implements alertstore.AlertStore. -func (c *Store) DeleteFullState(ctx context.Context, user string) error { - return errState -} - -func (c *Store) reloadConfigs(ctx context.Context) (map[string]alertspb.AlertConfigDesc, error) { - configs, err := c.configClient.GetAlerts(ctx, c.since) - if err != nil { - return nil, err - } - - for user, cfg := range configs.Configs { - if cfg.IsDeleted() { - delete(c.alertConfigs, user) - continue - } - - var templates []*alertspb.TemplateDesc - for fn, template := range cfg.Config.TemplateFiles { - templates = append(templates, &alertspb.TemplateDesc{ - Filename: fn, - Body: template, - }) - } - - c.alertConfigs[user] = alertspb.AlertConfigDesc{ - User: user, - RawConfig: cfg.Config.AlertmanagerConfig, - Templates: templates, - } - } - - c.since = configs.GetLatestConfigID() - - return c.alertConfigs, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local/store.go deleted file mode 100644 index 9970df1e2..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local/store.go +++ /dev/null @@ -1,194 +0,0 @@ -package local - -import ( - "context" - "flag" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "github.com/prometheus/alertmanager/config" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" -) - -const ( - Name = "local" - templatesDir = "templates" -) - -var ( - errReadOnly = errors.New("local alertmanager config storage is read-only") - errState = errors.New("local alertmanager storage does not support state persistency") -) - -// StoreConfig configures a static file alertmanager store -type StoreConfig struct { - Path string `yaml:"path"` -} - -// RegisterFlags registers flags related to the alertmanager local storage. -func (cfg *StoreConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.Path, prefix+"local.path", "", "Path at which alertmanager configurations are stored.") -} - -// Store is used to load user alertmanager configs from a local disk -type Store struct { - cfg StoreConfig -} - -// NewStore returns a new file alert store. -func NewStore(cfg StoreConfig) (*Store, error) { - return &Store{cfg}, nil -} - -// ListAllUsers implements alertstore.AlertStore. -func (f *Store) ListAllUsers(_ context.Context) ([]string, error) { - configs, err := f.reloadConfigs() - if err != nil { - return nil, err - } - - userIDs := make([]string, 0, len(configs)) - for userID := range configs { - userIDs = append(userIDs, userID) - } - - return userIDs, nil -} - -// GetAlertConfigs implements alertstore.AlertStore. -func (f *Store) GetAlertConfigs(_ context.Context, userIDs []string) (map[string]alertspb.AlertConfigDesc, error) { - configs, err := f.reloadConfigs() - if err != nil { - return nil, err - } - - filtered := make(map[string]alertspb.AlertConfigDesc, len(userIDs)) - for _, userID := range userIDs { - if cfg, ok := configs[userID]; ok { - filtered[userID] = cfg - } - } - - return filtered, nil -} - -// GetAlertConfig implements alertstore.AlertStore. 
-func (f *Store) GetAlertConfig(_ context.Context, user string) (alertspb.AlertConfigDesc, error) { - cfgs, err := f.reloadConfigs() - if err != nil { - return alertspb.AlertConfigDesc{}, err - } - - cfg, exists := cfgs[user] - - if !exists { - return alertspb.AlertConfigDesc{}, alertspb.ErrNotFound - } - - return cfg, nil -} - -// SetAlertConfig implements alertstore.AlertStore. -func (f *Store) SetAlertConfig(_ context.Context, cfg alertspb.AlertConfigDesc) error { - return errReadOnly -} - -// DeleteAlertConfig implements alertstore.AlertStore. -func (f *Store) DeleteAlertConfig(_ context.Context, user string) error { - return errReadOnly -} - -// ListUsersWithFullState implements alertstore.AlertStore. -func (f *Store) ListUsersWithFullState(ctx context.Context) ([]string, error) { - return nil, errState -} - -// GetFullState implements alertstore.AlertStore. -func (f *Store) GetFullState(ctx context.Context, user string) (alertspb.FullStateDesc, error) { - return alertspb.FullStateDesc{}, errState -} - -// SetFullState implements alertstore.AlertStore. -func (f *Store) SetFullState(ctx context.Context, user string, cfg alertspb.FullStateDesc) error { - return errState -} - -// DeleteFullState implements alertstore.AlertStore. -func (f *Store) DeleteFullState(ctx context.Context, user string) error { - return errState -} - -func (f *Store) reloadConfigs() (map[string]alertspb.AlertConfigDesc, error) { - configs := map[string]alertspb.AlertConfigDesc{} - err := filepath.Walk(f.cfg.Path, func(path string, info os.FileInfo, err error) error { - if err != nil { - return errors.Wrapf(err, "unable to walk file path at %s", path) - } - - // Ignore files that are directories or not yaml files - ext := filepath.Ext(info.Name()) - if info.IsDir() || (ext != ".yml" && ext != ".yaml") { - return nil - } - - // Ensure the file is a valid Alertmanager Config. - _, err = config.LoadFile(path) - if err != nil { - return errors.Wrapf(err, "unable to load alertmanager config %s", path) - } - - // Load the file to be returned by the store. 
- content, err := ioutil.ReadFile(path) - if err != nil { - return errors.Wrapf(err, "unable to read alertmanager config %s", path) - } - - // The file name must correspond to the user tenant ID - user := strings.TrimSuffix(info.Name(), ext) - - // Load template files - userTemplateDir := filepath.Join(f.cfg.Path, user, templatesDir) - var templates []*alertspb.TemplateDesc - - if _, e := os.Stat(userTemplateDir); e == nil { - err = filepath.Walk(userTemplateDir, func(templatePath string, info os.FileInfo, err error) error { - if err != nil { - return errors.Wrapf(err, "unable to walk file path at %s", templatePath) - } - // Ignore files that are directories - if info.IsDir() { - return nil - } - content, err := os.ReadFile(templatePath) - if err != nil { - return errors.Wrapf(err, "unable to read alertmanager templates %s", templatePath) - } - - templates = append(templates, &alertspb.TemplateDesc{ - Body: string(content), - Filename: info.Name(), - }) - return nil - }) - - if err != nil { - return errors.Wrapf(err, "unable to list alertmanager templates: %s", userTemplateDir) - } - } else if !os.IsNotExist(e) { - return errors.Wrapf(e, "unable to read alertmanager templates %s", path) - } - - configs[user] = alertspb.AlertConfigDesc{ - User: user, - RawConfig: string(content), - Templates: templates, - } - return nil - }) - - return configs, err -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/objectclient/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/objectclient/store.go deleted file mode 100644 index b38542d11..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/objectclient/store.go +++ /dev/null @@ -1,163 +0,0 @@ -package objectclient - -import ( - "bytes" - "context" - "io/ioutil" - "path" - "strings" - "sync" - - "github.com/go-kit/log" - "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util/concurrency" - "github.com/cortexproject/cortex/pkg/util/runutil" -) - -// Object Alert Storage Schema -// ======================= -// Object Name: "alerts/" -// Storage Format: Encoded AlertConfigDesc - -const ( - // The bucket prefix under which all tenants alertmanager configs are stored. - alertPrefix = "alerts/" - - // How many users to load concurrently. - fetchConcurrency = 16 -) - -var ( - errState = errors.New("legacy object alertmanager storage does not support state persistency") -) - -// AlertStore allows cortex alertmanager configs to be stored using an object store backend. -type AlertStore struct { - client chunk.ObjectClient - logger log.Logger -} - -// NewAlertStore returns a new AlertStore -func NewAlertStore(client chunk.ObjectClient, logger log.Logger) *AlertStore { - return &AlertStore{ - client: client, - logger: logger, - } -} - -// ListAllUsers implements alertstore.AlertStore. -func (a *AlertStore) ListAllUsers(ctx context.Context) ([]string, error) { - objs, _, err := a.client.List(ctx, alertPrefix, "") - if err != nil { - return nil, err - } - - userIDs := make([]string, 0, len(objs)) - for _, obj := range objs { - userID := strings.TrimPrefix(obj.Key, alertPrefix) - userIDs = append(userIDs, userID) - } - - return userIDs, nil -} - -// GetAlertConfigs implements alertstore.AlertStore. 
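// A hedged sketch of the on-disk layout the local store's reloadConfigs walks
// above: "<root>/<tenant>.yaml" holds a tenant's config and
// "<root>/<tenant>/templates/" holds its templates. listTenants is a
// hypothetical helper, not part of the vendored code:
func listTenants(root string) ([]string, error) {
	entries, err := os.ReadDir(root)
	if err != nil {
		return nil, err
	}
	var tenants []string
	for _, e := range entries {
		ext := filepath.Ext(e.Name())
		if e.IsDir() || (ext != ".yml" && ext != ".yaml") {
			continue // directories hold templates, not configs
		}
		tenants = append(tenants, strings.TrimSuffix(e.Name(), ext))
	}
	return tenants, nil
}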
-func (a *AlertStore) GetAlertConfigs(ctx context.Context, userIDs []string) (map[string]alertspb.AlertConfigDesc, error) { - var ( - cfgsMx = sync.Mutex{} - cfgs = make(map[string]alertspb.AlertConfigDesc, len(userIDs)) - ) - - err := concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(userIDs), fetchConcurrency, func(ctx context.Context, job interface{}) error { - userID := job.(string) - - cfg, err := a.getAlertConfig(ctx, path.Join(alertPrefix, userID)) - if errors.Is(err, chunk.ErrStorageObjectNotFound) { - return nil - } else if err != nil { - return errors.Wrapf(err, "failed to fetch alertmanager config for user %s", userID) - } - - cfgsMx.Lock() - cfgs[userID] = cfg - cfgsMx.Unlock() - - return nil - }) - - return cfgs, err -} - -func (a *AlertStore) getAlertConfig(ctx context.Context, key string) (alertspb.AlertConfigDesc, error) { - readCloser, err := a.client.GetObject(ctx, key) - if err != nil { - return alertspb.AlertConfigDesc{}, err - } - - defer runutil.CloseWithLogOnErr(a.logger, readCloser, "close alert config reader") - - buf, err := ioutil.ReadAll(readCloser) - if err != nil { - return alertspb.AlertConfigDesc{}, errors.Wrapf(err, "failed to read alertmanager config %s", key) - } - - config := alertspb.AlertConfigDesc{} - err = config.Unmarshal(buf) - if err != nil { - return alertspb.AlertConfigDesc{}, errors.Wrapf(err, "failed to unmarshal alertmanager config %s", key) - } - - return config, nil -} - -// GetAlertConfig implements alertstore.AlertStore. -func (a *AlertStore) GetAlertConfig(ctx context.Context, user string) (alertspb.AlertConfigDesc, error) { - cfg, err := a.getAlertConfig(ctx, path.Join(alertPrefix, user)) - if err == chunk.ErrStorageObjectNotFound { - return cfg, alertspb.ErrNotFound - } - - return cfg, err -} - -// SetAlertConfig implements alertstore.AlertStore. -func (a *AlertStore) SetAlertConfig(ctx context.Context, cfg alertspb.AlertConfigDesc) error { - cfgBytes, err := cfg.Marshal() - if err != nil { - return err - } - - return a.client.PutObject(ctx, path.Join(alertPrefix, cfg.User), bytes.NewReader(cfgBytes)) -} - -// DeleteAlertConfig implements alertstore.AlertStore. -func (a *AlertStore) DeleteAlertConfig(ctx context.Context, user string) error { - err := a.client.DeleteObject(ctx, path.Join(alertPrefix, user)) - if err == chunk.ErrStorageObjectNotFound { - return nil - } - return err -} - -// ListUsersWithFullState implements alertstore.AlertStore. -func (a *AlertStore) ListUsersWithFullState(ctx context.Context) ([]string, error) { - return nil, errState -} - -// GetFullState implements alertstore.AlertStore. -func (a *AlertStore) GetFullState(ctx context.Context, user string) (alertspb.FullStateDesc, error) { - return alertspb.FullStateDesc{}, errState -} - -// SetFullState implements alertstore.AlertStore. -func (a *AlertStore) SetFullState(ctx context.Context, user string, cfg alertspb.FullStateDesc) error { - return errState -} - -// DeleteFullState implements alertstore.AlertStore. 
-func (a *AlertStore) DeleteFullState(ctx context.Context, user string) error {
-	return errState
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/store.go
deleted file mode 100644
index 7a713d00d..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertstore/store.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package alertstore
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/go-kit/log"
-	"github.com/prometheus/client_golang/prometheus"
-
-	"github.com/cortexproject/cortex/pkg/alertmanager/alertspb"
-	"github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient"
-	"github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb"
-	"github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local"
-	"github.com/cortexproject/cortex/pkg/alertmanager/alertstore/objectclient"
-	"github.com/cortexproject/cortex/pkg/chunk"
-	"github.com/cortexproject/cortex/pkg/chunk/aws"
-	"github.com/cortexproject/cortex/pkg/chunk/azure"
-	"github.com/cortexproject/cortex/pkg/chunk/gcp"
-	"github.com/cortexproject/cortex/pkg/configs/client"
-	"github.com/cortexproject/cortex/pkg/storage/bucket"
-)
-
-// AlertStore stores and configures users' alertmanager configs
-type AlertStore interface {
-	// ListAllUsers returns all users with alertmanager configuration.
-	ListAllUsers(ctx context.Context) ([]string, error)
-
-	// GetAlertConfigs loads and returns the alertmanager configuration for given users.
-	// If any of the provided users has no configuration, then this function does not return an
-	// error but the returned configs will not include the missing users.
-	GetAlertConfigs(ctx context.Context, userIDs []string) (map[string]alertspb.AlertConfigDesc, error)
-
-	// GetAlertConfig loads and returns the alertmanager configuration for the given user.
-	GetAlertConfig(ctx context.Context, user string) (alertspb.AlertConfigDesc, error)
-
-	// SetAlertConfig stores the alertmanager configuration for a user.
-	SetAlertConfig(ctx context.Context, cfg alertspb.AlertConfigDesc) error
-
-	// DeleteAlertConfig deletes the alertmanager configuration for a user.
-	// If configuration for the user doesn't exist, no error is reported.
-	DeleteAlertConfig(ctx context.Context, user string) error
-
-	// ListUsersWithFullState returns the list of users which have had state written.
-	ListUsersWithFullState(ctx context.Context) ([]string, error)
-
-	// GetFullState loads and returns the alertmanager state for the given user.
-	GetFullState(ctx context.Context, user string) (alertspb.FullStateDesc, error)
-
-	// SetFullState stores the alertmanager state for the given user.
-	SetFullState(ctx context.Context, user string, fs alertspb.FullStateDesc) error
-
-	// DeleteFullState deletes the alertmanager state for a user.
-	// If state for the user doesn't exist, no error is reported.
-	DeleteFullState(ctx context.Context, user string) error
-}
-
-// NewLegacyAlertStore returns a new alertmanager storage backend poller and store
-func NewLegacyAlertStore(cfg LegacyConfig, logger log.Logger) (AlertStore, error) {
-	if cfg.Type == configdb.Name {
-		c, err := client.New(cfg.ConfigDB)
-		if err != nil {
-			return nil, err
-		}
-		return configdb.NewStore(c), nil
-	}
-
-	if cfg.Type == local.Name {
-		return local.NewStore(cfg.Local)
-	}
-
-	// Create the object store client.
- var client chunk.ObjectClient - var err error - switch cfg.Type { - case "azure": - client, err = azure.NewBlobStorage(&cfg.Azure) - case "gcs": - client, err = gcp.NewGCSObjectClient(context.Background(), cfg.GCS) - case "s3": - client, err = aws.NewS3ObjectClient(cfg.S3) - default: - return nil, fmt.Errorf("unrecognized alertmanager storage backend %v, choose one of: azure, configdb, gcs, local, s3", cfg.Type) - } - if err != nil { - return nil, err - } - - return objectclient.NewAlertStore(client, logger), nil -} - -// NewAlertStore returns a alertmanager store backend client based on the provided cfg. -func NewAlertStore(ctx context.Context, cfg Config, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) (AlertStore, error) { - if cfg.Backend == configdb.Name { - c, err := client.New(cfg.ConfigDB) - if err != nil { - return nil, err - } - return configdb.NewStore(c), nil - } - - if cfg.Backend == local.Name { - return local.NewStore(cfg.Local) - } - - bucketClient, err := bucket.NewClient(ctx, cfg.Config, "alertmanager-storage", logger, reg) - if err != nil { - return nil, err - } - - return bucketclient.NewBucketAlertStore(bucketClient, cfgProvider, logger), nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go deleted file mode 100644 index 905341204..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go +++ /dev/null @@ -1,470 +0,0 @@ -package alertmanager - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "reflect" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/alertmanager/config" - "github.com/prometheus/alertmanager/template" - commoncfg "github.com/prometheus/common/config" - "gopkg.in/yaml.v2" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/concurrency" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -const ( - errMarshallingYAML = "error marshalling YAML Alertmanager config" - errValidatingConfig = "error validating Alertmanager config" - errReadingConfiguration = "unable to read the Alertmanager config" - errStoringConfiguration = "unable to store the Alertmanager config" - errDeletingConfiguration = "unable to delete the Alertmanager config" - errNoOrgID = "unable to determine the OrgID" - errListAllUser = "unable to list the Alertmanager users" - errConfigurationTooBig = "Alertmanager configuration is too big, limit: %d bytes" - errTooManyTemplates = "too many templates in the configuration: %d (limit: %d)" - errTemplateTooBig = "template %s is too big: %d bytes (limit: %d bytes)" - - fetchConcurrency = 16 -) - -var ( - errPasswordFileNotAllowed = errors.New("setting password_file, bearer_token_file and credentials_file is not allowed") - errOAuth2SecretFileNotAllowed = errors.New("setting OAuth2 client_secret_file is not allowed") - errTLSFileNotAllowed = errors.New("setting TLS ca_file, cert_file and key_file is not allowed") - errSlackAPIURLFileNotAllowed = errors.New("setting Slack api_url_file and global slack_api_url_file is not allowed") - errVictorOpsAPIKeyFileNotAllowed = errors.New("setting VictorOps api_key_file is not allowed") - errOpsGenieAPIKeyFileNotAllowed = errors.New("setting OpsGenie api_key_file is not allowed") -) - -// UserConfig 
is used to communicate a users alertmanager configs -type UserConfig struct { - TemplateFiles map[string]string `yaml:"template_files"` - AlertmanagerConfig string `yaml:"alertmanager_config"` -} - -func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http.Request) { - logger := util_log.WithContext(r.Context(), am.logger) - - userID, err := tenant.TenantID(r.Context()) - if err != nil { - level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) - return - } - - cfg, err := am.store.GetAlertConfig(r.Context(), userID) - if err != nil { - if err == alertspb.ErrNotFound { - http.Error(w, err.Error(), http.StatusNotFound) - } else { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - return - } - - d, err := yaml.Marshal(&UserConfig{ - TemplateFiles: alertspb.ParseTemplates(cfg), - AlertmanagerConfig: cfg.RawConfig, - }) - - if err != nil { - level.Error(logger).Log("msg", errMarshallingYAML, "err", err, "user", userID) - http.Error(w, fmt.Sprintf("%s: %s", errMarshallingYAML, err.Error()), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/yaml") - if _, err := w.Write(d); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http.Request) { - logger := util_log.WithContext(r.Context(), am.logger) - userID, err := tenant.TenantID(r.Context()) - if err != nil { - level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) - return - } - - var input io.Reader - maxConfigSize := am.limits.AlertmanagerMaxConfigSize(userID) - if maxConfigSize > 0 { - // LimitReader will return EOF after reading specified number of bytes. To check if - // we have read too many bytes, allow one extra byte. 
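// The read-limit step that follows, in isolation: reading limit+1 bytes via
// io.LimitReader makes an oversized payload detectable without buffering an
// unbounded body. Hedged sketch with illustrative names:
func readBodyWithLimit(r io.Reader, limit int64) ([]byte, error) {
	payload, err := io.ReadAll(io.LimitReader(r, limit+1))
	if err != nil {
		return nil, err
	}
	if int64(len(payload)) > limit {
		return nil, fmt.Errorf("body exceeds the %d-byte limit", limit)
	}
	return payload, nil
}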
- input = io.LimitReader(r.Body, int64(maxConfigSize)+1) - } else { - input = r.Body - } - - payload, err := ioutil.ReadAll(input) - if err != nil { - level.Error(logger).Log("msg", errReadingConfiguration, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errReadingConfiguration, err.Error()), http.StatusBadRequest) - return - } - - if maxConfigSize > 0 && len(payload) > maxConfigSize { - msg := fmt.Sprintf(errConfigurationTooBig, maxConfigSize) - level.Warn(logger).Log("msg", msg) - http.Error(w, msg, http.StatusBadRequest) - return - } - - cfg := &UserConfig{} - err = yaml.Unmarshal(payload, cfg) - if err != nil { - level.Error(logger).Log("msg", errMarshallingYAML, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errMarshallingYAML, err.Error()), http.StatusBadRequest) - return - } - - cfgDesc := alertspb.ToProto(cfg.AlertmanagerConfig, cfg.TemplateFiles, userID) - if err := validateUserConfig(logger, cfgDesc, am.limits, userID); err != nil { - level.Warn(logger).Log("msg", errValidatingConfig, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errValidatingConfig, err.Error()), http.StatusBadRequest) - return - } - - err = am.store.SetAlertConfig(r.Context(), cfgDesc) - if err != nil { - level.Error(logger).Log("msg", errStoringConfiguration, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errStoringConfiguration, err.Error()), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusCreated) -} - -// DeleteUserConfig is exposed via user-visible API (if enabled, uses DELETE method), but also as an internal endpoint using POST method. -// Note that if no config exists for a user, StatusOK is returned. -func (am *MultitenantAlertmanager) DeleteUserConfig(w http.ResponseWriter, r *http.Request) { - logger := util_log.WithContext(r.Context(), am.logger) - userID, err := tenant.TenantID(r.Context()) - if err != nil { - level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) - return - } - - err = am.store.DeleteAlertConfig(r.Context(), userID) - if err != nil { - level.Error(logger).Log("msg", errDeletingConfiguration, "err", err.Error()) - http.Error(w, fmt.Sprintf("%s: %s", errDeletingConfiguration, err.Error()), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) -} - -// Partially copied from: https://github.com/prometheus/alertmanager/blob/8e861c646bf67599a1704fc843c6a94d519ce312/cli/check_config.go#L65-L96 -func validateUserConfig(logger log.Logger, cfg alertspb.AlertConfigDesc, limits Limits, user string) error { - // We don't have a valid use case for empty configurations. If a tenant does not have a - // configuration set and issue a request to the Alertmanager, we'll a) upload an empty - // config and b) immediately start an Alertmanager instance for them if a fallback - // configuration is provisioned. - if cfg.RawConfig == "" { - return fmt.Errorf("configuration provided is empty, if you'd like to remove your configuration please use the delete configuration endpoint") - } - - amCfg, err := config.Load(cfg.RawConfig) - if err != nil { - return err - } - - // Validate the config recursively scanning it. - if err := validateAlertmanagerConfig(amCfg); err != nil { - return err - } - - // Validate templates referenced in the alertmanager config. - for _, name := range amCfg.Templates { - if err := validateTemplateFilename(name); err != nil { - return err - } - } - - // Check template limits. 
- if l := limits.AlertmanagerMaxTemplatesCount(user); l > 0 && len(cfg.Templates) > l { - return fmt.Errorf(errTooManyTemplates, len(cfg.Templates), l) - } - - if maxSize := limits.AlertmanagerMaxTemplateSize(user); maxSize > 0 { - for _, tmpl := range cfg.Templates { - if size := len(tmpl.GetBody()); size > maxSize { - return fmt.Errorf(errTemplateTooBig, tmpl.GetFilename(), size, maxSize) - } - } - } - - // Validate template files. - for _, tmpl := range cfg.Templates { - if err := validateTemplateFilename(tmpl.Filename); err != nil { - return err - } - } - - // Create templates on disk in a temporary directory. - // Note: This means the validation will succeed if we can write to tmp but - // not to configured data dir, and on the flipside, it'll fail if we can't write - // to tmpDir. Ignoring both cases for now as they're ultra rare but will revisit if - // we see this in the wild. - userTempDir, err := ioutil.TempDir("", "validate-config-"+cfg.User) - if err != nil { - return err - } - defer os.RemoveAll(userTempDir) - - for _, tmpl := range cfg.Templates { - templateFilepath, err := safeTemplateFilepath(userTempDir, tmpl.Filename) - if err != nil { - level.Error(logger).Log("msg", "unable to create template file path", "err", err, "user", cfg.User) - return err - } - - if _, err = storeTemplateFile(templateFilepath, tmpl.Body); err != nil { - level.Error(logger).Log("msg", "unable to store template file", "err", err, "user", cfg.User) - return fmt.Errorf("unable to store template file '%s'", tmpl.Filename) - } - } - - templateFiles := make([]string, len(amCfg.Templates)) - for i, t := range amCfg.Templates { - templateFiles[i] = filepath.Join(userTempDir, t) - } - - _, err = template.FromGlobs(templateFiles...) - if err != nil { - return err - } - - // Note: Not validating the MultitenantAlertmanager.transformConfig function as that - // that function shouldn't break configuration. Only way it can fail is if the base - // autoWebhookURL itself is broken. In that case, I would argue, we should accept the config - // not reject it. 
- - return nil -} - -func (am *MultitenantAlertmanager) ListAllConfigs(w http.ResponseWriter, r *http.Request) { - logger := util_log.WithContext(r.Context(), am.logger) - userIDs, err := am.store.ListAllUsers(r.Context()) - if err != nil { - level.Error(logger).Log("msg", "failed to list users of alertmanager", "err", err) - http.Error(w, fmt.Sprintf("%s: %s", errListAllUser, err.Error()), http.StatusInternalServerError) - return - } - - done := make(chan struct{}) - iter := make(chan interface{}) - - go func() { - util.StreamWriteYAMLResponse(w, iter, logger) - close(done) - }() - - err = concurrency.ForEachUser(r.Context(), userIDs, fetchConcurrency, func(ctx context.Context, userID string) error { - cfg, err := am.store.GetAlertConfig(ctx, userID) - if errors.Is(err, alertspb.ErrNotFound) { - return nil - } else if err != nil { - return errors.Wrapf(err, "failed to fetch alertmanager config for user %s", userID) - } - data := map[string]*UserConfig{ - userID: { - TemplateFiles: alertspb.ParseTemplates(cfg), - AlertmanagerConfig: cfg.RawConfig, - }, - } - - select { - case iter <- data: - case <-done: // stop early, if sending response has already finished - } - - return nil - }) - if err != nil { - level.Error(logger).Log("msg", "failed to list all alertmanager configs", "err", err) - } - close(iter) - <-done -} - -// validateAlertmanagerConfig recursively scans the input config looking for data types for which -// we have a specific validation and, whenever encountered, it runs their validation. Returns the -// first error or nil if validation succeeds. -func validateAlertmanagerConfig(cfg interface{}) error { - v := reflect.ValueOf(cfg) - t := v.Type() - - // Skip invalid, the zero value or a nil pointer (checked by zero value). - if !v.IsValid() || v.IsZero() { - return nil - } - - // If the input config is a pointer then we need to get its value. - // At this point the pointer value can't be nil. - if v.Kind() == reflect.Ptr { - v = v.Elem() - t = v.Type() - } - - // Check if the input config is a data type for which we have a specific validation. - // At this point the value can't be a pointer anymore. - switch t { - case reflect.TypeOf(config.GlobalConfig{}): - if err := validateGlobalConfig(v.Interface().(config.GlobalConfig)); err != nil { - return err - } - - case reflect.TypeOf(commoncfg.HTTPClientConfig{}): - if err := validateReceiverHTTPConfig(v.Interface().(commoncfg.HTTPClientConfig)); err != nil { - return err - } - - case reflect.TypeOf(config.OpsGenieConfig{}): - if err := validateOpsGenieConfig(v.Interface().(config.OpsGenieConfig)); err != nil { - return err - } - - case reflect.TypeOf(commoncfg.TLSConfig{}): - if err := validateReceiverTLSConfig(v.Interface().(commoncfg.TLSConfig)); err != nil { - return err - } - - case reflect.TypeOf(config.SlackConfig{}): - if err := validateSlackConfig(v.Interface().(config.SlackConfig)); err != nil { - return err - } - - case reflect.TypeOf(config.VictorOpsConfig{}): - if err := validateVictorOpsConfig(v.Interface().(config.VictorOpsConfig)); err != nil { - return err - } - } - - // If the input config is a struct, recursively iterate on all fields. - if t.Kind() == reflect.Struct { - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - fieldValue := v.FieldByIndex(field.Index) - - // Skip any field value which can't be converted to interface (eg. primitive types). 
-			if fieldValue.CanInterface() {
-				if err := validateAlertmanagerConfig(fieldValue.Interface()); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	if t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
-		for i := 0; i < v.Len(); i++ {
-			fieldValue := v.Index(i)
-
-			// Skip any field value which can't be converted to interface (e.g. primitive types).
-			if fieldValue.CanInterface() {
-				if err := validateAlertmanagerConfig(fieldValue.Interface()); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	if t.Kind() == reflect.Map {
-		for _, key := range v.MapKeys() {
-			fieldValue := v.MapIndex(key)
-
-			// Skip any field value which can't be converted to interface (e.g. primitive types).
-			if fieldValue.CanInterface() {
-				if err := validateAlertmanagerConfig(fieldValue.Interface()); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-// validateReceiverHTTPConfig validates the HTTP config and returns an error if it contains
-// settings not allowed by Cortex.
-func validateReceiverHTTPConfig(cfg commoncfg.HTTPClientConfig) error {
-	if cfg.BasicAuth != nil && cfg.BasicAuth.PasswordFile != "" {
-		return errPasswordFileNotAllowed
-	}
-	if cfg.Authorization != nil && cfg.Authorization.CredentialsFile != "" {
-		return errPasswordFileNotAllowed
-	}
-	if cfg.BearerTokenFile != "" {
-		return errPasswordFileNotAllowed
-	}
-	if cfg.OAuth2 != nil && cfg.OAuth2.ClientSecretFile != "" {
-		return errOAuth2SecretFileNotAllowed
-	}
-	return validateReceiverTLSConfig(cfg.TLSConfig)
-}
-
-// validateReceiverTLSConfig validates the TLS config and returns an error if it contains
-// settings not allowed by Cortex.
-func validateReceiverTLSConfig(cfg commoncfg.TLSConfig) error {
-	if cfg.CAFile != "" || cfg.CertFile != "" || cfg.KeyFile != "" {
-		return errTLSFileNotAllowed
-	}
-	return nil
-}
-
-// validateGlobalConfig validates the Global config and returns an error if it contains
-// settings not allowed by Cortex.
-func validateGlobalConfig(cfg config.GlobalConfig) error {
-	if cfg.OpsGenieAPIKeyFile != "" {
-		return errOpsGenieAPIKeyFileNotAllowed
-	}
-	if cfg.SlackAPIURLFile != "" {
-		return errSlackAPIURLFileNotAllowed
-	}
-	return nil
-}
-
-// validateOpsGenieConfig validates the OpsGenie config and returns an error if it contains
-// settings not allowed by Cortex.
-func validateOpsGenieConfig(cfg config.OpsGenieConfig) error {
-	if cfg.APIKeyFile != "" {
-		return errOpsGenieAPIKeyFileNotAllowed
-	}
-	return nil
-}
-
-// validateSlackConfig validates the Slack config and returns an error if it contains
-// settings not allowed by Cortex.
-func validateSlackConfig(cfg config.SlackConfig) error {
-	if cfg.APIURLFile != "" {
-		return errSlackAPIURLFileNotAllowed
-	}
-	return nil
-}
-
-// validateVictorOpsConfig validates the VictorOps config and returns an error if it contains
-// settings not allowed by Cortex.
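// Condensed, hedged restatement of the reflective scan implemented above:
// recursively visit every reachable value and run a check on each. Like the
// original, this sketch performs no cycle detection.
func walkConfig(v reflect.Value, check func(reflect.Value) error) error {
	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
		return nil
	}
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if err := check(v); err != nil {
		return err
	}
	switch v.Kind() {
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			if f := v.Field(i); f.CanInterface() { // skip unexported fields
				if err := walkConfig(f, check); err != nil {
					return err
				}
			}
		}
	case reflect.Slice, reflect.Array:
		for i := 0; i < v.Len(); i++ {
			if err := walkConfig(v.Index(i), check); err != nil {
				return err
			}
		}
	case reflect.Map:
		for _, k := range v.MapKeys() {
			if err := walkConfig(v.MapIndex(k), check); err != nil {
				return err
			}
		}
	}
	return nil
}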
-func validateVictorOpsConfig(cfg config.VictorOpsConfig) error { - if cfg.APIKeyFile != "" { - return errVictorOpsAPIKeyFileNotAllowed - } - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go deleted file mode 100644 index fecef294d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/distributor.go +++ /dev/null @@ -1,340 +0,0 @@ -package alertmanager - -import ( - "context" - "hash/fnv" - "io/ioutil" - "math/rand" - "net/http" - "path" - "strings" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" - - "github.com/cortexproject/cortex/pkg/alertmanager/merger" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// Distributor forwards requests to individual alertmanagers. -type Distributor struct { - services.Service - - cfg ClientConfig - maxRecvMsgSize int64 - requestsInFlight sync.WaitGroup - - alertmanagerRing ring.ReadRing - alertmanagerClientsPool ClientsPool - - logger log.Logger -} - -// NewDistributor constructs a new Distributor -func NewDistributor(cfg ClientConfig, maxRecvMsgSize int64, alertmanagersRing *ring.Ring, alertmanagerClientsPool ClientsPool, logger log.Logger, reg prometheus.Registerer) (d *Distributor, err error) { - if alertmanagerClientsPool == nil { - alertmanagerClientsPool = newAlertmanagerClientsPool(client.NewRingServiceDiscovery(alertmanagersRing), cfg, logger, reg) - } - - d = &Distributor{ - cfg: cfg, - logger: logger, - maxRecvMsgSize: maxRecvMsgSize, - alertmanagerRing: alertmanagersRing, - alertmanagerClientsPool: alertmanagerClientsPool, - } - - d.Service = services.NewBasicService(nil, d.running, nil) - return d, nil -} - -func (d *Distributor) running(ctx context.Context) error { - <-ctx.Done() - d.requestsInFlight.Wait() - return nil -} - -// IsPathSupported returns true if the given route is currently supported by the Distributor. -func (d *Distributor) IsPathSupported(p string) bool { - // API can be found at https://petstore.swagger.io/?url=https://raw.githubusercontent.com/prometheus/alertmanager/master/api/v2/openapi.yaml. 
- isQuorumReadPath, _ := d.isQuorumReadPath(p) - return d.isQuorumWritePath(p) || d.isUnaryWritePath(p) || d.isUnaryDeletePath(p) || d.isUnaryReadPath(p) || isQuorumReadPath -} - -func (d *Distributor) isQuorumWritePath(p string) bool { - return strings.HasSuffix(p, "/alerts") -} - -func (d *Distributor) isUnaryWritePath(p string) bool { - return strings.HasSuffix(p, "/silences") -} - -func (d *Distributor) isUnaryDeletePath(p string) bool { - return strings.HasSuffix(path.Dir(p), "/silence") -} - -func (d *Distributor) isQuorumReadPath(p string) (bool, merger.Merger) { - if strings.HasSuffix(p, "/v1/alerts") { - return true, merger.V1Alerts{} - } - if strings.HasSuffix(p, "/v2/alerts") { - return true, merger.V2Alerts{} - } - if strings.HasSuffix(p, "/v2/alerts/groups") { - return true, merger.V2AlertGroups{} - } - if strings.HasSuffix(p, "/v1/silences") { - return true, merger.V1Silences{} - } - if strings.HasSuffix(path.Dir(p), "/v1/silence") { - return true, merger.V1SilenceID{} - } - if strings.HasSuffix(p, "/v2/silences") { - return true, merger.V2Silences{} - } - if strings.HasSuffix(path.Dir(p), "/v2/silence") { - return true, merger.V2SilenceID{} - } - return false, nil -} - -func (d *Distributor) isUnaryReadPath(p string) bool { - return strings.HasSuffix(p, "/status") || - strings.HasSuffix(p, "/receivers") -} - -// DistributeRequest shards the writes and returns as soon as the quorum is satisfied. -// In case of reads, it proxies the request to one of the alertmanagers. -// DistributeRequest assumes that the caller has verified IsPathSupported returns -// true for the route. -func (d *Distributor) DistributeRequest(w http.ResponseWriter, r *http.Request) { - d.requestsInFlight.Add(1) - defer d.requestsInFlight.Done() - - userID, err := tenant.TenantID(r.Context()) - if err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - logger := util_log.WithContext(r.Context(), d.logger) - - if r.Method == http.MethodPost { - if d.isQuorumWritePath(r.URL.Path) { - d.doQuorum(userID, w, r, logger, merger.Noop{}) - return - } - if d.isUnaryWritePath(r.URL.Path) { - d.doUnary(userID, w, r, logger) - return - } - } - if r.Method == http.MethodDelete { - if d.isUnaryDeletePath(r.URL.Path) { - d.doUnary(userID, w, r, logger) - return - } - } - if r.Method == http.MethodGet || r.Method == http.MethodHead { - if ok, m := d.isQuorumReadPath(r.URL.Path); ok { - d.doQuorum(userID, w, r, logger, m) - return - } - if d.isUnaryReadPath(r.URL.Path) { - d.doUnary(userID, w, r, logger) - return - } - } - - http.Error(w, "route not supported by distributor", http.StatusNotFound) -} - -func (d *Distributor) doQuorum(userID string, w http.ResponseWriter, r *http.Request, logger log.Logger, m merger.Merger) { - var body []byte - var err error - if r.Body != nil { - body, err = ioutil.ReadAll(http.MaxBytesReader(w, r.Body, d.maxRecvMsgSize)) - if err != nil { - if util.IsRequestBodyTooLarge(err) { - http.Error(w, "Request body too large", http.StatusRequestEntityTooLarge) - return - } - level.Error(logger).Log("msg", "failed to read the request body during write", "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - } - - var responses []*httpgrpc.HTTPResponse - var responsesMtx sync.Mutex - grpcHeaders := httpToHttpgrpcHeaders(r.Header) - err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error { - // Use a background context to make sure all alertmanagers get the request even if we 
return early.
-		localCtx := user.InjectOrgID(context.Background(), userID)
-		sp, localCtx := opentracing.StartSpanFromContext(localCtx, "Distributor.doQuorum")
-		defer sp.Finish()
-
-		resp, err := d.doRequest(localCtx, am, &httpgrpc.HTTPRequest{
-			Method:  r.Method,
-			Url:     r.RequestURI,
-			Body:    body,
-			Headers: grpcHeaders,
-		})
-		if err != nil {
-			return err
-		}
-
-		if resp.Code/100 != 2 {
-			return httpgrpc.ErrorFromHTTPResponse(resp)
-		}
-
-		responsesMtx.Lock()
-		responses = append(responses, resp)
-		responsesMtx.Unlock()
-
-		return nil
-	}, func() {})
-
-	if err != nil {
-		respondFromError(err, w, logger)
-		return
-	}
-
-	responsesMtx.Lock() // Another request might be ongoing after quorum.
-	resps := responses
-	responsesMtx.Unlock()
-
-	if len(resps) > 0 {
-		respondFromMultipleHTTPGRPCResponses(w, logger, resps, m)
-	} else {
-		// This should not happen.
-		level.Error(logger).Log("msg", "distributor did not receive any response from alertmanagers, but there were no errors")
-		w.WriteHeader(http.StatusInternalServerError)
-	}
-}
-
-func (d *Distributor) doUnary(userID string, w http.ResponseWriter, r *http.Request, logger log.Logger) {
-	key := shardByUser(userID)
-	replicationSet, err := d.alertmanagerRing.Get(key, RingOp, nil, nil, nil)
-	if err != nil {
-		level.Error(logger).Log("msg", "failed to get replication set from the ring", "err", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		return
-	}
-
-	body, err := ioutil.ReadAll(http.MaxBytesReader(w, r.Body, d.maxRecvMsgSize))
-	if err != nil {
-		if util.IsRequestBodyTooLarge(err) {
-			http.Error(w, "Request body too large", http.StatusRequestEntityTooLarge)
-			return
-		}
-		level.Error(logger).Log("msg", "failed to read the request body during read", "err", err)
-		w.WriteHeader(http.StatusInternalServerError)
-		return
-	}
-	req := &httpgrpc.HTTPRequest{
-		Method:  r.Method,
-		Url:     r.RequestURI,
-		Body:    body,
-		Headers: httpToHttpgrpcHeaders(r.Header),
-	}
-
-	sp, ctx := opentracing.StartSpanFromContext(r.Context(), "Distributor.doUnary")
-	defer sp.Finish()
-	// Until we have a mechanism to combine the results from multiple alertmanagers,
-	// we forward the request to only one of the alertmanagers.
- amDesc := replicationSet.Instances[rand.Intn(len(replicationSet.Instances))] - resp, err := d.doRequest(ctx, amDesc, req) - if err != nil { - respondFromError(err, w, logger) - return - } - - respondFromHTTPGRPCResponse(w, resp) -} - -func respondFromError(err error, w http.ResponseWriter, logger log.Logger) { - httpResp, ok := httpgrpc.HTTPResponseFromError(errors.Cause(err)) - if !ok { - level.Error(logger).Log("msg", "failed to process the request to the alertmanager", "err", err) - http.Error(w, "Failed to process the request to the alertmanager", http.StatusInternalServerError) - return - } - respondFromHTTPGRPCResponse(w, httpResp) -} - -func respondFromHTTPGRPCResponse(w http.ResponseWriter, httpResp *httpgrpc.HTTPResponse) { - for _, h := range httpResp.Headers { - for _, v := range h.Values { - w.Header().Add(h.Key, v) - } - } - w.WriteHeader(int(httpResp.Code)) - w.Write(httpResp.Body) //nolint -} - -func (d *Distributor) doRequest(ctx context.Context, am ring.InstanceDesc, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - ctx, cancel := context.WithTimeout(ctx, d.cfg.RemoteTimeout) - defer cancel() - amClient, err := d.alertmanagerClientsPool.GetClientFor(am.Addr) - if err != nil { - return nil, errors.Wrapf(err, "failed to get alertmanager client from pool (alertmanager address: %s)", am.Addr) - } - - return amClient.HandleRequest(ctx, req) -} - -func shardByUser(userID string) uint32 { - ringHasher := fnv.New32a() - // Hasher never returns err. - _, _ = ringHasher.Write([]byte(userID)) - return ringHasher.Sum32() -} - -func httpToHttpgrpcHeaders(hs http.Header) []*httpgrpc.Header { - result := make([]*httpgrpc.Header, 0, len(hs)) - for k, vs := range hs { - result = append(result, &httpgrpc.Header{ - Key: k, - Values: vs, - }) - } - return result -} - -func respondFromMultipleHTTPGRPCResponses(w http.ResponseWriter, logger log.Logger, responses []*httpgrpc.HTTPResponse, merger merger.Merger) { - bodies := make([][]byte, len(responses)) - for i, r := range responses { - bodies[i] = r.Body - } - - body, err := merger.MergeResponses(bodies) - if err != nil { - level.Error(logger).Log("msg", "failed to merge responses for request", "err", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - // It is assumed by using this function, the caller knows that the responses it receives - // have already been checked for success or failure, and that the headers will always - // match due to the nature of the request. If this is not the case, a different merge - // function should be implemented to cope with the differing responses. 
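// shardByUser above, in isolation: hashing the tenant ID with FNV-1a yields a
// deterministic ring key, so every distributor routes a given tenant to the
// same replica set. Sketch assuming import "hash/fnv":
func exampleShardKey(userID string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(userID)) // hash.Hash.Write never returns an error
	return h.Sum32()
}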
- response := &httpgrpc.HTTPResponse{ - Code: responses[0].Code, - Headers: responses[0].Headers, - Body: body, - } - - respondFromHTTPGRPCResponse(w, response) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go deleted file mode 100644 index 54e420701..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go +++ /dev/null @@ -1,28 +0,0 @@ -package alertmanager - -import ( - "github.com/cortexproject/cortex/pkg/ring" -) - -func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { - // When we initialize the alertmanager instance in the ring we want to start from - // a clean situation, so whatever is the state we set it JOINING, while we keep existing - // tokens (if any). - var tokens []uint32 - if instanceExists { - tokens = instanceDesc.GetTokens() - } - - _, takenTokens := ringDesc.TokensFor(instanceID) - newTokens := ring.GenerateTokens(RingNumTokens-len(tokens), takenTokens) - - // Tokens sorting will be enforced by the parent caller. - tokens = append(tokens, newTokens...) - - return ring.JOINING, tokens -} - -func (r *MultitenantAlertmanager) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} -func (r *MultitenantAlertmanager) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (r *MultitenantAlertmanager) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/merger.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/merger.go deleted file mode 100644 index f91913c46..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/merger.go +++ /dev/null @@ -1,18 +0,0 @@ -package merger - -// Merger represents logic for merging response bodies. -type Merger interface { - MergeResponses([][]byte) ([]byte, error) -} - -// Noop is an implementation of the Merger interface which does not actually merge -// responses, but just returns an arbitrary response(the first in the list). It can -// be used for write requests where the response is either empty or inconsequential. -type Noop struct{} - -func (Noop) MergeResponses(in [][]byte) ([]byte, error) { - if len(in) == 0 { - return nil, nil - } - return in[0], nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_alerts.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_alerts.go deleted file mode 100644 index ae2f28689..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_alerts.go +++ /dev/null @@ -1,72 +0,0 @@ -package merger - -import ( - "encoding/json" - "fmt" - "sort" - - v1 "github.com/prometheus/alertmanager/api/v1" -) - -const ( - statusSuccess = "success" -) - -// V1Alerts implements the Merger interface for GET /v1/alerts. It returns the union of alerts over -// all the responses. When the same alert exists in multiple responses, the alert instance in the -// earliest response is returned in the final response. We cannot use the UpdatedAt timestamp as -// for V2Alerts, because the v1 API does not provide it. 
-type V1Alerts struct{} - -func (V1Alerts) MergeResponses(in [][]byte) ([]byte, error) { - type bodyType struct { - Status string `json:"status"` - Data []*v1.Alert `json:"data"` - } - - alerts := make([]*v1.Alert, 0) - for _, body := range in { - parsed := bodyType{} - if err := json.Unmarshal(body, &parsed); err != nil { - return nil, err - } - if parsed.Status != statusSuccess { - return nil, fmt.Errorf("unable to merge response of status: %s", parsed.Status) - } - alerts = append(alerts, parsed.Data...) - } - - merged, err := mergeV1Alerts(alerts) - if err != nil { - return nil, err - } - body := bodyType{ - Status: statusSuccess, - Data: merged, - } - - return json.Marshal(body) -} - -func mergeV1Alerts(in []*v1.Alert) ([]*v1.Alert, error) { - // Select an arbitrary alert for each distinct alert. - alerts := make(map[string]*v1.Alert) - for _, alert := range in { - key := alert.Fingerprint - if _, ok := alerts[key]; !ok { - alerts[key] = alert - } - } - - result := make([]*v1.Alert, 0, len(alerts)) - for _, alert := range alerts { - result = append(result, alert) - } - - // Mimic Alertmanager which returns alerts ordered by fingerprint (as string). - sort.Slice(result, func(i, j int) bool { - return result[i].Fingerprint < result[j].Fingerprint - }) - - return result, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silence_id.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silence_id.go deleted file mode 100644 index 4352634f4..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silence_id.go +++ /dev/null @@ -1,51 +0,0 @@ -package merger - -import ( - "encoding/json" - "errors" - "fmt" - - v2_models "github.com/prometheus/alertmanager/api/v2/models" -) - -// V1Silences implements the Merger interface for GET /v1/silences. This re-uses the logic for -// merging /v2/silences, with additional handling for the enclosing status/data fields. Unlike for -// alerts, the API definitions for silences are almost identical between v1 and v2. The differences -// are that the fields in the JSON output are ordered differently, and the timestamps have more -// precision in v1, but these differences should not be problematic to clients. 
-type V1SilenceID struct{} - -func (V1SilenceID) MergeResponses(in [][]byte) ([]byte, error) { - type bodyType struct { - Status string `json:"status"` - Data *v2_models.GettableSilence `json:"data"` - } - - silences := make(v2_models.GettableSilences, 0) - for _, body := range in { - parsed := bodyType{} - if err := json.Unmarshal(body, &parsed); err != nil { - return nil, err - } - if parsed.Status != statusSuccess { - return nil, fmt.Errorf("unable to merge response of status: %s", parsed.Status) - } - silences = append(silences, parsed.Data) - } - - merged, err := mergeV2Silences(silences) - if err != nil { - return nil, err - } - - if len(merged) != 1 { - return nil, errors.New("unexpected mismatched silence ids") - } - - body := bodyType{ - Status: statusSuccess, - Data: merged[0], - } - - return json.Marshal(body) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silences.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silences.go deleted file mode 100644 index 1e0bd0812..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v1_silences.go +++ /dev/null @@ -1,45 +0,0 @@ -package merger - -import ( - "encoding/json" - "fmt" - - v2_models "github.com/prometheus/alertmanager/api/v2/models" -) - -// V1Silences implements the Merger interface for GET /v1/silences. Unlike for alerts, the API -// definitions for silences are almost identical between v1 and v2. The differences are that the -// fields in the JSON output are ordered differently, and the timestamps have more precision in v1, -// but these differences should not be problematic to clients. Therefore, the implementation -// re-uses the v2 types, with additional handling for the enclosing status/data fields. -type V1Silences struct{} - -func (V1Silences) MergeResponses(in [][]byte) ([]byte, error) { - type bodyType struct { - Status string `json:"status"` - Data v2_models.GettableSilences `json:"data"` - } - - silences := make(v2_models.GettableSilences, 0) - for _, body := range in { - parsed := bodyType{} - if err := json.Unmarshal(body, &parsed); err != nil { - return nil, err - } - if parsed.Status != statusSuccess { - return nil, fmt.Errorf("unable to merge response of status: %s", parsed.Status) - } - silences = append(silences, parsed.Data...) - } - - merged, err := mergeV2Silences(silences) - if err != nil { - return nil, err - } - body := bodyType{ - Status: statusSuccess, - Data: merged, - } - - return json.Marshal(body) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alert_groups.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alert_groups.go deleted file mode 100644 index ab660afa0..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alert_groups.go +++ /dev/null @@ -1,104 +0,0 @@ -package merger - -import ( - "errors" - "sort" - - "github.com/go-openapi/swag" - v2 "github.com/prometheus/alertmanager/api/v2" - v2_models "github.com/prometheus/alertmanager/api/v2/models" - prom_model "github.com/prometheus/common/model" -) - -// V2AlertGroups implements the Merger interface for GET /v2/alerts/groups. It returns -// the union of alert groups over all the responses. When the same alert exists in the same -// group for multiple responses, the instance of that alert with the most recent UpdatedAt -// timestamp is returned in that group within the response. 
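The single-result invariant enforced by both silence-by-ID mergers (the len(merged) != 1 check above) can be sketched in isolation; the silence type here is a hypothetical stand-in for the OpenAPI model:

package main

import (
	"fmt"
	"time"
)

type silence struct {
	ID        string
	UpdatedAt time.Time
}

// mergeByID keeps the most recently updated silence per ID, as mergeV2Silences does.
func mergeByID(in []silence) []silence {
	byID := map[string]silence{}
	for _, s := range in {
		if cur, ok := byID[s.ID]; !ok || s.UpdatedAt.After(cur.UpdatedAt) {
			byID[s.ID] = s
		}
	}
	out := make([]silence, 0, len(byID))
	for _, s := range byID {
		out = append(out, s)
	}
	return out
}

func main() {
	merged := mergeByID([]silence{
		{ID: "abc", UpdatedAt: time.Unix(1, 0)},
		{ID: "abc", UpdatedAt: time.Unix(2, 0)},
	})
	// For GET /v1/silence/{id} every replica reports the same ID, so exactly one
	// silence must remain; anything else is "unexpected mismatched silence ids".
	fmt.Println(len(merged) == 1, merged[0].UpdatedAt.Unix()) // true 2
}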
-type V2AlertGroups struct{} - -func (V2AlertGroups) MergeResponses(in [][]byte) ([]byte, error) { - groups := make(v2_models.AlertGroups, 0) - for _, body := range in { - parsed := make(v2_models.AlertGroups, 0) - if err := swag.ReadJSON(body, &parsed); err != nil { - return nil, err - } - groups = append(groups, parsed...) - } - - merged, err := mergeV2AlertGroups(groups) - if err != nil { - return nil, err - } - - return swag.WriteJSON(merged) -} - -func mergeV2AlertGroups(in v2_models.AlertGroups) (v2_models.AlertGroups, error) { - // Gather lists of all alerts for each distinct group. - groups := make(map[groupKey]*v2_models.AlertGroup) - for _, group := range in { - if group.Receiver == nil { - return nil, errors.New("unexpected nil receiver") - } - if group.Receiver.Name == nil { - return nil, errors.New("unexpected nil receiver name") - } - - key := getGroupKey(group) - if current, ok := groups[key]; ok { - current.Alerts = append(current.Alerts, group.Alerts...) - } else { - groups[key] = group - } - } - - // Merge duplicates of the same alert within each group. - for _, group := range groups { - var err error - group.Alerts, err = mergeV2Alerts(group.Alerts) - if err != nil { - return nil, err - } - } - - result := make(v2_models.AlertGroups, 0, len(groups)) - for _, group := range groups { - result = append(result, group) - } - - // Mimic Alertmanager which returns groups ordered by labels and receiver. - sort.Sort(byGroup(result)) - - return result, nil -} - -// getGroupKey returns an identity for a group which can be used to match it against other groups. -// Only the receiver name is necessary to ensure grouping by receiver, and for the labels, we again -// use the same method for matching the group labels as used internally, generating the fingerprint. -func getGroupKey(group *v2_models.AlertGroup) groupKey { - return groupKey{ - fingerprint: prom_model.LabelsToSignature(group.Labels), - receiver: *group.Receiver.Name, - } -} - -type groupKey struct { - fingerprint uint64 - receiver string -} - -// byGroup implements the ordering of Alertmanager dispatch.AlertGroups on the OpenAPI type. -type byGroup v2_models.AlertGroups - -func (ag byGroup) Swap(i, j int) { ag[i], ag[j] = ag[j], ag[i] } -func (ag byGroup) Less(i, j int) bool { - iLabels := v2.APILabelSetToModelLabelSet(ag[i].Labels) - jLabels := v2.APILabelSetToModelLabelSet(ag[j].Labels) - - if iLabels.Equal(jLabels) { - return *ag[i].Receiver.Name < *ag[j].Receiver.Name - } - return iLabels.Before(jLabels) -} -func (ag byGroup) Len() int { return len(ag) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alerts.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alerts.go deleted file mode 100644 index a1cbe463d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_alerts.go +++ /dev/null @@ -1,67 +0,0 @@ -package merger - -import ( - "errors" - "sort" - "time" - - "github.com/go-openapi/swag" - v2_models "github.com/prometheus/alertmanager/api/v2/models" -) - -// V2Alerts implements the Merger interface for GET /v2/alerts. It returns the union -// of alerts over all the responses. When the same alert exists in multiple responses, the -// instance of that alert with the most recent UpdatedAt timestamp is returned in the response. 
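The group identity described above can be reproduced standalone: the fingerprint comes from LabelsToSignature in github.com/prometheus/common/model, and the receiver name rides alongside (label and receiver values are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

type groupKey struct {
	fingerprint uint64
	receiver    string
}

func main() {
	labels := map[string]string{"alertname": "HighLatency", "cluster": "eu-west"}
	key := groupKey{fingerprint: model.LabelsToSignature(labels), receiver: "pagerduty"}
	// Responses carrying the same group labels and receiver map to the same key,
	// so their alerts are pooled into one group before per-alert deduplication.
	fmt.Printf("%+v\n", key)
}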
-type V2Alerts struct{}
-
-func (V2Alerts) MergeResponses(in [][]byte) ([]byte, error) {
- alerts := make(v2_models.GettableAlerts, 0)
- for _, body := range in {
- parsed := make(v2_models.GettableAlerts, 0)
- if err := swag.ReadJSON(body, &parsed); err != nil {
- return nil, err
- }
- alerts = append(alerts, parsed...)
- }
-
- merged, err := mergeV2Alerts(alerts)
- if err != nil {
- return nil, err
- }
-
- return swag.WriteJSON(merged)
-}
-
-func mergeV2Alerts(in v2_models.GettableAlerts) (v2_models.GettableAlerts, error) {
- // Select the most recently updated alert for each distinct alert.
- alerts := make(map[string]*v2_models.GettableAlert)
- for _, alert := range in {
- if alert.Fingerprint == nil {
- return nil, errors.New("unexpected nil fingerprint")
- }
- if alert.UpdatedAt == nil {
- return nil, errors.New("unexpected nil updatedAt")
- }
-
- key := *alert.Fingerprint
- if current, ok := alerts[key]; ok {
- if time.Time(*alert.UpdatedAt).After(time.Time(*current.UpdatedAt)) {
- alerts[key] = alert
- }
- } else {
- alerts[key] = alert
- }
- }
-
- result := make(v2_models.GettableAlerts, 0, len(alerts))
- for _, alert := range alerts {
- result = append(result, alert)
- }
-
- // Mimic Alertmanager which returns alerts ordered by fingerprint (as string).
- sort.Slice(result, func(i, j int) bool {
- return *result[i].Fingerprint < *result[j].Fingerprint
- })
-
- return result, nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silence_id.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silence_id.go
deleted file mode 100644
index 7718cb99e..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silence_id.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package merger
-
-import (
- "errors"
-
- "github.com/go-openapi/swag"
- v2_models "github.com/prometheus/alertmanager/api/v2/models"
-)
-
-// V2SilenceID implements the Merger interface for GET /v2/silence/{id}. It returns the most
-// recently updated silence (newest UpdatedAt timestamp).
-type V2SilenceID struct{}
-
-func (V2SilenceID) MergeResponses(in [][]byte) ([]byte, error) {
- silences := make(v2_models.GettableSilences, 0)
- for _, body := range in {
- parsed := &v2_models.GettableSilence{}
- if err := swag.ReadJSON(body, parsed); err != nil {
- return nil, err
- }
- silences = append(silences, parsed)
- }
-
- merged, err := mergeV2Silences(silences)
- if err != nil {
- return nil, err
- }
-
- if len(merged) != 1 {
- return nil, errors.New("unexpected mismatched silence ids")
- }
-
- return swag.WriteJSON(merged[0])
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silences.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silences.go
deleted file mode 100644
index f268e0b98..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/merger/v2_silences.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package merger
-
-import (
- "errors"
- "time"
-
- "github.com/go-openapi/swag"
- v2 "github.com/prometheus/alertmanager/api/v2"
- v2_models "github.com/prometheus/alertmanager/api/v2/models"
-)
-
-// V2Silences implements the Merger interface for GET /v2/silences. It returns the union of silences
-// over all the responses. When a silence with the same ID exists in multiple responses, the
-// most recently updated silence is returned (newest UpdatedAt timestamp).
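Unlike the v1 mergers, the v2 mergers above decode and re-encode with go-openapi/swag rather than encoding/json. A minimal round-trip, with a plain string slice standing in for the OpenAPI models:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	var parsed []string
	if err := swag.ReadJSON([]byte(`["sil-1","sil-2"]`), &parsed); err != nil {
		panic(err)
	}
	out, err := swag.WriteJSON(parsed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // ["sil-1","sil-2"]
}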
-type V2Silences struct{} - -func (V2Silences) MergeResponses(in [][]byte) ([]byte, error) { - silences := make(v2_models.GettableSilences, 0) - for _, body := range in { - parsed := make(v2_models.GettableSilences, 0) - if err := swag.ReadJSON(body, &parsed); err != nil { - return nil, err - } - silences = append(silences, parsed...) - } - - merged, err := mergeV2Silences(silences) - if err != nil { - return nil, err - } - - return swag.WriteJSON(merged) -} - -func mergeV2Silences(in v2_models.GettableSilences) (v2_models.GettableSilences, error) { - // Select the most recently updated silences for each silence ID. - silences := make(map[string]*v2_models.GettableSilence) - for _, silence := range in { - if silence.ID == nil { - return nil, errors.New("unexpected nil id") - } - if silence.UpdatedAt == nil { - return nil, errors.New("unexpected nil updatedAt") - } - - key := *silence.ID - if current, ok := silences[key]; ok { - if time.Time(*silence.UpdatedAt).After(time.Time(*current.UpdatedAt)) { - silences[key] = silence - } - } else { - silences[key] = silence - } - } - - result := make(v2_models.GettableSilences, 0, len(silences)) - for _, silence := range silences { - result = append(result, silence) - } - - // Re-use Alertmanager sorting for silences. - v2.SortSilences(result) - - return result, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go deleted file mode 100644 index a02aa4a5c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go +++ /dev/null @@ -1,1370 +0,0 @@ -package alertmanager - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/alertmanager/cluster" - "github.com/prometheus/alertmanager/cluster/clusterpb" - amconfig "github.com/prometheus/alertmanager/config" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/httpgrpc/server" - "github.com/weaveworks/common/user" - "golang.org/x/time/rate" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb" - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/concurrency" - "github.com/cortexproject/cortex/pkg/util/flagext" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/services" -) - -const ( - // If a config sets the webhook URL to this, it will be rewritten to - // a URL derived from Config.AutoWebhookRoot - autoWebhookURL = "http://internal.monitor" - - // Reasons for (re)syncing alertmanager configurations from object storage. - reasonPeriodic = "periodic" - reasonInitial = "initial" - reasonRingChange = "ring-change" - - // ringAutoForgetUnhealthyPeriods is how many consecutive timeout periods an unhealthy instance - // in the ring will be automatically removed. 
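As a worked example of the auto-forget window defined just below: the delegate set up later in this file multiplies the ring's heartbeat timeout by this constant, so an unhealthy instance is forgotten after that many consecutive heartbeat-timeout periods (the 1-minute timeout here is illustrative):

package main

import (
	"fmt"
	"time"
)

const ringAutoForgetUnhealthyPeriods = 5

func main() {
	heartbeatTimeout := time.Minute // illustrative; Cortex reads this from the sharding ring config
	forgetAfter := ringAutoForgetUnhealthyPeriods * heartbeatTimeout
	fmt.Println(forgetAfter) // 5m0s
}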
- ringAutoForgetUnhealthyPeriods = 5 -) - -var ( - errInvalidExternalURL = errors.New("the configured external URL is invalid: should not end with /") - errShardingLegacyStorage = errors.New("deprecated -alertmanager.storage.* not supported with -alertmanager.sharding-enabled, use -alertmanager-storage.*") - errShardingUnsupportedStorage = errors.New("the configured alertmanager storage backend is not supported when sharding is enabled") - errZoneAwarenessEnabledWithoutZoneInfo = errors.New("the configured alertmanager has zone awareness enabled but zone is not set") -) - -// MultitenantAlertmanagerConfig is the configuration for a multitenant Alertmanager. -type MultitenantAlertmanagerConfig struct { - DataDir string `yaml:"data_dir"` - Retention time.Duration `yaml:"retention"` - ExternalURL flagext.URLValue `yaml:"external_url"` - PollInterval time.Duration `yaml:"poll_interval"` - MaxRecvMsgSize int64 `yaml:"max_recv_msg_size"` - - // Enable sharding for the Alertmanager - ShardingEnabled bool `yaml:"sharding_enabled"` - ShardingRing RingConfig `yaml:"sharding_ring"` - - FallbackConfigFile string `yaml:"fallback_config_file"` - AutoWebhookRoot string `yaml:"auto_webhook_root"` - - Store alertstore.LegacyConfig `yaml:"storage" doc:"description=Deprecated. Use -alertmanager-storage.* CLI flags and their respective YAML config options instead."` - Cluster ClusterConfig `yaml:"cluster"` - - EnableAPI bool `yaml:"enable_api"` - - // For distributor. - AlertmanagerClient ClientConfig `yaml:"alertmanager_client"` - - // For the state persister. - Persister PersisterConfig `yaml:",inline"` -} - -type ClusterConfig struct { - ListenAddr string `yaml:"listen_address"` - AdvertiseAddr string `yaml:"advertise_address"` - Peers flagext.StringSliceCSV `yaml:"peers"` - PeerTimeout time.Duration `yaml:"peer_timeout"` - GossipInterval time.Duration `yaml:"gossip_interval"` - PushPullInterval time.Duration `yaml:"push_pull_interval"` -} - -const ( - defaultClusterAddr = "0.0.0.0:9094" - defaultPeerTimeout = 15 * time.Second -) - -// RegisterFlags adds the flags required to config this to the given FlagSet. -func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.DataDir, "alertmanager.storage.path", "data/", "Base path for data storage.") - f.DurationVar(&cfg.Retention, "alertmanager.storage.retention", 5*24*time.Hour, "How long to keep data for.") - f.Int64Var(&cfg.MaxRecvMsgSize, "alertmanager.max-recv-msg-size", 16<<20, "Maximum size (bytes) of an accepted HTTP request body.") - - f.Var(&cfg.ExternalURL, "alertmanager.web.external-url", "The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager. 
If omitted, relevant URL components will be derived automatically.") - - f.StringVar(&cfg.FallbackConfigFile, "alertmanager.configs.fallback", "", "Filename of fallback config to use if none specified for instance.") - f.StringVar(&cfg.AutoWebhookRoot, "alertmanager.configs.auto-webhook-root", "", "Root of URL to generate if config is "+autoWebhookURL) - f.DurationVar(&cfg.PollInterval, "alertmanager.configs.poll-interval", 15*time.Second, "How frequently to poll Cortex configs") - - f.BoolVar(&cfg.EnableAPI, "experimental.alertmanager.enable-api", false, "Enable the experimental alertmanager config api.") - - f.BoolVar(&cfg.ShardingEnabled, "alertmanager.sharding-enabled", false, "Shard tenants across multiple alertmanager instances.") - - cfg.AlertmanagerClient.RegisterFlagsWithPrefix("alertmanager.alertmanager-client", f) - cfg.Persister.RegisterFlagsWithPrefix("alertmanager", f) - cfg.ShardingRing.RegisterFlags(f) - cfg.Store.RegisterFlags(f) - cfg.Cluster.RegisterFlags(f) -} - -func (cfg *ClusterConfig) RegisterFlags(f *flag.FlagSet) { - prefix := "alertmanager.cluster." - f.StringVar(&cfg.ListenAddr, prefix+"listen-address", defaultClusterAddr, "Listen address and port for the cluster. Not specifying this flag disables high-availability mode.") - f.StringVar(&cfg.AdvertiseAddr, prefix+"advertise-address", "", "Explicit address or hostname to advertise in cluster.") - f.Var(&cfg.Peers, prefix+"peers", "Comma-separated list of initial peers.") - f.DurationVar(&cfg.PeerTimeout, prefix+"peer-timeout", defaultPeerTimeout, "Time to wait between peers to send notifications.") - f.DurationVar(&cfg.GossipInterval, prefix+"gossip-interval", cluster.DefaultGossipInterval, "The interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated across cluster more quickly at the expense of increased bandwidth usage.") - f.DurationVar(&cfg.PushPullInterval, prefix+"push-pull-interval", cluster.DefaultPushPullInterval, "The interval between gossip state syncs. 
Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.") -} - -// Validate config and returns error on failure -func (cfg *MultitenantAlertmanagerConfig) Validate(storageCfg alertstore.Config) error { - if cfg.ExternalURL.URL != nil && strings.HasSuffix(cfg.ExternalURL.Path, "/") { - return errInvalidExternalURL - } - - if err := cfg.Store.Validate(); err != nil { - return errors.Wrap(err, "invalid storage config") - } - - if err := cfg.Persister.Validate(); err != nil { - return err - } - - if cfg.ShardingEnabled { - if !cfg.Store.IsDefaults() { - return errShardingLegacyStorage - } - if !storageCfg.IsFullStateSupported() { - return errShardingUnsupportedStorage - } - if cfg.ShardingRing.ZoneAwarenessEnabled && cfg.ShardingRing.InstanceZone == "" { - return errZoneAwarenessEnabledWithoutZoneInfo - } - } - - return nil -} - -type multitenantAlertmanagerMetrics struct { - lastReloadSuccessful *prometheus.GaugeVec - lastReloadSuccessfulTimestamp *prometheus.GaugeVec -} - -func newMultitenantAlertmanagerMetrics(reg prometheus.Registerer) *multitenantAlertmanagerMetrics { - m := &multitenantAlertmanagerMetrics{} - - m.lastReloadSuccessful = promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "alertmanager_config_last_reload_successful", - Help: "Boolean set to 1 whenever the last configuration reload attempt was successful.", - }, []string{"user"}) - - m.lastReloadSuccessfulTimestamp = promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "alertmanager_config_last_reload_successful_seconds", - Help: "Timestamp of the last successful configuration reload.", - }, []string{"user"}) - - return m -} - -// Limits defines limits used by Alertmanager. -type Limits interface { - // AlertmanagerReceiversBlockCIDRNetworks returns the list of network CIDRs that should be blocked - // in the Alertmanager receivers for the given user. - AlertmanagerReceiversBlockCIDRNetworks(user string) []flagext.CIDR - - // AlertmanagerReceiversBlockPrivateAddresses returns true if private addresses should be blocked - // in the Alertmanager receivers for the given user. - AlertmanagerReceiversBlockPrivateAddresses(user string) bool - - // NotificationRateLimit methods return limit used by rate-limiter for given integration. - // If set to 0, no notifications are allowed. - // rate.Inf = all notifications are allowed. - // - // Note that when negative or zero values specified by user are translated to rate.Limit by Overrides, - // and may have different meaning there. - NotificationRateLimit(tenant string, integration string) rate.Limit - - // NotificationBurstSize returns burst-size for rate limiter for given integration type. If 0, no notifications are allowed except - // when limit == rate.Inf. - NotificationBurstSize(tenant string, integration string) int - - // AlertmanagerMaxConfigSize returns max size of configuration file that user is allowed to upload. If 0, there is no limit. - AlertmanagerMaxConfigSize(tenant string) int - - // AlertmanagerMaxTemplatesCount returns max number of templates that tenant can use in the configuration. 0 = no limit. - AlertmanagerMaxTemplatesCount(tenant string) int - - // AlertmanagerMaxTemplateSize returns max size of individual template. 0 = no limit. 
- AlertmanagerMaxTemplateSize(tenant string) int - - // AlertmanagerMaxDispatcherAggregationGroups returns maximum number of aggregation groups in Alertmanager's dispatcher that a tenant can have. - // Each aggregation group consumes single goroutine. 0 = unlimited. - AlertmanagerMaxDispatcherAggregationGroups(t string) int - - // AlertmanagerMaxAlertsCount returns max number of alerts that tenant can have active at the same time. 0 = no limit. - AlertmanagerMaxAlertsCount(tenant string) int - - // AlertmanagerMaxAlertsSizeBytes returns total max size of alerts that tenant can have active at the same time. 0 = no limit. - // Size of the alert is computed from alert labels, annotations and generator URL. - AlertmanagerMaxAlertsSizeBytes(tenant string) int -} - -// A MultitenantAlertmanager manages Alertmanager instances for multiple -// organizations. -type MultitenantAlertmanager struct { - services.Service - - cfg *MultitenantAlertmanagerConfig - - // Ring used for sharding alertmanager instances. - // When sharding is disabled, the flow is: - // ServeHTTP() -> serveRequest() - // When sharding is enabled: - // ServeHTTP() -> distributor.DistributeRequest() -> (sends to other AM or even the current) - // -> HandleRequest() (gRPC call) -> grpcServer() -> handlerForGRPCServer.ServeHTTP() -> serveRequest(). - ringLifecycler *ring.BasicLifecycler - ring *ring.Ring - distributor *Distributor - grpcServer *server.Server - - // Last ring state. This variable is not protected with a mutex because it's always - // accessed by a single goroutine at a time. - ringLastState ring.ReplicationSet - - // Subservices manager (ring, lifecycler) - subservices *services.Manager - subservicesWatcher *services.FailureWatcher - - store alertstore.AlertStore - - // The fallback config is stored as a string and parsed every time it's needed - // because we mutate the parsed results and don't want those changes to take - // effect here. - fallbackConfig string - - alertmanagersMtx sync.Mutex - alertmanagers map[string]*Alertmanager - // Stores the current set of configurations we're running in each tenant's Alertmanager. - // Used for comparing configurations as we synchronize them. - cfgs map[string]alertspb.AlertConfigDesc - - logger log.Logger - alertmanagerMetrics *alertmanagerMetrics - multitenantMetrics *multitenantAlertmanagerMetrics - - peer *cluster.Peer - alertmanagerClientsPool ClientsPool - - limits Limits - - registry prometheus.Registerer - ringCheckErrors prometheus.Counter - tenantsOwned prometheus.Gauge - tenantsDiscovered prometheus.Gauge - syncTotal *prometheus.CounterVec - syncFailures *prometheus.CounterVec -} - -// NewMultitenantAlertmanager creates a new MultitenantAlertmanager. 
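The NotificationRateLimit contract in the Limits interface above maps directly onto golang.org/x/time/rate; a small sketch of the two edge cases it calls out (a 0 limit blocks every notification, rate.Inf allows them all):

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	blocked := rate.NewLimiter(rate.Limit(0), 0) // limit 0: no notifications are allowed
	open := rate.NewLimiter(rate.Inf, 0)         // rate.Inf: all notifications are allowed

	fmt.Println(blocked.Allow(), open.Allow()) // false true
}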
-func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, store alertstore.AlertStore, limits Limits, logger log.Logger, registerer prometheus.Registerer) (*MultitenantAlertmanager, error) { - err := os.MkdirAll(cfg.DataDir, 0777) - if err != nil { - return nil, fmt.Errorf("unable to create Alertmanager data directory %q: %s", cfg.DataDir, err) - } - - if cfg.ExternalURL.URL == nil { - return nil, fmt.Errorf("unable to create Alertmanager because the external URL has not been configured") - } - - var fallbackConfig []byte - if cfg.FallbackConfigFile != "" { - fallbackConfig, err = ioutil.ReadFile(cfg.FallbackConfigFile) - if err != nil { - return nil, fmt.Errorf("unable to read fallback config %q: %s", cfg.FallbackConfigFile, err) - } - _, err = amconfig.LoadFile(cfg.FallbackConfigFile) - if err != nil { - return nil, fmt.Errorf("unable to load fallback config %q: %s", cfg.FallbackConfigFile, err) - } - } - - var peer *cluster.Peer - // We need to take this case into account to support our legacy upstream clustering. - if cfg.Cluster.ListenAddr != "" && !cfg.ShardingEnabled { - peer, err = cluster.Create( - log.With(logger, "component", "cluster"), - registerer, - cfg.Cluster.ListenAddr, - cfg.Cluster.AdvertiseAddr, - cfg.Cluster.Peers, - true, - cfg.Cluster.PushPullInterval, - cfg.Cluster.GossipInterval, - cluster.DefaultTcpTimeout, - cluster.DefaultProbeTimeout, - cluster.DefaultProbeInterval, - nil, - false, - ) - if err != nil { - return nil, errors.Wrap(err, "unable to initialize gossip mesh") - } - err = peer.Join(cluster.DefaultReconnectInterval, cluster.DefaultReconnectTimeout) - if err != nil { - level.Warn(logger).Log("msg", "unable to join gossip mesh while initializing cluster for high availability mode", "err", err) - } - go peer.Settle(context.Background(), cluster.DefaultGossipInterval) - } - - var ringStore kv.Client - if cfg.ShardingEnabled { - util_log.WarnExperimentalUse("Alertmanager sharding") - - ringStore, err = kv.NewClient( - cfg.ShardingRing.KVStore, - ring.GetCodec(), - kv.RegistererWithKVName(prometheus.WrapRegistererWithPrefix("cortex_", registerer), "alertmanager"), - logger, - ) - if err != nil { - return nil, errors.Wrap(err, "create KV store client") - } - } - - return createMultitenantAlertmanager(cfg, fallbackConfig, peer, store, ringStore, limits, logger, registerer) -} - -func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackConfig []byte, peer *cluster.Peer, store alertstore.AlertStore, ringStore kv.Client, limits Limits, logger log.Logger, registerer prometheus.Registerer) (*MultitenantAlertmanager, error) { - am := &MultitenantAlertmanager{ - cfg: cfg, - fallbackConfig: string(fallbackConfig), - cfgs: map[string]alertspb.AlertConfigDesc{}, - alertmanagers: map[string]*Alertmanager{}, - alertmanagerMetrics: newAlertmanagerMetrics(), - multitenantMetrics: newMultitenantAlertmanagerMetrics(registerer), - peer: peer, - store: store, - logger: log.With(logger, "component", "MultiTenantAlertmanager"), - registry: registerer, - limits: limits, - ringCheckErrors: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_alertmanager_ring_check_errors_total", - Help: "Number of errors that have occurred when checking the ring for ownership.", - }), - syncTotal: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_alertmanager_sync_configs_total", - Help: "Total number of times the alertmanager sync operation triggered.", - }, []string{"reason"}), - syncFailures: 
promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_alertmanager_sync_configs_failed_total", - Help: "Total number of times the alertmanager sync operation failed.", - }, []string{"reason"}), - tenantsDiscovered: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_alertmanager_tenants_discovered", - Help: "Number of tenants with an Alertmanager configuration discovered.", - }), - tenantsOwned: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_alertmanager_tenants_owned", - Help: "Current number of tenants owned by the Alertmanager instance.", - }), - } - - // Initialize the top-level metrics. - for _, r := range []string{reasonInitial, reasonPeriodic, reasonRingChange} { - am.syncTotal.WithLabelValues(r) - am.syncFailures.WithLabelValues(r) - } - - if cfg.ShardingEnabled { - lifecyclerCfg, err := am.cfg.ShardingRing.ToLifecyclerConfig(am.logger) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize Alertmanager's lifecycler config") - } - - // Define lifecycler delegates in reverse order (last to be called defined first because they're - // chained via "next delegate"). - delegate := ring.BasicLifecyclerDelegate(am) - delegate = ring.NewLeaveOnStoppingDelegate(delegate, am.logger) - delegate = ring.NewAutoForgetDelegate(am.cfg.ShardingRing.HeartbeatTimeout*ringAutoForgetUnhealthyPeriods, delegate, am.logger) - - am.ringLifecycler, err = ring.NewBasicLifecycler(lifecyclerCfg, RingNameForServer, RingKey, ringStore, delegate, am.logger, prometheus.WrapRegistererWithPrefix("cortex_", am.registry)) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize Alertmanager's lifecycler") - } - - am.ring, err = ring.NewWithStoreClientAndStrategy(am.cfg.ShardingRing.ToRingConfig(), RingNameForServer, RingKey, ringStore, ring.NewIgnoreUnhealthyInstancesReplicationStrategy(), prometheus.WrapRegistererWithPrefix("cortex_", am.registry), am.logger) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize Alertmanager's ring") - } - - am.grpcServer = server.NewServer(&handlerForGRPCServer{am: am}) - - am.alertmanagerClientsPool = newAlertmanagerClientsPool(client.NewRingServiceDiscovery(am.ring), cfg.AlertmanagerClient, logger, am.registry) - am.distributor, err = NewDistributor(cfg.AlertmanagerClient, cfg.MaxRecvMsgSize, am.ring, am.alertmanagerClientsPool, log.With(logger, "component", "AlertmanagerDistributor"), am.registry) - if err != nil { - return nil, errors.Wrap(err, "create distributor") - } - } - - if registerer != nil { - registerer.MustRegister(am.alertmanagerMetrics) - } - - am.Service = services.NewBasicService(am.starting, am.run, am.stopping) - - return am, nil -} - -// handlerForGRPCServer acts as a handler for gRPC server to serve -// the serveRequest() via the standard ServeHTTP. 
-type handlerForGRPCServer struct {
- am *MultitenantAlertmanager
-}
-
-func (h *handlerForGRPCServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- h.am.serveRequest(w, req)
-}
-
-func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) {
- err = am.migrateStateFilesToPerTenantDirectories()
- if err != nil {
- return err
- }
-
- defer func() {
- if err == nil || am.subservices == nil {
- return
- }
-
- if stopErr := services.StopManagerAndAwaitStopped(context.Background(), am.subservices); stopErr != nil {
- level.Error(am.logger).Log("msg", "failed to gracefully stop alertmanager dependencies", "err", stopErr)
- }
- }()
-
- if am.cfg.ShardingEnabled {
- if am.subservices, err = services.NewManager(am.ringLifecycler, am.ring, am.distributor); err != nil {
- return errors.Wrap(err, "failed to start alertmanager's subservices")
- }
-
- if err = services.StartManagerAndAwaitHealthy(ctx, am.subservices); err != nil {
- return errors.Wrap(err, "failed to start alertmanager's subservices")
- }
-
- am.subservicesWatcher = services.NewFailureWatcher()
- am.subservicesWatcher.WatchManager(am.subservices)
-
- // We wait until the instance is in the JOINING state; once it is, we know that tokens are assigned to this instance and we'll be ready to perform an initial sync of configs.
- level.Info(am.logger).Log("msg", "waiting until alertmanager is JOINING in the ring")
- if err = ring.WaitInstanceState(ctx, am.ring, am.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil {
- return err
- }
- level.Info(am.logger).Log("msg", "alertmanager is JOINING in the ring")
- }
-
- // At this point, if sharding is enabled, the instance is registered with some tokens
- // and we can run the initial iteration to sync configs. If no sharding is enabled we load _all_ the configs.
- if err := am.loadAndSyncConfigs(ctx, reasonInitial); err != nil {
- return err
- }
-
- if am.cfg.ShardingEnabled {
- // Store the ring state after the initial Alertmanager configs sync has been done and before we change
- // our state in the ring.
- am.ringLastState, _ = am.ring.GetAllHealthy(RingOp)
-
- // Make sure that all the alertmanagers we were initially configured with have
- // fetched state from the replicas, before advertising as ACTIVE. This will
- // reduce the possibility that we lose state when new instances join/leave.
- level.Info(am.logger).Log("msg", "waiting until initial state sync is complete for all users")
- if err := am.waitInitialStateSync(ctx); err != nil {
- return errors.Wrap(err, "failed to wait for initial state sync")
- }
- level.Info(am.logger).Log("msg", "initial state sync is complete")
-
- // With the initial sync now completed, we should have loaded all assigned alertmanager configurations to this instance. We can switch it to ACTIVE and start serving requests.
- if err := am.ringLifecycler.ChangeState(ctx, ring.ACTIVE); err != nil {
- return errors.Wrapf(err, "switch instance to %s in the ring", ring.ACTIVE)
- }
-
- // Wait until the ring client detected this instance in the ACTIVE state.
- level.Info(am.logger).Log("msg", "waiting until alertmanager is ACTIVE in the ring")
- if err := ring.WaitInstanceState(ctx, am.ring, am.ringLifecycler.GetInstanceID(), ring.ACTIVE); err != nil {
- return err
- }
- level.Info(am.logger).Log("msg", "alertmanager is ACTIVE in the ring")
- }
-
- return nil
-}
-
-// migrateStateFilesToPerTenantDirectories migrates any existing configuration from old place to new hierarchy.
-// TODO: Remove in Cortex 1.11.
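The migration below renames flat, prefix-keyed files in the data directory into per-tenant subdirectories. In isolation, the tenant is recovered by trimming the old prefix and the new path is rebuilt with filepath.Join (the "notifications" leaf name is a stand-in for the real snapshot constant):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	dataDir := "data"
	old := "nflog:team-a" // old flat layout: <prefix><tenant> at the data dir root
	tenant := strings.TrimPrefix(old, "nflog:")
	fmt.Println(filepath.Join(dataDir, tenant, "notifications")) // data/team-a/notifications
}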
-func (am *MultitenantAlertmanager) migrateStateFilesToPerTenantDirectories() error { - migrate := func(from, to string) error { - level.Info(am.logger).Log("msg", "migrating alertmanager state", "from", from, "to", to) - err := os.Rename(from, to) - return errors.Wrapf(err, "failed to migrate alertmanager state from %v to %v", from, to) - } - - st, err := am.getObsoleteFilesPerUser() - if err != nil { - return errors.Wrap(err, "failed to migrate alertmanager state files") - } - - for userID, files := range st { - tenantDir := am.getTenantDirectory(userID) - err := os.MkdirAll(tenantDir, 0777) - if err != nil { - return errors.Wrapf(err, "failed to create per-tenant directory %v", tenantDir) - } - - errs := tsdb_errors.NewMulti() - - if files.notificationLogSnapshot != "" { - errs.Add(migrate(files.notificationLogSnapshot, filepath.Join(tenantDir, notificationLogSnapshot))) - } - - if files.silencesSnapshot != "" { - errs.Add(migrate(files.silencesSnapshot, filepath.Join(tenantDir, silencesSnapshot))) - } - - if files.templatesDir != "" { - errs.Add(migrate(files.templatesDir, filepath.Join(tenantDir, templatesDir))) - } - - if err := errs.Err(); err != nil { - return err - } - } - return nil -} - -type obsoleteStateFiles struct { - notificationLogSnapshot string - silencesSnapshot string - templatesDir string -} - -// getObsoleteFilesPerUser returns per-user set of files that should be migrated from old structure to new structure. -func (am *MultitenantAlertmanager) getObsoleteFilesPerUser() (map[string]obsoleteStateFiles, error) { - files, err := ioutil.ReadDir(am.cfg.DataDir) - if err != nil { - return nil, errors.Wrapf(err, "failed to list dir %v", am.cfg.DataDir) - } - - // old names - const ( - notificationLogPrefix = "nflog:" - silencesPrefix = "silences:" - templates = "templates" - ) - - result := map[string]obsoleteStateFiles{} - - for _, f := range files { - fullPath := filepath.Join(am.cfg.DataDir, f.Name()) - - if f.IsDir() { - // Process templates dir. - if f.Name() != templates { - // Ignore other files -- those are likely per tenant directories. - continue - } - - templateDirs, err := ioutil.ReadDir(fullPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to list dir %v", fullPath) - } - - // Previously templates directory contained per-tenant subdirectory. 
- for _, d := range templateDirs { - if d.IsDir() { - v := result[d.Name()] - v.templatesDir = filepath.Join(fullPath, d.Name()) - result[d.Name()] = v - } else { - level.Warn(am.logger).Log("msg", "ignoring unknown local file while migrating local alertmanager state files", "file", filepath.Join(fullPath, d.Name())) - } - } - continue - } - - switch { - case strings.HasPrefix(f.Name(), notificationLogPrefix): - userID := strings.TrimPrefix(f.Name(), notificationLogPrefix) - v := result[userID] - v.notificationLogSnapshot = fullPath - result[userID] = v - - case strings.HasPrefix(f.Name(), silencesPrefix): - userID := strings.TrimPrefix(f.Name(), silencesPrefix) - v := result[userID] - v.silencesSnapshot = fullPath - result[userID] = v - - default: - level.Warn(am.logger).Log("msg", "ignoring unknown local data file while migrating local alertmanager state files", "file", fullPath) - } - } - - return result, nil -} - -func (am *MultitenantAlertmanager) run(ctx context.Context) error { - tick := time.NewTicker(am.cfg.PollInterval) - defer tick.Stop() - - var ringTickerChan <-chan time.Time - - if am.cfg.ShardingEnabled { - ringTicker := time.NewTicker(util.DurationWithJitter(am.cfg.ShardingRing.RingCheckPeriod, 0.2)) - defer ringTicker.Stop() - ringTickerChan = ringTicker.C - } - - for { - select { - case <-ctx.Done(): - return nil - case err := <-am.subservicesWatcher.Chan(): - return errors.Wrap(err, "alertmanager subservices failed") - case <-tick.C: - // We don't want to halt execution here but instead just log what happened. - if err := am.loadAndSyncConfigs(ctx, reasonPeriodic); err != nil { - level.Warn(am.logger).Log("msg", "error while synchronizing alertmanager configs", "err", err) - } - case <-ringTickerChan: - // We ignore the error because in case of error it will return an empty - // replication set which we use to compare with the previous state. - currRingState, _ := am.ring.GetAllHealthy(RingOp) - - if ring.HasReplicationSetChanged(am.ringLastState, currRingState) { - am.ringLastState = currRingState - if err := am.loadAndSyncConfigs(ctx, reasonRingChange); err != nil { - level.Warn(am.logger).Log("msg", "error while synchronizing alertmanager configs", "err", err) - } - } - } - } -} - -func (am *MultitenantAlertmanager) loadAndSyncConfigs(ctx context.Context, syncReason string) error { - level.Info(am.logger).Log("msg", "synchronizing alertmanager configs for users") - am.syncTotal.WithLabelValues(syncReason).Inc() - - allUsers, cfgs, err := am.loadAlertmanagerConfigs(ctx) - if err != nil { - am.syncFailures.WithLabelValues(syncReason).Inc() - return err - } - - am.syncConfigs(cfgs) - am.deleteUnusedLocalUserState() - - // Currently, remote state persistence is only used when sharding is enabled. - if am.cfg.ShardingEnabled { - // Note when cleaning up remote state, remember that the user may not necessarily be configured - // in this instance. Therefore, pass the list of _all_ configured users to filter by. - am.deleteUnusedRemoteUserState(ctx, allUsers) - } - - return nil -} - -func (am *MultitenantAlertmanager) waitInitialStateSync(ctx context.Context) error { - am.alertmanagersMtx.Lock() - ams := make([]*Alertmanager, 0, len(am.alertmanagers)) - for _, userAM := range am.alertmanagers { - ams = append(ams, userAM) - } - am.alertmanagersMtx.Unlock() - - for _, userAM := range ams { - if err := userAM.WaitInitialStateSync(ctx); err != nil { - return err - } - } - - return nil -} - -// stopping runs when MultitenantAlertmanager transitions to Stopping state. 
-func (am *MultitenantAlertmanager) stopping(_ error) error {
- am.alertmanagersMtx.Lock()
- for _, am := range am.alertmanagers {
- am.StopAndWait()
- }
- am.alertmanagersMtx.Unlock()
- if am.peer != nil { // Tests don't set up any peer.
- err := am.peer.Leave(am.cfg.Cluster.PeerTimeout)
- if err != nil {
- level.Warn(am.logger).Log("msg", "failed to leave the cluster", "err", err)
- }
- }
-
- if am.subservices != nil {
- // subservices manages ring and lifecycler, if sharding was enabled.
- _ = services.StopManagerAndAwaitStopped(context.Background(), am.subservices)
- }
- return nil
-}
-
-// loadAlertmanagerConfigs loads (and filters) the alertmanager configurations from object storage, taking into consideration the sharding strategy. Returns:
-// - The list of discovered users (all users with a configuration in storage)
-// - The configurations of users owned by this instance.
-func (am *MultitenantAlertmanager) loadAlertmanagerConfigs(ctx context.Context) ([]string, map[string]alertspb.AlertConfigDesc, error) {
- // Find all users with an alertmanager config.
- allUserIDs, err := am.store.ListAllUsers(ctx)
- if err != nil {
- return nil, nil, errors.Wrap(err, "failed to list users with alertmanager configuration")
- }
- numUsersDiscovered := len(allUserIDs)
- ownedUserIDs := make([]string, 0, len(allUserIDs))
-
- // Filter out users not owned by this shard.
- for _, userID := range allUserIDs {
- if am.isUserOwned(userID) {
- ownedUserIDs = append(ownedUserIDs, userID)
- }
- }
- numUsersOwned := len(ownedUserIDs)
-
- // Load the configs for the owned users.
- configs, err := am.store.GetAlertConfigs(ctx, ownedUserIDs)
- if err != nil {
- return nil, nil, errors.Wrapf(err, "failed to load alertmanager configurations for owned users")
- }
-
- am.tenantsDiscovered.Set(float64(numUsersDiscovered))
- am.tenantsOwned.Set(float64(numUsersOwned))
- return allUserIDs, configs, nil
-}
-
-func (am *MultitenantAlertmanager) isUserOwned(userID string) bool {
- // If sharding is disabled, any alertmanager instance owns all users.
- if !am.cfg.ShardingEnabled { - return true - } - - alertmanagers, err := am.ring.Get(shardByUser(userID), SyncRingOp, nil, nil, nil) - if err != nil { - am.ringCheckErrors.Inc() - level.Error(am.logger).Log("msg", "failed to load alertmanager configuration", "user", userID, "err", err) - return false - } - - return alertmanagers.Includes(am.ringLifecycler.GetInstanceAddr()) -} - -func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alertspb.AlertConfigDesc) { - level.Debug(am.logger).Log("msg", "adding configurations", "num_configs", len(cfgs)) - for user, cfg := range cfgs { - err := am.setConfig(cfg) - if err != nil { - am.multitenantMetrics.lastReloadSuccessful.WithLabelValues(user).Set(float64(0)) - level.Warn(am.logger).Log("msg", "error applying config", "err", err) - continue - } - - am.multitenantMetrics.lastReloadSuccessful.WithLabelValues(user).Set(float64(1)) - am.multitenantMetrics.lastReloadSuccessfulTimestamp.WithLabelValues(user).SetToCurrentTime() - } - - userAlertmanagersToStop := map[string]*Alertmanager{} - - am.alertmanagersMtx.Lock() - for userID, userAM := range am.alertmanagers { - if _, exists := cfgs[userID]; !exists { - userAlertmanagersToStop[userID] = userAM - delete(am.alertmanagers, userID) - delete(am.cfgs, userID) - am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(userID) - am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(userID) - am.alertmanagerMetrics.removeUserRegistry(userID) - } - } - am.alertmanagersMtx.Unlock() - - // Now stop alertmanagers and wait until they are really stopped, without holding lock. - for userID, userAM := range userAlertmanagersToStop { - level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", userID) - userAM.StopAndWait() - level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", userID) - } -} - -// setConfig applies the given configuration to the alertmanager for `userID`, -// creating an alertmanager if it doesn't already exist. 
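setConfig below cleans up template files with a mark-and-sweep pass: mark everything currently on disk, unmark what the incoming config still references, then remove the remainder. The same pattern in isolation (paths are illustrative):

package main

import "fmt"

func main() {
	onDisk := []string{"a.tmpl", "b.tmpl", "c.tmpl"}
	inConfig := map[string]bool{"a.tmpl": true}

	pathsToRemove := map[string]struct{}{}
	for _, p := range onDisk {
		pathsToRemove[p] = struct{}{} // mark every existing file
	}
	for p := range inConfig {
		delete(pathsToRemove, p) // unmark files the new config still uses
	}
	for p := range pathsToRemove {
		fmt.Println("would remove:", p) // b.tmpl and c.tmpl, in map order
	}
}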
-func (am *MultitenantAlertmanager) setConfig(cfg alertspb.AlertConfigDesc) error {
- var userAmConfig *amconfig.Config
- var err error
- var hasTemplateChanges bool
- var userTemplateDir = filepath.Join(am.getTenantDirectory(cfg.User), templatesDir)
- var pathsToRemove = make(map[string]struct{})
-
- // List existing files to keep track of the ones to be removed
- if oldTemplateFiles, err := ioutil.ReadDir(userTemplateDir); err == nil {
- for _, file := range oldTemplateFiles {
- pathsToRemove[filepath.Join(userTemplateDir, file.Name())] = struct{}{}
- }
- }
-
- for _, tmpl := range cfg.Templates {
- templateFilePath, err := safeTemplateFilepath(userTemplateDir, tmpl.Filename)
- if err != nil {
- return err
- }
-
- // Remove from the pathsToRemove map the files that still exist in the config
- delete(pathsToRemove, templateFilePath)
- hasChanged, err := storeTemplateFile(templateFilePath, tmpl.Body)
- if err != nil {
- return err
- }
-
- if hasChanged {
- hasTemplateChanges = true
- }
- }
-
- for pathToRemove := range pathsToRemove {
- err := os.Remove(pathToRemove)
- if err != nil {
- level.Warn(am.logger).Log("msg", "failed to remove file", "file", pathToRemove, "err", err)
- }
- hasTemplateChanges = true
- }
-
- level.Debug(am.logger).Log("msg", "setting config", "user", cfg.User)
-
- am.alertmanagersMtx.Lock()
- defer am.alertmanagersMtx.Unlock()
- existing, hasExisting := am.alertmanagers[cfg.User]
-
- rawCfg := cfg.RawConfig
- if cfg.RawConfig == "" {
- if am.fallbackConfig == "" {
- return fmt.Errorf("blank Alertmanager configuration for %v", cfg.User)
- }
- level.Debug(am.logger).Log("msg", "blank Alertmanager configuration; using fallback", "user", cfg.User)
- userAmConfig, err = amconfig.Load(am.fallbackConfig)
- if err != nil {
- return fmt.Errorf("unable to load fallback configuration for %v: %v", cfg.User, err)
- }
- rawCfg = am.fallbackConfig
- } else {
- userAmConfig, err = amconfig.Load(cfg.RawConfig)
- if err != nil && hasExisting {
- // This means that if a user has a working config and
- // they submit a broken one, the Manager will keep running the last known
- // working configuration.
- return fmt.Errorf("invalid Cortex configuration for %v: %v", cfg.User, err)
- }
- }
-
- // We can have an empty configuration here if:
- // 1) the user had a previous alertmanager
- // 2) then, submitted a non-working configuration (and we kept running the prev working config)
- // 3) finally, the cortex AM instance is restarted and the running version is no longer present
- if userAmConfig == nil {
- return fmt.Errorf("no usable Alertmanager configuration for %v", cfg.User)
- }
-
- // Transform webhook config URLs to the per-tenant monitor
- if am.cfg.AutoWebhookRoot != "" {
- for i, r := range userAmConfig.Receivers {
- for j, w := range r.WebhookConfigs {
- if w.URL.String() == autoWebhookURL {
- u, err := url.Parse(am.cfg.AutoWebhookRoot + "/" + cfg.User + "/monitor")
- if err != nil {
- return err
- }
-
- userAmConfig.Receivers[i].WebhookConfigs[j].URL = &amconfig.URL{URL: u}
- }
- }
- }
- }
-
- // If no Alertmanager instance exists for this user yet, start one.
- if !hasExisting {
- level.Debug(am.logger).Log("msg", "initializing new per-tenant alertmanager", "user", cfg.User)
- newAM, err := am.newAlertmanager(cfg.User, userAmConfig, rawCfg)
- if err != nil {
- return err
- }
- am.alertmanagers[cfg.User] = newAM
- } else if am.cfgs[cfg.User].RawConfig != cfg.RawConfig || hasTemplateChanges {
- level.Info(am.logger).Log("msg", "updating per-tenant alertmanager", "user", cfg.User)
- // If the config changed, apply the new one.
- err := existing.ApplyConfig(cfg.User, userAmConfig, rawCfg)
- if err != nil {
- return fmt.Errorf("unable to apply Alertmanager config for user %v: %v", cfg.User, err)
- }
- }
-
- am.cfgs[cfg.User] = cfg
- return nil
-}
-
-func (am *MultitenantAlertmanager) getTenantDirectory(userID string) string {
- return filepath.Join(am.cfg.DataDir, userID)
-}
-
-func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amconfig.Config, rawCfg string) (*Alertmanager, error) {
- reg := prometheus.NewRegistry()
-
- tenantDir := am.getTenantDirectory(userID)
- err := os.MkdirAll(tenantDir, 0777)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to create per-tenant directory %v", tenantDir)
- }
-
- newAM, err := New(&Config{
- UserID: userID,
- TenantDataDir: tenantDir,
- Logger: am.logger,
- Peer: am.peer,
- PeerTimeout: am.cfg.Cluster.PeerTimeout,
- Retention: am.cfg.Retention,
- ExternalURL: am.cfg.ExternalURL.URL,
- ShardingEnabled: am.cfg.ShardingEnabled,
- Replicator: am,
- ReplicationFactor: am.cfg.ShardingRing.ReplicationFactor,
- Store: am.store,
- PersisterConfig: am.cfg.Persister,
- Limits: am.limits,
- }, reg)
- if err != nil {
- return nil, fmt.Errorf("unable to start Alertmanager for user %v: %v", userID, err)
- }
-
- if err := newAM.ApplyConfig(userID, amConfig, rawCfg); err != nil {
- return nil, fmt.Errorf("unable to apply initial config for user %v: %v", userID, err)
- }
-
- am.alertmanagerMetrics.addUserRegistry(userID, reg)
- return newAM, nil
-}
-
-// GetPositionForUser returns the position this Alertmanager instance holds in the ring relative to its other replicas for a specific user.
-func (am *MultitenantAlertmanager) GetPositionForUser(userID string) int {
- // If we have a replication factor of 1 or less we don't need to do any work and can immediately return.
- if am.ring == nil || am.ring.ReplicationFactor() <= 1 {
- return 0
- }
-
- set, err := am.ring.Get(shardByUser(userID), RingOp, nil, nil, nil)
- if err != nil {
- level.Error(am.logger).Log("msg", "unable to read the ring while trying to determine the alertmanager position", "err", err)
- // If we're unable to determine the position, we don't want a tenant to miss out on the notification - instead,
- // just assume we're the first in line and run the risk of a double notification.
- return 0
- }
-
- var position int
- for i, instance := range set.Instances {
- if instance.Addr == am.ringLifecycler.GetInstanceAddr() {
- position = i
- break
- }
- }
-
- return position
-}
-
-// ServeHTTP serves the Alertmanager's web UI and API.
-func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- if am.State() != services.Running {
- http.Error(w, "Alertmanager not ready", http.StatusServiceUnavailable)
- return
- }
-
- if am.cfg.ShardingEnabled && am.distributor.IsPathSupported(req.URL.Path) {
- am.distributor.DistributeRequest(w, req)
- return
- }
-
- // If sharding is not enabled or Distributor does not support this path,
- // it is served by this instance.
- am.serveRequest(w, req)
-}
-
-// HandleRequest implements the gRPC Alertmanager service, which receives requests from the AlertManager-Distributor.
-func (am *MultitenantAlertmanager) HandleRequest(ctx context.Context, in *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) {
- return am.grpcServer.Handle(ctx, in)
-}
-
-// serveRequest serves the Alertmanager's web UI and API.
-func (am *MultitenantAlertmanager) serveRequest(w http.ResponseWriter, req *http.Request) {
- userID, err := tenant.TenantID(req.Context())
- if err != nil {
- http.Error(w, err.Error(), http.StatusUnauthorized)
- return
- }
- am.alertmanagersMtx.Lock()
- userAM, ok := am.alertmanagers[userID]
- am.alertmanagersMtx.Unlock()
-
- if ok {
- userAM.mux.ServeHTTP(w, req)
- return
- }
-
- if am.fallbackConfig != "" {
- userAM, err = am.alertmanagerFromFallbackConfig(userID)
- if err != nil {
- level.Error(am.logger).Log("msg", "unable to initialize the Alertmanager with a fallback configuration", "user", userID, "err", err)
- http.Error(w, "Failed to initialize the Alertmanager", http.StatusInternalServerError)
- return
- }
-
- userAM.mux.ServeHTTP(w, req)
- return
- }
-
- level.Debug(am.logger).Log("msg", "the Alertmanager has no configuration and no fallback specified", "user", userID)
- http.Error(w, "the Alertmanager is not configured", http.StatusNotFound)
-}
-
-func (am *MultitenantAlertmanager) alertmanagerFromFallbackConfig(userID string) (*Alertmanager, error) {
- // Upload an empty config so that the Alertmanager is not de-activated in the next poll
- cfgDesc := alertspb.ToProto("", nil, userID)
- err := am.store.SetAlertConfig(context.Background(), cfgDesc)
- if err != nil {
- return nil, err
- }
-
- // Calling setConfig with an empty configuration will use the fallback config.
- err = am.setConfig(cfgDesc)
- if err != nil {
- return nil, err
- }
-
- am.alertmanagersMtx.Lock()
- defer am.alertmanagersMtx.Unlock()
- return am.alertmanagers[userID], nil
-}
-
-// ReplicateStateForUser attempts to replicate a partial state sent by an alertmanager to its other replicas through the ring.
-func (am *MultitenantAlertmanager) ReplicateStateForUser(ctx context.Context, userID string, part *clusterpb.Part) error {
- level.Debug(am.logger).Log("msg", "message received for replication", "user", userID, "key", part.Key)
-
- selfAddress := am.ringLifecycler.GetInstanceAddr()
- err := ring.DoBatch(ctx, RingOp, am.ring, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error {
- if desc.GetAddr() == selfAddress {
- return nil
- }
-
- c, err := am.alertmanagerClientsPool.GetClientFor(desc.GetAddr())
- if err != nil {
- return err
- }
-
- resp, err := c.UpdateState(user.InjectOrgID(ctx, userID), part)
- if err != nil {
- return err
- }
-
- switch resp.Status {
- case alertmanagerpb.MERGE_ERROR:
- level.Error(am.logger).Log("msg", "state replication failed", "user", userID, "key", part.Key, "err", resp.Error)
- case alertmanagerpb.USER_NOT_FOUND:
- level.Debug(am.logger).Log("msg", "user not found while trying to replicate state", "user", userID, "key", part.Key)
- }
- return nil
- }, func() {})
-
- return err
-}
-
-// ReadFullStateForUser attempts to read the full state from each replica for the given user. Note that it will try to obtain and return
-// state from all replicas, but will consider it a success if state is obtained from at least one replica.
-func (am *MultitenantAlertmanager) ReadFullStateForUser(ctx context.Context, userID string) ([]*clusterpb.FullState, error) {
- // Only get the set of replicas which contain the specified user.
- key := shardByUser(userID)
- replicationSet, err := am.ring.Get(key, RingOp, nil, nil, nil)
- if err != nil {
- return nil, err
- }
-
- // We should only query state from other replicas, and not our own state.
- addrs := replicationSet.GetAddressesWithout(am.ringLifecycler.GetInstanceAddr())
-
- var (
- resultsMtx sync.Mutex
- results []*clusterpb.FullState
- )
-
- // Note that the jobs swallow the errors - this is because we want to give each replica a chance to respond.
- jobs := concurrency.CreateJobsFromStrings(addrs)
- err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error {
- addr := job.(string)
- level.Debug(am.logger).Log("msg", "contacting replica for full state", "user", userID, "addr", addr)
-
- c, err := am.alertmanagerClientsPool.GetClientFor(addr)
- if err != nil {
- level.Error(am.logger).Log("msg", "failed to get rpc client", "err", err)
- return nil
- }
-
- resp, err := c.ReadState(user.InjectOrgID(ctx, userID), &alertmanagerpb.ReadStateRequest{})
- if err != nil {
- level.Error(am.logger).Log("msg", "rpc reading state from replica failed", "addr", addr, "user", userID, "err", err)
- return nil
- }
-
- switch resp.Status {
- case alertmanagerpb.READ_OK:
- resultsMtx.Lock()
- results = append(results, resp.State)
- resultsMtx.Unlock()
- case alertmanagerpb.READ_ERROR:
- level.Error(am.logger).Log("msg", "error trying to read state", "addr", addr, "user", userID, "err", resp.Error)
- case alertmanagerpb.READ_USER_NOT_FOUND:
- level.Debug(am.logger).Log("msg", "user not found while trying to read state", "addr", addr, "user", userID)
- default:
- level.Error(am.logger).Log("msg", "unknown response trying to read state", "addr", addr, "user", userID)
- }
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- // We only require the state from a single replica, though we return as many as we were able to obtain.
- if len(results) == 0 {
- return nil, fmt.Errorf("failed to read state from any replica")
- }
-
- return results, nil
-}
-
-// UpdateState implements the Alertmanager service.
-func (am *MultitenantAlertmanager) UpdateState(ctx context.Context, part *clusterpb.Part) (*alertmanagerpb.UpdateStateResponse, error) {
- userID, err := tenant.TenantID(ctx)
- if err != nil {
- return nil, err
- }
-
- am.alertmanagersMtx.Lock()
- userAM, ok := am.alertmanagers[userID]
- am.alertmanagersMtx.Unlock()
-
- if !ok {
- // We can end up trying to replicate state to an alertmanager that is no longer available due to e.g. a ring topology change.
- level.Debug(am.logger).Log("msg", "user does not have an alertmanager in this instance", "user", userID)
- return &alertmanagerpb.UpdateStateResponse{
- Status: alertmanagerpb.USER_NOT_FOUND,
- Error: "alertmanager for this user does not exist",
- }, nil
- }
-
- if err = userAM.mergePartialExternalState(part); err != nil {
- return &alertmanagerpb.UpdateStateResponse{
- Status: alertmanagerpb.MERGE_ERROR,
- Error: err.Error(),
- }, nil
- }
-
- return &alertmanagerpb.UpdateStateResponse{Status: alertmanagerpb.OK}, nil
-}
-
-// deleteUnusedRemoteUserState deletes state objects in remote storage for users that are no longer configured.
-// deleteUnusedRemoteUserState deletes state objects in remote storage for users that are no longer configured.
-func (am *MultitenantAlertmanager) deleteUnusedRemoteUserState(ctx context.Context, allUsers []string) {
-
-	users := make(map[string]struct{}, len(allUsers))
-	for _, userID := range allUsers {
-		users[userID] = struct{}{}
-	}
-
-	usersWithState, err := am.store.ListUsersWithFullState(ctx)
-	if err != nil {
-		level.Warn(am.logger).Log("msg", "failed to list users with state", "err", err)
-		return
-	}
-
-	for _, userID := range usersWithState {
-		if _, ok := users[userID]; ok {
-			continue
-		}
-
-		err := am.store.DeleteFullState(ctx, userID)
-		if err != nil {
-			level.Warn(am.logger).Log("msg", "failed to delete remote state for user", "user", userID, "err", err)
-		} else {
-			level.Info(am.logger).Log("msg", "deleted remote state for user", "user", userID)
-		}
-	}
-}
-
-// deleteUnusedLocalUserState deletes local files for users that we no longer need.
-func (am *MultitenantAlertmanager) deleteUnusedLocalUserState() {
-	userDirs := am.getPerUserDirectories()
-
-	// And delete remaining files.
-	for userID, dir := range userDirs {
-		am.alertmanagersMtx.Lock()
-		userAM := am.alertmanagers[userID]
-		am.alertmanagersMtx.Unlock()
-
-		// Don't delete directory if AM for user still exists.
-		if userAM != nil {
-			continue
-		}
-
-		err := os.RemoveAll(dir)
-		if err != nil {
-			level.Warn(am.logger).Log("msg", "failed to delete directory for user", "dir", dir, "user", userID, "err", err)
-		} else {
-			level.Info(am.logger).Log("msg", "deleted local directory for user", "dir", dir, "user", userID)
-		}
-	}
-}
-
-// getPerUserDirectories returns a map of users to their directories (full path). Only users with a local
-// directory are returned.
-func (am *MultitenantAlertmanager) getPerUserDirectories() map[string]string {
-	files, err := ioutil.ReadDir(am.cfg.DataDir)
-	if err != nil {
-		level.Warn(am.logger).Log("msg", "failed to list local dir", "dir", am.cfg.DataDir, "err", err)
-		return nil
-	}
-
-	result := map[string]string{}
-
-	for _, f := range files {
-		fullPath := filepath.Join(am.cfg.DataDir, f.Name())
-
-		if !f.IsDir() {
-			level.Warn(am.logger).Log("msg", "ignoring unexpected file while scanning local alertmanager configs", "file", fullPath)
-			continue
-		}
-
-		result[f.Name()] = fullPath
-	}
-	return result
-}
-
-// ReadState implements the Alertmanager service.
-func (am *MultitenantAlertmanager) ReadState(ctx context.Context, req *alertmanagerpb.ReadStateRequest) (*alertmanagerpb.ReadStateResponse, error) {
-	userID, err := tenant.TenantID(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	am.alertmanagersMtx.Lock()
-	userAM, ok := am.alertmanagers[userID]
-	am.alertmanagersMtx.Unlock()
-
-	if !ok {
-		level.Debug(am.logger).Log("msg", "user does not have an alertmanager in this instance", "user", userID)
-		return &alertmanagerpb.ReadStateResponse{
-			Status: alertmanagerpb.READ_USER_NOT_FOUND,
-			Error:  "alertmanager for this user does not exist",
-		}, nil
-	}
-
-	state, err := userAM.getFullState()
-	if err != nil {
-		return &alertmanagerpb.ReadStateResponse{
-			Status: alertmanagerpb.READ_ERROR,
-			Error:  err.Error(),
-		}, nil
-	}
-
-	return &alertmanagerpb.ReadStateResponse{
-		Status: alertmanagerpb.READ_OK,
-		State:  state,
-	}, nil
-}
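Aside, not part of the removed file: the two template-path helpers that follow are a defence against path traversal. A hedged, runnable sketch combining the same two checks (no directory component in the name, and the joined absolute path must stay inside the tenant directory); safeJoin and its messages are illustrative, not the removed API:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeJoin mirrors the checks performed by the helpers below: reject names
// carrying any path component, then verify the absolute path stays under dir.
func safeJoin(dir, name string) (string, error) {
	if filepath.Base(name) != name || filepath.Dir(filepath.Clean(name)) != "." {
		return "", fmt.Errorf("invalid template name %q", name)
	}
	root, err := filepath.Abs(dir)
	if err != nil {
		return "", err
	}
	abs, err := filepath.Abs(filepath.Join(root, name))
	if err != nil {
		return "", err
	}
	if !strings.HasPrefix(abs, root) {
		return "", fmt.Errorf("template %q escapes %q", name, dir)
	}
	return abs, nil
}

func main() {
	fmt.Println(safeJoin("/data/tenant-1", "alerts.tmpl"))      // accepted
	fmt.Println(safeJoin("/data/tenant-1", "../../etc/passwd")) // rejected
}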
-// validateTemplateFilename validates the template filename and returns an error if it's not valid.
-// The validation done in this function is a first fence to avoid having a tenant submit
-// a config which may escape the per-tenant data directory on disk.
-func validateTemplateFilename(filename string) error {
-	if filepath.Base(filename) != filename {
-		return fmt.Errorf("invalid template name %q: the template name cannot contain any path", filename)
-	}
-
-	// Further enforce no path in the template name.
-	if filepath.Dir(filepath.Clean(filename)) != "." {
-		return fmt.Errorf("invalid template name %q: the template name cannot contain any path", filename)
-	}
-
-	return nil
-}
-
-// safeTemplateFilepath builds and returns the template filepath within the provided dir.
-// This function also performs a security check to make sure the provided templateName
-// doesn't contain a relative path escaping the provided dir.
-func safeTemplateFilepath(dir, templateName string) (string, error) {
-	// We expect all template files to be stored and referenced within the provided directory.
-	containerDir, err := filepath.Abs(dir)
-	if err != nil {
-		return "", err
-	}
-
-	// Build the actual path of the template.
-	actualPath, err := filepath.Abs(filepath.Join(containerDir, templateName))
-	if err != nil {
-		return "", err
-	}
-
-	// Ensure the actual path of the template is within the expected directory.
-	// This check is a counter-measure to make sure the tenant is not trying to
-	// escape its own directory on disk.
-	if !strings.HasPrefix(actualPath, containerDir) {
-		return "", fmt.Errorf("invalid template name %q: the template filepath is escaping the per-tenant local directory", templateName)
-	}
-
-	return actualPath, nil
-}
-
-// storeTemplateFile stores the template file at the given templateFilepath.
-// Returns true if the file content has changed (new or updated file), false if a file with the same name
-// and content was already stored locally.
-func storeTemplateFile(templateFilepath, content string) (bool, error) {
-	// Make sure the directory exists.
- dir := filepath.Dir(templateFilepath) - err := os.MkdirAll(dir, 0755) - if err != nil { - return false, fmt.Errorf("unable to create Alertmanager templates directory %q: %s", dir, err) - } - - // Check if the template file already exists and if it has changed - if tmpl, err := ioutil.ReadFile(templateFilepath); err == nil && string(tmpl) == content { - return false, nil - } else if err != nil && !os.IsNotExist(err) { - return false, err - } - - if err := ioutil.WriteFile(templateFilepath, []byte(content), 0644); err != nil { - return false, fmt.Errorf("unable to create Alertmanager template file %q: %s", templateFilepath, err) - } - - return true, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/rate_limited_notifier.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/rate_limited_notifier.go deleted file mode 100644 index 6ca2ca4b8..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/rate_limited_notifier.go +++ /dev/null @@ -1,65 +0,0 @@ -package alertmanager - -import ( - "context" - "errors" - "time" - - "github.com/prometheus/alertmanager/notify" - "github.com/prometheus/alertmanager/types" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/atomic" - "golang.org/x/time/rate" -) - -type rateLimits interface { - RateLimit() rate.Limit - Burst() int -} - -type rateLimitedNotifier struct { - upstream notify.Notifier - counter prometheus.Counter - - limiter *rate.Limiter - limits rateLimits - - recheckInterval time.Duration - recheckAt atomic.Int64 // unix nanoseconds timestamp -} - -func newRateLimitedNotifier(upstream notify.Notifier, limits rateLimits, recheckInterval time.Duration, counter prometheus.Counter) *rateLimitedNotifier { - return &rateLimitedNotifier{ - upstream: upstream, - counter: counter, - limits: limits, - limiter: rate.NewLimiter(limits.RateLimit(), limits.Burst()), - recheckInterval: recheckInterval, - } -} - -var errRateLimited = errors.New("failed to notify due to rate limits") - -func (r *rateLimitedNotifier) Notify(ctx context.Context, alerts ...*types.Alert) (bool, error) { - now := time.Now() - if now.UnixNano() >= r.recheckAt.Load() { - if limit := r.limits.RateLimit(); r.limiter.Limit() != limit { - r.limiter.SetLimitAt(now, limit) - } - - if burst := r.limits.Burst(); r.limiter.Burst() != burst { - r.limiter.SetBurstAt(now, burst) - } - - r.recheckAt.Store(now.UnixNano() + r.recheckInterval.Nanoseconds()) - } - - // This counts as single notification, no matter how many alerts there are in it. - if !r.limiter.AllowN(now, 1) { - r.counter.Inc() - // Don't retry this notification later. - return false, errRateLimited - } - - return r.upstream.Notify(ctx, alerts...) 
-} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_persister.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_persister.go deleted file mode 100644 index 4238b5836..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_persister.go +++ /dev/null @@ -1,130 +0,0 @@ -package alertmanager - -import ( - "context" - "flag" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/alertmanager/cluster/clusterpb" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" - "github.com/cortexproject/cortex/pkg/util/services" -) - -const ( - defaultPersistTimeout = 30 * time.Second -) - -var ( - errInvalidPersistInterval = errors.New("invalid alertmanager persist interval, must be greater than zero") -) - -type PersisterConfig struct { - Interval time.Duration `yaml:"persist_interval"` -} - -func (cfg *PersisterConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.DurationVar(&cfg.Interval, prefix+".persist-interval", 15*time.Minute, "The interval between persisting the current alertmanager state (notification log and silences) to object storage. This is only used when sharding is enabled. This state is read when all replicas for a shard can not be contacted. In this scenario, having persisted the state more frequently will result in potentially fewer lost silences, and fewer duplicate notifications.") -} - -func (cfg *PersisterConfig) Validate() error { - if cfg.Interval <= 0 { - return errInvalidPersistInterval - } - return nil -} - -type PersistableState interface { - State - GetFullState() (*clusterpb.FullState, error) -} - -// statePersister periodically writes the alertmanager state to persistent storage. -type statePersister struct { - services.Service - - state PersistableState - store alertstore.AlertStore - userID string - logger log.Logger - - timeout time.Duration - - persistTotal prometheus.Counter - persistFailed prometheus.Counter -} - -// newStatePersister creates a new state persister. -func newStatePersister(cfg PersisterConfig, userID string, state PersistableState, store alertstore.AlertStore, l log.Logger, r prometheus.Registerer) *statePersister { - - s := &statePersister{ - state: state, - store: store, - userID: userID, - logger: l, - timeout: defaultPersistTimeout, - persistTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "alertmanager_state_persist_total", - Help: "Number of times we have tried to persist the running state to remote storage.", - }), - persistFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "alertmanager_state_persist_failed_total", - Help: "Number of times we have failed to persist the running state to remote storage.", - }), - } - - s.Service = services.NewTimerService(cfg.Interval, s.starting, s.iteration, nil) - - return s -} - -func (s *statePersister) starting(ctx context.Context) error { - // Waits until the state replicator is settled, so that state is not - // persisted before obtaining some initial state. 
- return s.state.WaitReady(ctx) -} - -func (s *statePersister) iteration(ctx context.Context) error { - if err := s.persist(ctx); err != nil { - level.Error(s.logger).Log("msg", "failed to persist state", "user", s.userID, "err", err) - } - return nil -} - -func (s *statePersister) persist(ctx context.Context) (err error) { - // Only the replica at position zero should write the state. - if s.state.Position() != 0 { - return nil - } - - s.persistTotal.Inc() - defer func() { - if err != nil { - s.persistFailed.Inc() - } - }() - - level.Debug(s.logger).Log("msg", "persisting state", "user", s.userID) - - var fs *clusterpb.FullState - fs, err = s.state.GetFullState() - if err != nil { - return err - } - - ctx, cancel := context.WithTimeout(ctx, s.timeout) - defer cancel() - - desc := alertspb.FullStateDesc{State: fs} - if err = s.store.SetFullState(ctx, s.userID, desc); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go deleted file mode 100644 index fedd28cc6..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/state_replication.go +++ /dev/null @@ -1,316 +0,0 @@ -package alertmanager - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/alertmanager/cluster" - "github.com/prometheus/alertmanager/cluster/clusterpb" - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" - "github.com/cortexproject/cortex/pkg/util/services" -) - -const ( - defaultSettleReadTimeout = 15 * time.Second - defaultStoreReadTimeout = 15 * time.Second - - // Initial sync outcome label values. - syncFromReplica = "from-replica" - syncFromStorage = "from-storage" - syncUserNotFound = "user-not-found" - syncFailed = "failed" -) - -// state represents the Alertmanager silences and notification log internal state. -type state struct { - services.Service - - userID string - logger log.Logger - reg prometheus.Registerer - - settleReadTimeout time.Duration - storeReadTimeout time.Duration - - mtx sync.Mutex - states map[string]cluster.State - - replicationFactor int - replicator Replicator - store alertstore.AlertStore - - partialStateMergesTotal *prometheus.CounterVec - partialStateMergesFailed *prometheus.CounterVec - stateReplicationTotal *prometheus.CounterVec - stateReplicationFailed *prometheus.CounterVec - fetchReplicaStateTotal prometheus.Counter - fetchReplicaStateFailed prometheus.Counter - initialSyncTotal prometheus.Counter - initialSyncCompleted *prometheus.CounterVec - initialSyncDuration prometheus.Histogram - - msgc chan *clusterpb.Part -} - -// newReplicatedStates creates a new state struct, which manages state to be replicated between alertmanagers. -func newReplicatedStates(userID string, rf int, re Replicator, st alertstore.AlertStore, l log.Logger, r prometheus.Registerer) *state { - - s := &state{ - logger: l, - userID: userID, - replicationFactor: rf, - replicator: re, - store: st, - states: make(map[string]cluster.State, 2), // we use two, one for the notifications and one for silences. 
- msgc: make(chan *clusterpb.Part), - reg: r, - settleReadTimeout: defaultSettleReadTimeout, - storeReadTimeout: defaultStoreReadTimeout, - partialStateMergesTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "alertmanager_partial_state_merges_total", - Help: "Number of times we have received a partial state to merge for a key.", - }, []string{"key"}), - partialStateMergesFailed: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "alertmanager_partial_state_merges_failed_total", - Help: "Number of times we have failed to merge a partial state received for a key.", - }, []string{"key"}), - stateReplicationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "alertmanager_state_replication_total", - Help: "Number of times we have tried to replicate a state to other alertmanagers.", - }, []string{"key"}), - stateReplicationFailed: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "alertmanager_state_replication_failed_total", - Help: "Number of times we have failed to replicate a state to other alertmanagers.", - }, []string{"key"}), - fetchReplicaStateTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "alertmanager_state_fetch_replica_state_total", - Help: "Number of times we have tried to read and merge the full state from another replica.", - }), - fetchReplicaStateFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "alertmanager_state_fetch_replica_state_failed_total", - Help: "Number of times we have failed to read and merge the full state from another replica.", - }), - initialSyncTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "alertmanager_state_initial_sync_total", - Help: "Number of times we have tried to sync initial state from peers or remote storage.", - }), - initialSyncCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "alertmanager_state_initial_sync_completed_total", - Help: "Number of times we have completed syncing initial state for each possible outcome.", - }, []string{"outcome"}), - initialSyncDuration: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "alertmanager_state_initial_sync_duration_seconds", - Help: "Time spent syncing initial state from peers or remote storage.", - Buckets: prometheus.ExponentialBuckets(0.008, 4, 7), - }), - } - s.initialSyncCompleted.WithLabelValues(syncFromReplica) - s.initialSyncCompleted.WithLabelValues(syncFromStorage) - s.initialSyncCompleted.WithLabelValues(syncUserNotFound) - s.initialSyncCompleted.WithLabelValues(syncFailed) - - s.Service = services.NewBasicService(s.starting, s.running, nil) - - return s -} - -// AddState adds a new state that will be replicated using the ReplicationFunc. It returns a channel to which the client can broadcast messages of the state to be sent. -func (s *state) AddState(key string, cs cluster.State, _ prometheus.Registerer) cluster.ClusterChannel { - s.mtx.Lock() - defer s.mtx.Unlock() - - s.states[key] = cs - - s.partialStateMergesTotal.WithLabelValues(key) - s.partialStateMergesFailed.WithLabelValues(key) - s.stateReplicationTotal.WithLabelValues(key) - s.stateReplicationFailed.WithLabelValues(key) - - return &stateChannel{ - s: s, - key: key, - } -} - -// MergePartialState merges a received partial message with an internal state. 
-func (s *state) MergePartialState(p *clusterpb.Part) error {
-	s.partialStateMergesTotal.WithLabelValues(p.Key).Inc()
-
-	s.mtx.Lock()
-	defer s.mtx.Unlock()
-	st, ok := s.states[p.Key]
-	if !ok {
-		s.partialStateMergesFailed.WithLabelValues(p.Key).Inc()
-		return fmt.Errorf("key not found while merging")
-	}
-
-	if err := st.Merge(p.Data); err != nil {
-		s.partialStateMergesFailed.WithLabelValues(p.Key).Inc()
-		return err
-	}
-
-	return nil
-}
-
-// Position helps in determining how long we should wait before sending a notification based on the number of replicas.
-func (s *state) Position() int {
-	return s.replicator.GetPositionForUser(s.userID)
-}
-
-// GetFullState returns the full internal state.
-func (s *state) GetFullState() (*clusterpb.FullState, error) {
-	s.mtx.Lock()
-	defer s.mtx.Unlock()
-
-	all := &clusterpb.FullState{
-		Parts: make([]clusterpb.Part, 0, len(s.states)),
-	}
-
-	for key, s := range s.states {
-		b, err := s.MarshalBinary()
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed to encode state for key: %v", key)
-		}
-		all.Parts = append(all.Parts, clusterpb.Part{Key: key, Data: b})
-	}
-
-	return all, nil
-}
-
-// starting waits until the alertmanagers are ready (and sets the appropriate internal state when it is).
-// The idea is that we don't want to start working before we get a chance to know most of the notifications and/or silences.
-func (s *state) starting(ctx context.Context) error {
-	s.initialSyncTotal.Inc()
-	timer := prometheus.NewTimer(s.initialSyncDuration)
-	defer timer.ObserveDuration()
-
-	level.Info(s.logger).Log("msg", "Waiting for notification and silences to settle...")
-
-	// If the replication factor is <= 1, there is nowhere to obtain the state from.
-	if s.replicationFactor <= 1 {
-		level.Info(s.logger).Log("msg", "skipping settling (no replicas)")
-		return nil
-	}
-
-	// We can check other alertmanager(s) and explicitly ask them to propagate their state to us if available.
-	readCtx, cancel := context.WithTimeout(ctx, s.settleReadTimeout)
-	defer cancel()
-
-	s.fetchReplicaStateTotal.Inc()
-	fullStates, err := s.replicator.ReadFullStateForUser(readCtx, s.userID)
-	if err == nil {
-		if err = s.mergeFullStates(fullStates); err == nil {
-			level.Info(s.logger).Log("msg", "state settled; proceeding")
-			s.initialSyncCompleted.WithLabelValues(syncFromReplica).Inc()
-			return nil
-		}
-	}
-	s.fetchReplicaStateFailed.Inc()
-
-	level.Info(s.logger).Log("msg", "state not settled; trying to read from storage", "err", err)
-
-	// Attempt to read the state from persistent storage instead.
-	storeReadCtx, cancel := context.WithTimeout(ctx, s.storeReadTimeout)
-	defer cancel()
-
-	fullState, err := s.store.GetFullState(storeReadCtx, s.userID)
-	if errors.Is(err, alertspb.ErrNotFound) {
-		level.Info(s.logger).Log("msg", "no state for user in storage; proceeding", "user", s.userID)
-		s.initialSyncCompleted.WithLabelValues(syncUserNotFound).Inc()
-		return nil
-	}
-	if err == nil {
-		if err = s.mergeFullStates([]*clusterpb.FullState{fullState.State}); err == nil {
-			level.Info(s.logger).Log("msg", "state read from storage; proceeding")
-			s.initialSyncCompleted.WithLabelValues(syncFromStorage).Inc()
-			return nil
-		}
-	}
-
-	level.Warn(s.logger).Log("msg", "failed to read state from storage; continuing anyway", "err", err)
-	s.initialSyncCompleted.WithLabelValues(syncFailed).Inc()
-
-	return nil
-}
-
-// WaitReady is needed for the pipeline builder to know when we've settled and the state is up to date.
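Aside, before WaitReady continues below (not part of the removed file): starting() above settles in a fixed fallback order, replicas first, then object storage, then it proceeds empty rather than failing startup. A hedged sketch of that order, reusing the same outcome labels; all other names are illustrative:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// settle mirrors the fallback chain of starting(): try peers, then the
// store, and never treat a miss as fatal.
func settle(ctx context.Context, fromPeers, fromStore func(context.Context) ([]byte, error)) string {
	if _, err := fromPeers(ctx); err == nil {
		return "from-replica"
	}
	b, err := fromStore(ctx)
	if errors.Is(err, errNotFound) {
		return "user-not-found"
	}
	if err == nil && b != nil {
		return "from-storage"
	}
	return "failed" // logged and counted, but the alertmanager still starts
}

func main() {
	outcome := settle(context.Background(),
		func(context.Context) ([]byte, error) { return nil, errors.New("no replicas") },
		func(context.Context) ([]byte, error) { return []byte("state"), nil },
	)
	fmt.Println(outcome) // from-storage
}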
-func (s *state) WaitReady(ctx context.Context) error { - return s.Service.AwaitRunning(ctx) -} - -func (s *state) Ready() bool { - return s.Service.State() == services.Running -} - -// mergeFullStates attempts to merge all full states received from peers during settling. -func (s *state) mergeFullStates(fs []*clusterpb.FullState) error { - s.mtx.Lock() - defer s.mtx.Unlock() - - for _, f := range fs { - for _, p := range f.Parts { - level.Debug(s.logger).Log("msg", "merging full state", "user", s.userID, "key", p.Key, "bytes", len(p.Data)) - - st, ok := s.states[p.Key] - if !ok { - level.Error(s.logger).Log("msg", "key not found while merging full state", "user", s.userID, "key", p.Key) - continue - } - - if err := st.Merge(p.Data); err != nil { - return errors.Wrapf(err, "failed to merge part of full state for key: %v", p.Key) - } - } - } - - return nil -} - -func (s *state) running(ctx context.Context) error { - for { - select { - case p := <-s.msgc: - // If the replication factor is <= 1, we don't need to replicate any state anywhere else. - if s.replicationFactor <= 1 { - return nil - } - - s.stateReplicationTotal.WithLabelValues(p.Key).Inc() - if err := s.replicator.ReplicateStateForUser(ctx, s.userID, p); err != nil { - s.stateReplicationFailed.WithLabelValues(p.Key).Inc() - level.Error(s.logger).Log("msg", "failed to replicate state to other alertmanagers", "user", s.userID, "key", p.Key, "err", err) - } - case <-ctx.Done(): - return nil - } - } -} - -func (s *state) broadcast(key string, b []byte) { - // We should ignore the Merges into the initial state during settling. - if s.Ready() { - s.msgc <- &clusterpb.Part{Key: key, Data: b} - } -} - -// stateChannel allows a state publisher to send messages that will be broadcasted to all other alertmanagers that a tenant -// belongs to. -type stateChannel struct { - s *state - key string -} - -// Broadcast receives a message to be replicated by the state. 
-func (c *stateChannel) Broadcast(b []byte) { - c.s.broadcast(c.key, b) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go deleted file mode 100644 index ced768544..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go +++ /dev/null @@ -1,426 +0,0 @@ -package api - -import ( - "context" - "flag" - "net/http" - "path" - "strings" - "time" - - "github.com/NYTimes/gziphandler" - "github.com/felixge/fgprof" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/storage" - "github.com/weaveworks/common/middleware" - "github.com/weaveworks/common/server" - - "github.com/cortexproject/cortex/pkg/alertmanager" - "github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb" - "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/compactor" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/distributor" - "github.com/cortexproject/cortex/pkg/distributor/distributorpb" - frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1" - "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" - frontendv2 "github.com/cortexproject/cortex/pkg/frontend/v2" - "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ruler" - "github.com/cortexproject/cortex/pkg/scheduler" - "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" - "github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" - "github.com/cortexproject/cortex/pkg/util/push" -) - -// DistributorPushWrapper wraps around a push. It is similar to middleware.Interface. -type DistributorPushWrapper func(next push.Func) push.Func -type ConfigHandler func(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc - -type Config struct { - ResponseCompression bool `yaml:"response_compression_enabled"` - - AlertmanagerHTTPPrefix string `yaml:"alertmanager_http_prefix"` - PrometheusHTTPPrefix string `yaml:"prometheus_http_prefix"` - - // The following configs are injected by the upstream caller. - ServerPrefix string `yaml:"-"` - LegacyHTTPPrefix string `yaml:"-"` - HTTPAuthMiddleware middleware.Interface `yaml:"-"` - - // This allows downstream projects to wrap the distributor push function - // and access the deserialized write requests before/after they are pushed. - DistributorPushWrapper DistributorPushWrapper `yaml:"-"` - - // The CustomConfigHandler allows for providing a different handler for the - // `/config` endpoint. If this field is set _before_ the API module is - // initialized, the custom config handler will be used instead of - // DefaultConfigHandler. - CustomConfigHandler ConfigHandler `yaml:"-"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.BoolVar(&cfg.ResponseCompression, "api.response-compression-enabled", false, "Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression.") - cfg.RegisterFlagsWithPrefix("", f) -} - -// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet with the set prefix. 
-func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.AlertmanagerHTTPPrefix, prefix+"http.alertmanager-http-prefix", "/alertmanager", "HTTP URL path under which the Alertmanager ui and api will be served.") - f.StringVar(&cfg.PrometheusHTTPPrefix, prefix+"http.prometheus-http-prefix", "/prometheus", "HTTP URL path under which the Prometheus api will be served.") -} - -// Push either wraps the distributor push function as configured or returns the distributor push directly. -func (cfg *Config) wrapDistributorPush(d *distributor.Distributor) push.Func { - if cfg.DistributorPushWrapper != nil { - return cfg.DistributorPushWrapper(d.Push) - } - - return d.Push -} - -type API struct { - AuthMiddleware middleware.Interface - - cfg Config - server *server.Server - logger log.Logger - sourceIPs *middleware.SourceIPExtractor - indexPage *IndexPageContent -} - -func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logger) (*API, error) { - // Ensure the encoded path is used. Required for the rules API - s.HTTP.UseEncodedPath() - - var sourceIPs *middleware.SourceIPExtractor - if serverCfg.LogSourceIPs { - var err error - sourceIPs, err = middleware.NewSourceIPs(serverCfg.LogSourceIPsHeader, serverCfg.LogSourceIPsRegex) - if err != nil { - // This should have already been caught in the Server creation - return nil, err - } - } - - api := &API{ - cfg: cfg, - AuthMiddleware: cfg.HTTPAuthMiddleware, - server: s, - logger: logger, - sourceIPs: sourceIPs, - indexPage: newIndexPageContent(), - } - - // If no authentication middleware is present in the config, use the default authentication middleware. - if cfg.HTTPAuthMiddleware == nil { - api.AuthMiddleware = middleware.AuthenticateUser - } - - return api, nil -} - -// RegisterRoute registers a single route enforcing HTTP methods. A single -// route is expected to be specific about which HTTP methods are supported. -func (a *API) RegisterRoute(path string, handler http.Handler, auth bool, method string, methods ...string) { - methods = append([]string{method}, methods...) - - level.Debug(a.logger).Log("msg", "api: registering route", "methods", strings.Join(methods, ","), "path", path, "auth", auth) - - if auth { - handler = a.AuthMiddleware.Wrap(handler) - } - - if a.cfg.ResponseCompression { - handler = gziphandler.GzipHandler(handler) - } - - if len(methods) == 0 { - a.server.HTTP.Path(path).Handler(handler) - return - } - a.server.HTTP.Path(path).Methods(methods...).Handler(handler) -} - -func (a *API) RegisterRoutesWithPrefix(prefix string, handler http.Handler, auth bool, methods ...string) { - level.Debug(a.logger).Log("msg", "api: registering route", "methods", strings.Join(methods, ","), "prefix", prefix, "auth", auth) - if auth { - handler = a.AuthMiddleware.Wrap(handler) - } - - if a.cfg.ResponseCompression { - handler = gziphandler.GzipHandler(handler) - } - - if len(methods) == 0 { - a.server.HTTP.PathPrefix(prefix).Handler(handler) - return - } - a.server.HTTP.PathPrefix(prefix).Methods(methods...).Handler(handler) -} - -// RegisterAlertmanager registers endpoints associated with the alertmanager. It will only -// serve endpoints using the legacy http-prefix if it is not run as a single binary. 
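Aside, before RegisterAlertmanager continues below (not part of the removed file): the RegisterRoute helper above enforces explicit HTTP methods and optionally wraps the handler in auth middleware before it reaches the router. A hedged sketch of that layering on the standard library; the authenticate middleware here is a stand-in, not the weaveworks one:

package main

import (
	"fmt"
	"net/http"
)

// authenticate is an illustrative auth wrap: reject requests without a
// tenant header, otherwise pass through to the next handler.
func authenticate(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Scope-OrgID") == "" {
			http.Error(w, "no org id", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}

// registerRoute wraps the handler before registration, as RegisterRoute does.
func registerRoute(mux *http.ServeMux, path string, h http.Handler, auth bool) {
	if auth {
		h = authenticate(h)
	}
	mux.Handle(path, h)
}

func main() {
	mux := http.NewServeMux()
	registerRoute(mux, "/api/v1/alerts", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	}), true)
	_ = http.ListenAndServe(":8080", mux)
}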
-func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, target, apiEnabled bool) { - alertmanagerpb.RegisterAlertmanagerServer(a.server.GRPC, am) - - a.indexPage.AddLink(SectionAdminEndpoints, "/multitenant_alertmanager/status", "Alertmanager Status") - a.indexPage.AddLink(SectionAdminEndpoints, "/multitenant_alertmanager/ring", "Alertmanager Ring Status") - // Ensure this route is registered before the prefixed AM route - a.RegisterRoute("/multitenant_alertmanager/status", am.GetStatusHandler(), false, "GET") - a.RegisterRoute("/multitenant_alertmanager/configs", http.HandlerFunc(am.ListAllConfigs), false, "GET") - a.RegisterRoute("/multitenant_alertmanager/ring", http.HandlerFunc(am.RingHandler), false, "GET", "POST") - a.RegisterRoute("/multitenant_alertmanager/delete_tenant_config", http.HandlerFunc(am.DeleteUserConfig), true, "POST") - - // UI components lead to a large number of routes to support, utilize a path prefix instead - a.RegisterRoutesWithPrefix(a.cfg.AlertmanagerHTTPPrefix, am, true) - level.Debug(a.logger).Log("msg", "api: registering alertmanager", "path_prefix", a.cfg.AlertmanagerHTTPPrefix) - - // MultiTenant Alertmanager Experimental API routes - if apiEnabled { - a.RegisterRoute("/api/v1/alerts", http.HandlerFunc(am.GetUserConfig), true, "GET") - a.RegisterRoute("/api/v1/alerts", http.HandlerFunc(am.SetUserConfig), true, "POST") - a.RegisterRoute("/api/v1/alerts", http.HandlerFunc(am.DeleteUserConfig), true, "DELETE") - } - - // If the target is Alertmanager, enable the legacy behaviour. Otherwise only enable - // the component routed API. - if target { - a.RegisterRoute("/status", am.GetStatusHandler(), false, "GET") - // WARNING: If LegacyHTTPPrefix is an empty string, any other paths added after this point will be - // silently ignored by the HTTP service. Therefore, this must be the last route to be configured. - a.RegisterRoutesWithPrefix(a.cfg.LegacyHTTPPrefix, am, true) - } -} - -// RegisterAPI registers the standard endpoints associated with a running Cortex. -func (a *API) RegisterAPI(httpPathPrefix string, actualCfg interface{}, defaultCfg interface{}) { - a.indexPage.AddLink(SectionAdminEndpoints, "/config", "Current Config (including the default values)") - a.indexPage.AddLink(SectionAdminEndpoints, "/config?mode=diff", "Current Config (show only values that differ from the defaults)") - - a.RegisterRoute("/config", a.cfg.configHandler(actualCfg, defaultCfg), false, "GET") - a.RegisterRoute("/", indexHandler(httpPathPrefix, a.indexPage), false, "GET") - a.RegisterRoute("/debug/fgprof", fgprof.Handler(), false, "GET") -} - -// RegisterRuntimeConfig registers the endpoints associates with the runtime configuration -func (a *API) RegisterRuntimeConfig(runtimeConfigHandler http.HandlerFunc) { - a.indexPage.AddLink(SectionAdminEndpoints, "/runtime_config", "Current Runtime Config (incl. Overrides)") - a.indexPage.AddLink(SectionAdminEndpoints, "/runtime_config?mode=diff", "Current Runtime Config (show only values that differ from the defaults)") - - a.RegisterRoute("/runtime_config", runtimeConfigHandler, false, "GET") -} - -// RegisterDistributor registers the endpoints associated with the distributor. 
-func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config) { - distributorpb.RegisterDistributorServer(a.server.GRPC, d) - - a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") - - a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ring", "Distributor Ring Status") - a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/all_user_stats", "Usage Statistics") - a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ha_tracker", "HA Tracking Status") - - a.RegisterRoute("/distributor/ring", d, false, "GET", "POST") - a.RegisterRoute("/distributor/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false, "GET") - a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false, "GET") - - // Legacy Routes - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") - a.RegisterRoute("/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false, "GET") - a.RegisterRoute("/ha-tracker", d.HATracker, false, "GET") -} - -// Ingester is defined as an interface to allow for alternative implementations -// of ingesters to be passed into the API.RegisterIngester() method. -type Ingester interface { - client.IngesterServer - FlushHandler(http.ResponseWriter, *http.Request) - ShutdownHandler(http.ResponseWriter, *http.Request) - Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) -} - -// RegisterIngester registers the ingesters HTTP and GRPC service -func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) { - client.RegisterIngesterServer(a.server.GRPC, i) - - a.indexPage.AddLink(SectionDangerous, "/ingester/flush", "Trigger a Flush of data from Ingester to storage") - a.indexPage.AddLink(SectionDangerous, "/ingester/shutdown", "Trigger Ingester Shutdown (Dangerous)") - a.RegisterRoute("/ingester/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") - a.RegisterRoute("/ingester/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") - a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. - - // Legacy Routes - a.RegisterRoute("/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") - a.RegisterRoute("/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") - a.RegisterRoute("/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. -} - -// RegisterChunksPurger registers the endpoints associated with the Purger/DeleteStore. 
They do not exactly
-// match the Prometheus API but mirror it closely enough to justify their routing under the Prometheus
-// component.
-func (a *API) RegisterChunksPurger(store *purger.DeleteStore, deleteRequestCancelPeriod time.Duration) {
-	deleteRequestHandler := purger.NewDeleteRequestHandler(store, deleteRequestCancelPeriod, prometheus.DefaultRegisterer)
-
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/admin/tsdb/delete_series"), http.HandlerFunc(deleteRequestHandler.AddDeleteRequestHandler), true, "PUT", "POST")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/admin/tsdb/delete_series"), http.HandlerFunc(deleteRequestHandler.GetAllDeleteRequestsHandler), true, "GET")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/admin/tsdb/cancel_delete_request"), http.HandlerFunc(deleteRequestHandler.CancelDeleteRequestHandler), true, "PUT", "POST")
-
-	// Legacy Routes
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/admin/tsdb/delete_series"), http.HandlerFunc(deleteRequestHandler.AddDeleteRequestHandler), true, "PUT", "POST")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/admin/tsdb/delete_series"), http.HandlerFunc(deleteRequestHandler.GetAllDeleteRequestsHandler), true, "GET")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/admin/tsdb/cancel_delete_request"), http.HandlerFunc(deleteRequestHandler.CancelDeleteRequestHandler), true, "PUT", "POST")
-}
-
-func (a *API) RegisterTenantDeletion(api *purger.TenantDeletionAPI) {
-	a.RegisterRoute("/purger/delete_tenant", http.HandlerFunc(api.DeleteTenant), true, "POST")
-	a.RegisterRoute("/purger/delete_tenant_status", http.HandlerFunc(api.DeleteTenantStatus), true, "GET")
-}
-
-// RegisterRuler registers routes associated with the Ruler service.
-func (a *API) RegisterRuler(r *ruler.Ruler) {
-	a.indexPage.AddLink(SectionAdminEndpoints, "/ruler/ring", "Ruler Ring Status")
-	a.RegisterRoute("/ruler/ring", r, false, "GET", "POST")
-
-	// Administrative API, uses authentication to inform which user's configuration to delete.
- a.RegisterRoute("/ruler/delete_tenant_config", http.HandlerFunc(r.DeleteTenantConfiguration), true, "POST") - - // Legacy Ring Route - a.RegisterRoute("/ruler_ring", r, false, "GET", "POST") - - // List all user rule groups - a.RegisterRoute("/ruler/rule_groups", http.HandlerFunc(r.ListAllRules), false, "GET") - - ruler.RegisterRulerServer(a.server.GRPC, r) -} - -// RegisterRulerAPI registers routes associated with the Ruler API -func (a *API) RegisterRulerAPI(r *ruler.API) { - // Prometheus Rule API Routes - a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/rules"), http.HandlerFunc(r.PrometheusRules), true, "GET") - a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/alerts"), http.HandlerFunc(r.PrometheusAlerts), true, "GET") - - // Ruler API Routes - a.RegisterRoute("/api/v1/rules", http.HandlerFunc(r.ListRules), true, "GET") - a.RegisterRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.ListRules), true, "GET") - a.RegisterRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.GetRuleGroup), true, "GET") - a.RegisterRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.CreateRuleGroup), true, "POST") - a.RegisterRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.DeleteRuleGroup), true, "DELETE") - a.RegisterRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.DeleteNamespace), true, "DELETE") - - // Legacy Prometheus Rule API Routes - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/rules"), http.HandlerFunc(r.PrometheusRules), true, "GET") - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/alerts"), http.HandlerFunc(r.PrometheusAlerts), true, "GET") - - // Legacy Ruler API Routes - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/rules"), http.HandlerFunc(r.ListRules), true, "GET") - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/rules/{namespace}"), http.HandlerFunc(r.ListRules), true, "GET") - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/rules/{namespace}/{groupName}"), http.HandlerFunc(r.GetRuleGroup), true, "GET") - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/rules/{namespace}"), http.HandlerFunc(r.CreateRuleGroup), true, "POST") - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/rules/{namespace}/{groupName}"), http.HandlerFunc(r.DeleteRuleGroup), true, "DELETE") - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/rules/{namespace}"), http.HandlerFunc(r.DeleteNamespace), true, "DELETE") -} - -// RegisterRing registers the ring UI page associated with the distributor for writes. -func (a *API) RegisterRing(r *ring.Ring) { - a.indexPage.AddLink(SectionAdminEndpoints, "/ingester/ring", "Ingester Ring Status") - a.RegisterRoute("/ingester/ring", r, false, "GET", "POST") - - // Legacy Route - a.RegisterRoute("/ring", r, false, "GET", "POST") -} - -// RegisterStoreGateway registers the ring UI page associated with the store-gateway. -func (a *API) RegisterStoreGateway(s *storegateway.StoreGateway) { - storegatewaypb.RegisterStoreGatewayServer(a.server.GRPC, s) - - a.indexPage.AddLink(SectionAdminEndpoints, "/store-gateway/ring", "Store Gateway Ring") - a.RegisterRoute("/store-gateway/ring", http.HandlerFunc(s.RingHandler), false, "GET", "POST") -} - -// RegisterCompactor registers the ring UI page associated with the compactor. 
-func (a *API) RegisterCompactor(c *compactor.Compactor) {
-	a.indexPage.AddLink(SectionAdminEndpoints, "/compactor/ring", "Compactor Ring Status")
-	a.RegisterRoute("/compactor/ring", http.HandlerFunc(c.RingHandler), false, "GET", "POST")
-}
-
-type Distributor interface {
-	querier.Distributor
-	UserStatsHandler(w http.ResponseWriter, r *http.Request)
-}
-
-// RegisterQueryable registers the default routes associated with the querier
-// module.
-func (a *API) RegisterQueryable(
-	queryable storage.SampleAndChunkQueryable,
-	distributor Distributor,
-) {
-	// these routes are always registered to the default server
-	a.RegisterRoute("/api/v1/user_stats", http.HandlerFunc(distributor.UserStatsHandler), true, "GET")
-	a.RegisterRoute("/api/v1/chunks", querier.ChunksHandler(queryable), true, "GET")
-
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/user_stats"), http.HandlerFunc(distributor.UserStatsHandler), true, "GET")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/chunks"), querier.ChunksHandler(queryable), true, "GET")
-}
-
-// RegisterQueryAPI registers the Prometheus API routes with the provided handler.
-func (a *API) RegisterQueryAPI(handler http.Handler) {
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/read"), handler, true, "POST")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/query"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/query_range"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/query_exemplars"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/labels"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/label/{name}/values"), handler, true, "GET")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/series"), handler, true, "GET", "POST", "DELETE")
-	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/metadata"), handler, true, "GET")
-
-	// Register Legacy Routers
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/read"), handler, true, "POST")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/query"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/query_range"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/query_exemplars"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/labels"), handler, true, "GET", "POST")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/label/{name}/values"), handler, true, "GET")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/series"), handler, true, "GET", "POST", "DELETE")
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/metadata"), handler, true, "GET")
-}
-
-// RegisterQueryFrontend registers the Prometheus routes supported by the
-// Cortex querier service. Currently this cannot be registered simultaneously
-// with the Querier.
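Aside, before the frontend registration continues below (not part of the removed file): RegisterQueryAPI above registers every Prometheus endpoint twice, once under the configured Prometheus prefix and once under the legacy prefix. A hedged sketch of that fan-out; the helper name and prefixes are illustrative:

package main

import (
	"net/http"
	"path"
)

// registerUnderPrefixes attaches the same handler to each route under every
// given prefix, mirroring the prefixed/legacy duplication above.
func registerUnderPrefixes(mux *http.ServeMux, h http.Handler, routes []string, prefixes ...string) {
	for _, prefix := range prefixes {
		for _, r := range routes {
			mux.Handle(path.Join(prefix, r), h)
		}
	}
}

func main() {
	mux := http.NewServeMux()
	handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) })
	registerUnderPrefixes(mux, handler, []string{"/api/v1/query", "/api/v1/query_range"}, "/prometheus", "/api/prom")
	_ = http.ListenAndServe(":8080", mux)
}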
-func (a *API) RegisterQueryFrontendHandler(h http.Handler) { - a.RegisterQueryAPI(h) -} - -func (a *API) RegisterQueryFrontend1(f *frontendv1.Frontend) { - frontendv1pb.RegisterFrontendServer(a.server.GRPC, f) -} - -func (a *API) RegisterQueryFrontend2(f *frontendv2.Frontend) { - frontendv2pb.RegisterFrontendForQuerierServer(a.server.GRPC, f) -} - -func (a *API) RegisterQueryScheduler(f *scheduler.Scheduler) { - schedulerpb.RegisterSchedulerForFrontendServer(a.server.GRPC, f) - schedulerpb.RegisterSchedulerForQuerierServer(a.server.GRPC, f) -} - -// RegisterServiceMapHandler registers the Cortex structs service handler -// TODO: Refactor this code to be accomplished using the services.ServiceManager -// or a future module manager #2291 -func (a *API) RegisterServiceMapHandler(handler http.Handler) { - a.indexPage.AddLink(SectionAdminEndpoints, "/services", "Service Status") - a.RegisterRoute("/services", handler, false, "GET") -} - -func (a *API) RegisterMemberlistKV(handler http.Handler) { - a.indexPage.AddLink(SectionAdminEndpoints, "/memberlist", "Memberlist Status") - a.RegisterRoute("/memberlist", handler, false, "GET") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go deleted file mode 100644 index b775573e8..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go +++ /dev/null @@ -1,275 +0,0 @@ -package api - -import ( - "context" - "html/template" - "net/http" - "path" - "sync" - - "github.com/go-kit/log" - "github.com/gorilla/mux" - "github.com/grafana/regexp" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/route" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - v1 "github.com/prometheus/prometheus/web/api/v1" - "github.com/weaveworks/common/instrument" - "github.com/weaveworks/common/middleware" - - "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/util" -) - -const ( - SectionAdminEndpoints = "Admin Endpoints:" - SectionDangerous = "Dangerous:" -) - -func newIndexPageContent() *IndexPageContent { - return &IndexPageContent{ - content: map[string]map[string]string{}, - } -} - -// IndexPageContent is a map of sections to path -> description. -type IndexPageContent struct { - mu sync.Mutex - content map[string]map[string]string -} - -func (pc *IndexPageContent) AddLink(section, path, description string) { - pc.mu.Lock() - defer pc.mu.Unlock() - - sectionMap := pc.content[section] - if sectionMap == nil { - sectionMap = make(map[string]string) - pc.content[section] = sectionMap - } - - sectionMap[path] = description -} - -func (pc *IndexPageContent) GetContent() map[string]map[string]string { - pc.mu.Lock() - defer pc.mu.Unlock() - - result := map[string]map[string]string{} - for k, v := range pc.content { - sm := map[string]string{} - for smK, smV := range v { - sm[smK] = smV - } - result[k] = sm - } - return result -} - -var indexPageTemplate = ` - - - - - Cortex - - -

-	<body>
-		<h1>Cortex</h1>
-		{{ range $s, $links := . }}
-		<p>{{ $s }}</p>
-		<ul>
-			{{ range $path, $desc := $links }}
-				<li><a href="{{ AddPathPrefix $path }}">{{ $desc }}</a></li>
-			{{ end }}
-		</ul>
- {{ end }} - -` - -func indexHandler(httpPathPrefix string, content *IndexPageContent) http.HandlerFunc { - templ := template.New("main") - templ.Funcs(map[string]interface{}{ - "AddPathPrefix": func(link string) string { - return path.Join(httpPathPrefix, link) - }, - }) - template.Must(templ.Parse(indexPageTemplate)) - - return func(w http.ResponseWriter, r *http.Request) { - err := templ.Execute(w, content.GetContent()) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - } -} - -func (cfg *Config) configHandler(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc { - if cfg.CustomConfigHandler != nil { - return cfg.CustomConfigHandler(actualCfg, defaultCfg) - } - return DefaultConfigHandler(actualCfg, defaultCfg) -} - -func DefaultConfigHandler(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - var output interface{} - switch r.URL.Query().Get("mode") { - case "diff": - defaultCfgObj, err := util.YAMLMarshalUnmarshal(defaultCfg) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - actualCfgObj, err := util.YAMLMarshalUnmarshal(actualCfg) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - diff, err := util.DiffConfig(defaultCfgObj, actualCfgObj) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - output = diff - - case "defaults": - output = defaultCfg - default: - output = actualCfg - } - - util.WriteYAMLResponse(w, output) - } -} - -// NewQuerierHandler returns a HTTP handler that can be used by the querier service to -// either register with the frontend worker query processor or with the external HTTP -// server to fulfill the Prometheus query API. -func NewQuerierHandler( - cfg Config, - queryable storage.SampleAndChunkQueryable, - exemplarQueryable storage.ExemplarQueryable, - engine *promql.Engine, - distributor Distributor, - tombstonesLoader *purger.TombstonesLoader, - reg prometheus.Registerer, - logger log.Logger, -) http.Handler { - // Prometheus histograms for requests to the querier. - querierRequestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "querier_request_duration_seconds", - Help: "Time (in seconds) spent serving HTTP requests to the querier.", - Buckets: instrument.DefBuckets, - }, []string{"method", "route", "status_code", "ws"}) - - receivedMessageSize := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "querier_request_message_bytes", - Help: "Size (in bytes) of messages received in the request to the querier.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - - sentMessageSize := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "querier_response_message_bytes", - Help: "Size (in bytes) of messages sent in response by the querier.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - - inflightRequests := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "querier_inflight_requests", - Help: "Current number of inflight requests to the querier.", - }, []string{"method", "route"}) - - api := v1.NewAPI( - engine, - querier.NewErrorTranslateSampleAndChunkQueryable(queryable), // Translate errors to errors expected by API. - nil, // No remote write support. 
- exemplarQueryable, - func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} }, - func(context.Context) v1.AlertmanagerRetriever { return &querier.DummyAlertmanagerRetriever{} }, - func() config.Config { return config.Config{} }, - map[string]string{}, // TODO: include configuration flags - v1.GlobalURLOptions{}, - func(f http.HandlerFunc) http.HandlerFunc { return f }, - nil, // Only needed for admin APIs. - "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. - false, // Disable admin APIs. - logger, - func(context.Context) v1.RulesRetriever { return &querier.DummyRulesRetriever{} }, - 0, 0, 0, // Remote read samples and concurrency limit. - false, - regexp.MustCompile(".*"), - func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, - &v1.PrometheusVersion{}, - // This is used for the stats API which we should not support. Or find other ways to. - prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }), - reg, - nil, - ) - - router := mux.NewRouter() - - // Use a separate metric for the querier in order to differentiate requests from the query-frontend when - // running Cortex as a single binary. - inst := middleware.Instrument{ - RouteMatcher: router, - Duration: querierRequestDuration, - RequestBodySize: receivedMessageSize, - ResponseBodySize: sentMessageSize, - InflightRequests: inflightRequests, - } - cacheGenHeaderMiddleware := getHTTPCacheGenNumberHeaderSetterMiddleware(tombstonesLoader) - middlewares := middleware.Merge(inst, cacheGenHeaderMiddleware) - router.Use(middlewares.Wrap) - - // Define the prefixes for all routes - prefix := path.Join(cfg.ServerPrefix, cfg.PrometheusHTTPPrefix) - legacyPrefix := path.Join(cfg.ServerPrefix, cfg.LegacyHTTPPrefix) - - promRouter := route.New().WithPrefix(path.Join(prefix, "/api/v1")) - api.Register(promRouter) - - legacyPromRouter := route.New().WithPrefix(path.Join(legacyPrefix, "/api/v1")) - api.Register(legacyPromRouter) - - // TODO(gotjosh): This custom handler is temporary until we're able to vendor the changes in: - // https://github.com/prometheus/prometheus/pull/7125/files - router.Path(path.Join(prefix, "/api/v1/metadata")).Handler(querier.MetadataHandler(distributor)) - router.Path(path.Join(prefix, "/api/v1/read")).Handler(querier.RemoteReadHandler(queryable, logger)) - router.Path(path.Join(prefix, "/api/v1/read")).Methods("POST").Handler(promRouter) - router.Path(path.Join(prefix, "/api/v1/query")).Methods("GET", "POST").Handler(promRouter) - router.Path(path.Join(prefix, "/api/v1/query_range")).Methods("GET", "POST").Handler(promRouter) - router.Path(path.Join(prefix, "/api/v1/query_exemplars")).Methods("GET", "POST").Handler(promRouter) - router.Path(path.Join(prefix, "/api/v1/labels")).Methods("GET", "POST").Handler(promRouter) - router.Path(path.Join(prefix, "/api/v1/label/{name}/values")).Methods("GET").Handler(promRouter) - router.Path(path.Join(prefix, "/api/v1/series")).Methods("GET", "POST", "DELETE").Handler(promRouter) - router.Path(path.Join(prefix, "/api/v1/metadata")).Methods("GET").Handler(promRouter) - - // TODO(gotjosh): This custom handler is temporary until we're able to vendor the changes in: - // https://github.com/prometheus/prometheus/pull/7125/files - router.Path(path.Join(legacyPrefix, "/api/v1/metadata")).Handler(querier.MetadataHandler(distributor)) - router.Path(path.Join(legacyPrefix, "/api/v1/read")).Handler(querier.RemoteReadHandler(queryable, logger)) - 
router.Path(path.Join(legacyPrefix, "/api/v1/read")).Methods("POST").Handler(legacyPromRouter) - router.Path(path.Join(legacyPrefix, "/api/v1/query")).Methods("GET", "POST").Handler(legacyPromRouter) - router.Path(path.Join(legacyPrefix, "/api/v1/query_range")).Methods("GET", "POST").Handler(legacyPromRouter) - router.Path(path.Join(legacyPrefix, "/api/v1/query_exemplars")).Methods("GET", "POST").Handler(legacyPromRouter) - router.Path(path.Join(legacyPrefix, "/api/v1/labels")).Methods("GET", "POST").Handler(legacyPromRouter) - router.Path(path.Join(legacyPrefix, "/api/v1/label/{name}/values")).Methods("GET").Handler(legacyPromRouter) - router.Path(path.Join(legacyPrefix, "/api/v1/series")).Methods("GET", "POST", "DELETE").Handler(legacyPromRouter) - router.Path(path.Join(legacyPrefix, "/api/v1/metadata")).Methods("GET").Handler(legacyPromRouter) - - // Track execution time. - return stats.NewWallTimeMiddleware().Wrap(router) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go b/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go deleted file mode 100644 index 7e0e88e80..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go +++ /dev/null @@ -1,29 +0,0 @@ -package api - -import ( - "net/http" - - "github.com/weaveworks/common/middleware" - - "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/querier/queryrange" - "github.com/cortexproject/cortex/pkg/tenant" -) - -// middleware for setting cache gen header to let consumer of response know all previous responses could be invalid due to delete operation -func getHTTPCacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader *purger.TombstonesLoader) middleware.Interface { - return middleware.Func(func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - tenantIDs, err := tenant.TenantIDs(r.Context()) - if err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - cacheGenNumber := cacheGenNumbersLoader.GetResultsCacheGenNumber(tenantIDs) - - w.Header().Set(queryrange.ResultsCacheGenNumberHeaderName, cacheGenNumber) - next.ServeHTTP(w, r) - }) - }) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/authenticator.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/authenticator.go deleted file mode 100644 index d13741e7b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/authenticator.go +++ /dev/null @@ -1,43 +0,0 @@ -package cassandra - -import ( - "fmt" - - "github.com/gocql/gocql" -) - -// CustomPasswordAuthenticator provides the default behaviour for Username/Password authentication with -// Cassandra while allowing users to specify a non-default Authenticator to accept. 
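Aside, before CustomPasswordAuthenticator continues below (not part of the removed file): the Challenge method that follows frames its reply as a NUL byte, the username, another NUL byte, then the password. A small sketch of just that framing, with illustrative names:

package main

import "fmt"

// buildResponse mirrors the frame produced by Challenge below:
// 0x00, username, 0x00, password.
func buildResponse(username, password string) []byte {
	resp := make([]byte, 2+len(username)+len(password))
	resp[0] = 0
	copy(resp[1:], username)
	resp[len(username)+1] = 0
	copy(resp[2+len(username):], password)
	return resp
}

func main() {
	fmt.Printf("%q\n", buildResponse("user", "pass")) // "\x00user\x00pass"
}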
-type CustomPasswordAuthenticator struct { - ApprovedAuthenticators []string - Username string - Password string -} - -func (p CustomPasswordAuthenticator) approve(authenticator string) bool { - for _, s := range p.ApprovedAuthenticators { - if authenticator == s { - return true - } - } - return false -} - -// Challenge verifies the name of the authenticator and formats the provided username and password -// into a response -func (p CustomPasswordAuthenticator) Challenge(req []byte) ([]byte, gocql.Authenticator, error) { - if !p.approve(string(req)) { - return nil, nil, fmt.Errorf("unexpected authenticator %q", req) - } - resp := make([]byte, 2+len(p.Username)+len(p.Password)) - resp[0] = 0 - copy(resp[1:], p.Username) - resp[len(p.Username)+1] = 0 - copy(resp[2+len(p.Username):], p.Password) - return resp, nil, nil -} - -// Success returns nil by default, identical to the default PasswordAuthenticator -func (p CustomPasswordAuthenticator) Success(data []byte) error { - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go deleted file mode 100644 index b55cb16af..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go +++ /dev/null @@ -1,76 +0,0 @@ -package cassandra - -import ( - "context" - "io" - "os" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/testutils" - "github.com/cortexproject/cortex/pkg/util/flagext" -) - -// GOCQL doesn't provide nice mocks, so we use a real Cassandra instance. -// To enable these tests: -// $ docker run -d --name cassandra --rm -p 9042:9042 cassandra:3.11 -// $ CASSANDRA_TEST_ADDRESSES=localhost:9042 go test ./pkg/chunk/storage - -type fixture struct { - name string - addresses string -} - -func (f *fixture) Name() string { - return f.name -} - -func (f *fixture) Clients() (chunk.IndexClient, chunk.Client, chunk.TableClient, chunk.SchemaConfig, io.Closer, error) { - var cfg Config - flagext.DefaultValues(&cfg) - cfg.Addresses = f.addresses - cfg.Keyspace = "test" - cfg.Consistency = "QUORUM" - cfg.ReplicationFactor = 1 - - // Get a SchemaConfig with the defaults. - schemaConfig := testutils.DefaultSchemaConfig("cassandra") - - storageClient, err := NewStorageClient(cfg, schemaConfig, nil) - if err != nil { - return nil, nil, nil, schemaConfig, nil, err - } - - objectClient, err := NewObjectClient(cfg, schemaConfig, nil) - if err != nil { - return nil, nil, nil, schemaConfig, nil, err - } - - tableClient, err := NewTableClient(context.Background(), cfg, nil) - if err != nil { - return nil, nil, nil, schemaConfig, nil, err - } - - closer := testutils.CloserFunc(func() error { - storageClient.Stop() - objectClient.Stop() - tableClient.Stop() - return nil - }) - - return storageClient, objectClient, tableClient, schemaConfig, closer, nil -} - -// Fixtures for unit testing Cassandra integration. 
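[Editor's note — not part of the patch: as the body below shows, Fixtures returns nil unless the CASSANDRA_TEST_ADDRESSES environment variable is set, so these Cassandra integration fixtures are skipped unless a test instance is configured.]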
-func Fixtures() []testutils.Fixture { - addresses := os.Getenv("CASSANDRA_TEST_ADDRESSES") - if addresses == "" { - return nil - } - - return []testutils.Fixture{ - &fixture{ - name: "Cassandra", - addresses: addresses, - }, - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/instrumentation.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/instrumentation.go deleted file mode 100644 index 045b51ef6..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/instrumentation.go +++ /dev/null @@ -1,38 +0,0 @@ -package cassandra - -import ( - "context" - "strings" - - "github.com/gocql/gocql" - "github.com/prometheus/client_golang/prometheus" -) - -var requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "cassandra_request_duration_seconds", - Help: "Time spent doing Cassandra requests.", - Buckets: prometheus.ExponentialBuckets(0.001, 4, 9), -}, []string{"operation", "status_code"}) - -func init() { - prometheus.MustRegister(requestDuration) -} - -type observer struct{} - -func err(err error) string { - if err != nil { - return "500" - } - return "200" -} - -func (observer) ObserveBatch(ctx context.Context, b gocql.ObservedBatch) { - requestDuration.WithLabelValues("BATCH", err(b.Err)).Observe(b.End.Sub(b.Start).Seconds()) -} - -func (observer) ObserveQuery(cts context.Context, q gocql.ObservedQuery) { - parts := strings.SplitN(q.Statement, " ", 2) - requestDuration.WithLabelValues(parts[0], err(q.Err)).Observe(q.End.Sub(q.Start).Seconds()) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go deleted file mode 100644 index 5283242e8..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ /dev/null @@ -1,565 +0,0 @@ -package cassandra - -import ( - "bytes" - "context" - "crypto/tls" - "flag" - "fmt" - "io/ioutil" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gocql/gocql" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/sync/semaphore" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util/flagext" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -// Config for a StorageClient -type Config struct { - Addresses string `yaml:"addresses"` - Port int `yaml:"port"` - Keyspace string `yaml:"keyspace"` - Consistency string `yaml:"consistency"` - ReplicationFactor int `yaml:"replication_factor"` - DisableInitialHostLookup bool `yaml:"disable_initial_host_lookup"` - SSL bool `yaml:"SSL"` - HostVerification bool `yaml:"host_verification"` - HostSelectionPolicy string `yaml:"host_selection_policy"` - CAPath string `yaml:"CA_path"` - CertPath string `yaml:"tls_cert_path"` - KeyPath string `yaml:"tls_key_path"` - Auth bool `yaml:"auth"` - Username string `yaml:"username"` - Password flagext.Secret `yaml:"password"` - PasswordFile string `yaml:"password_file"` - CustomAuthenticators flagext.StringSlice `yaml:"custom_authenticators"` - Timeout time.Duration `yaml:"timeout"` - ConnectTimeout time.Duration `yaml:"connect_timeout"` - ReconnectInterval time.Duration `yaml:"reconnect_interval"` - Retries int `yaml:"max_retries"` - MaxBackoff time.Duration `yaml:"retry_max_backoff"` - MinBackoff time.Duration `yaml:"retry_min_backoff"` - QueryConcurrency int 
`yaml:"query_concurrency"` - NumConnections int `yaml:"num_connections"` - ConvictHosts bool `yaml:"convict_hosts_on_failure"` - TableOptions string `yaml:"table_options"` -} - -const ( - HostPolicyRoundRobin = "round-robin" - HostPolicyTokenAware = "token-aware" -) - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Addresses, "cassandra.addresses", "", "Comma-separated hostnames or IPs of Cassandra instances.") - f.IntVar(&cfg.Port, "cassandra.port", 9042, "Port that Cassandra is running on") - f.StringVar(&cfg.Keyspace, "cassandra.keyspace", "", "Keyspace to use in Cassandra.") - f.StringVar(&cfg.Consistency, "cassandra.consistency", "QUORUM", "Consistency level for Cassandra.") - f.IntVar(&cfg.ReplicationFactor, "cassandra.replication-factor", 3, "Replication factor to use in Cassandra.") - f.BoolVar(&cfg.DisableInitialHostLookup, "cassandra.disable-initial-host-lookup", false, "Instruct the cassandra driver to not attempt to get host info from the system.peers table.") - f.BoolVar(&cfg.SSL, "cassandra.ssl", false, "Use SSL when connecting to cassandra instances.") - f.BoolVar(&cfg.HostVerification, "cassandra.host-verification", true, "Require SSL certificate validation.") - f.StringVar(&cfg.HostSelectionPolicy, "cassandra.host-selection-policy", HostPolicyRoundRobin, "Policy for selecting Cassandra host. Supported values are: round-robin, token-aware.") - f.StringVar(&cfg.CAPath, "cassandra.ca-path", "", "Path to certificate file to verify the peer.") - f.StringVar(&cfg.CertPath, "cassandra.tls-cert-path", "", "Path to certificate file used by TLS.") - f.StringVar(&cfg.KeyPath, "cassandra.tls-key-path", "", "Path to private key file used by TLS.") - f.BoolVar(&cfg.Auth, "cassandra.auth", false, "Enable password authentication when connecting to cassandra.") - f.StringVar(&cfg.Username, "cassandra.username", "", "Username to use when connecting to cassandra.") - f.Var(&cfg.Password, "cassandra.password", "Password to use when connecting to cassandra.") - f.StringVar(&cfg.PasswordFile, "cassandra.password-file", "", "File containing password to use when connecting to cassandra.") - f.Var(&cfg.CustomAuthenticators, "cassandra.custom-authenticator", "If set, when authenticating with cassandra a custom authenticator will be expected during the handshake. This flag can be set multiple times.") - f.DurationVar(&cfg.Timeout, "cassandra.timeout", 2*time.Second, "Timeout when connecting to cassandra.") - f.DurationVar(&cfg.ConnectTimeout, "cassandra.connect-timeout", 5*time.Second, "Initial connection timeout, used during initial dial to server.") - f.DurationVar(&cfg.ReconnectInterval, "cassandra.reconnent-interval", 1*time.Second, "Interval to retry connecting to cassandra nodes marked as DOWN.") - f.IntVar(&cfg.Retries, "cassandra.max-retries", 0, "Number of retries to perform on a request. Set to 0 to disable retries.") - f.DurationVar(&cfg.MinBackoff, "cassandra.retry-min-backoff", 100*time.Millisecond, "Minimum time to wait before retrying a failed request.") - f.DurationVar(&cfg.MaxBackoff, "cassandra.retry-max-backoff", 10*time.Second, "Maximum time to wait before retrying a failed request.") - f.IntVar(&cfg.QueryConcurrency, "cassandra.query-concurrency", 0, "Limit number of concurrent queries to Cassandra. 
Set to 0 to disable the limit.") - f.IntVar(&cfg.NumConnections, "cassandra.num-connections", 2, "Number of TCP connections per host.") - f.BoolVar(&cfg.ConvictHosts, "cassandra.convict-hosts-on-failure", true, "Convict hosts of being down on failure.") - f.StringVar(&cfg.TableOptions, "cassandra.table-options", "", "Table options used to create index or chunk tables. This value is used as plain text in the table `WITH` like this, \"CREATE TABLE (...) WITH \". For details, see https://cortexmetrics.io/docs/production/cassandra. By default it will use the default table options of your Cassandra cluster.") -} - -func (cfg *Config) Validate() error { - if cfg.Password.Value != "" && cfg.PasswordFile != "" { - return errors.Errorf("The password and password_file config options are mutually exclusive.") - } - if cfg.SSL && cfg.HostVerification && len(strings.Split(cfg.Addresses, ",")) != 1 { - return errors.Errorf("Host verification is only possible for a single host.") - } - if cfg.SSL && cfg.CertPath != "" && cfg.KeyPath == "" { - return errors.Errorf("TLS certificate specified, but private key configuration is missing.") - } - if cfg.SSL && cfg.KeyPath != "" && cfg.CertPath == "" { - return errors.Errorf("TLS private key specified, but certificate configuration is missing.") - } - return nil -} - -func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Session, error) { - cluster := gocql.NewCluster(strings.Split(cfg.Addresses, ",")...) - cluster.Port = cfg.Port - cluster.Keyspace = cfg.Keyspace - cluster.BatchObserver = observer{} - cluster.QueryObserver = observer{} - cluster.Timeout = cfg.Timeout - cluster.ConnectTimeout = cfg.ConnectTimeout - cluster.ReconnectInterval = cfg.ReconnectInterval - cluster.NumConns = cfg.NumConnections - cluster.Logger = log.With(util_log.Logger, "module", "gocql", "client", name) - cluster.Registerer = prometheus.WrapRegistererWith( - prometheus.Labels{"client": name}, reg) - if cfg.Retries > 0 { - cluster.RetryPolicy = &gocql.ExponentialBackoffRetryPolicy{ - NumRetries: cfg.Retries, - Min: cfg.MinBackoff, - Max: cfg.MaxBackoff, - } - } - if !cfg.ConvictHosts { - cluster.ConvictionPolicy = noopConvictionPolicy{} - } - if err := cfg.setClusterConfig(cluster); err != nil { - return nil, errors.WithStack(err) - } - - session, err := cluster.CreateSession() - if err == nil { - return session, nil - } - // ErrNoConnectionsStarted will be returned if keyspace don't exist or is invalid. - // ref. 
https://github.com/gocql/gocql/blob/07ace3bab0f84bb88477bab5d79ba1f7e1da0169/cassandra_test.go#L85-L97 - if err != gocql.ErrNoConnectionsStarted { - return nil, errors.WithStack(err) - } - // keyspace not exist - if err := cfg.createKeyspace(); err != nil { - return nil, errors.WithStack(err) - } - session, err = cluster.CreateSession() - return session, errors.WithStack(err) -} - -// apply config settings to a cassandra ClusterConfig -func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) error { - consistency, err := gocql.ParseConsistencyWrapper(cfg.Consistency) - if err != nil { - return errors.Wrap(err, "unable to parse the configured consistency") - } - - cluster.Consistency = consistency - cluster.DisableInitialHostLookup = cfg.DisableInitialHostLookup - - if cfg.SSL { - tlsConfig := &tls.Config{} - - if cfg.CertPath != "" { - cert, err := tls.LoadX509KeyPair(cfg.CertPath, cfg.KeyPath) - if err != nil { - return errors.Wrap(err, "Unable to load TLS certificate and private key") - } - - tlsConfig.Certificates = []tls.Certificate{cert} - } - - if cfg.HostVerification { - tlsConfig.ServerName = strings.Split(cfg.Addresses, ",")[0] - - cluster.SslOpts = &gocql.SslOptions{ - CaPath: cfg.CAPath, - EnableHostVerification: true, - Config: tlsConfig, - } - } else { - cluster.SslOpts = &gocql.SslOptions{ - EnableHostVerification: false, - Config: tlsConfig, - } - } - } - - if cfg.HostSelectionPolicy == HostPolicyRoundRobin { - cluster.PoolConfig.HostSelectionPolicy = gocql.RoundRobinHostPolicy() - } else if cfg.HostSelectionPolicy == HostPolicyTokenAware { - cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(gocql.RoundRobinHostPolicy()) - } else { - return errors.New("Unknown host selection policy") - } - - if cfg.Auth { - password := cfg.Password.Value - if cfg.PasswordFile != "" { - passwordBytes, err := ioutil.ReadFile(cfg.PasswordFile) - if err != nil { - return errors.Errorf("Could not read Cassandra password file: %v", err) - } - passwordBytes = bytes.TrimRight(passwordBytes, "\n") - password = string(passwordBytes) - } - if len(cfg.CustomAuthenticators) != 0 { - cluster.Authenticator = CustomPasswordAuthenticator{ - ApprovedAuthenticators: cfg.CustomAuthenticators, - Username: cfg.Username, - Password: password, - } - return nil - } - cluster.Authenticator = gocql.PasswordAuthenticator{ - Username: cfg.Username, - Password: password, - } - } - return nil -} - -// createKeyspace will create the desired keyspace if it doesn't exist. -func (cfg *Config) createKeyspace() error { - cluster := gocql.NewCluster(strings.Split(cfg.Addresses, ",")...) - cluster.Port = cfg.Port - cluster.Keyspace = "system" - cluster.Timeout = 20 * time.Second - cluster.ConnectTimeout = 20 * time.Second - - if err := cfg.setClusterConfig(cluster); err != nil { - return errors.WithStack(err) - } - - session, err := cluster.CreateSession() - if err != nil { - return errors.WithStack(err) - } - defer session.Close() - - err = session.Query(fmt.Sprintf( - `CREATE KEYSPACE IF NOT EXISTS %s - WITH replication = { - 'class' : 'SimpleStrategy', - 'replication_factor' : %d - }`, - cfg.Keyspace, cfg.ReplicationFactor)).Exec() - return errors.WithStack(err) -} - -// StorageClient implements chunk.IndexClient and chunk.ObjectClient for Cassandra. -type StorageClient struct { - cfg Config - schemaCfg chunk.SchemaConfig - readSession *gocql.Session - writeSession *gocql.Session - querySemaphore *semaphore.Weighted -} - -// NewStorageClient returns a new StorageClient. 
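[Editor's note — not part of the patch: the constructor below opens two independent gocql sessions, labelled "index-read" and "index-write" for metrics, and allocates an optional weighted semaphore that caps in-flight queries when QueryConcurrency > 0.]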
-func NewStorageClient(cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*StorageClient, error) {
-	readSession, err := cfg.session("index-read", registerer)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	writeSession, err := cfg.session("index-write", registerer)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	var querySemaphore *semaphore.Weighted
-	if cfg.QueryConcurrency > 0 {
-		querySemaphore = semaphore.NewWeighted(int64(cfg.QueryConcurrency))
-	}
-
-	client := &StorageClient{
-		cfg:            cfg,
-		schemaCfg:      schemaCfg,
-		readSession:    readSession,
-		writeSession:   writeSession,
-		querySemaphore: querySemaphore,
-	}
-	return client, nil
-}
-
-// Stop implements chunk.IndexClient.
-func (s *StorageClient) Stop() {
-	s.readSession.Close()
-	s.writeSession.Close()
-}
-
-// Cassandra batching isn't really useful in this case; it's more for doing multiple
-// atomic writes. Therefore we just do a bunch of writes in parallel.
-type writeBatch struct {
-	entries []chunk.IndexEntry
-	deletes []chunk.IndexEntry
-}
-
-// NewWriteBatch implements chunk.IndexClient.
-func (s *StorageClient) NewWriteBatch() chunk.WriteBatch {
-	return &writeBatch{}
-}
-
-func (b *writeBatch) Add(tableName, hashValue string, rangeValue []byte, value []byte) {
-	b.entries = append(b.entries, chunk.IndexEntry{
-		TableName:  tableName,
-		HashValue:  hashValue,
-		RangeValue: rangeValue,
-		Value:      value,
-	})
-}
-
-func (b *writeBatch) Delete(tableName, hashValue string, rangeValue []byte) {
-	b.deletes = append(b.deletes, chunk.IndexEntry{
-		TableName:  tableName,
-		HashValue:  hashValue,
-		RangeValue: rangeValue,
-	})
-}
-
-// BatchWrite implements chunk.IndexClient.
-func (s *StorageClient) BatchWrite(ctx context.Context, batch chunk.WriteBatch) error {
-	b := batch.(*writeBatch)
-
-	for _, entry := range b.entries {
-		err := s.writeSession.Query(fmt.Sprintf("INSERT INTO %s (hash, range, value) VALUES (?, ?, ?)",
-			entry.TableName), entry.HashValue, entry.RangeValue, entry.Value).WithContext(ctx).Exec()
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	}
-
-	for _, entry := range b.deletes {
-		err := s.writeSession.Query(fmt.Sprintf("DELETE FROM %s WHERE hash = ? and range = ?",
-			entry.TableName), entry.HashValue, entry.RangeValue).WithContext(ctx).Exec()
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	}
-
-	return nil
-}
-
-// QueryPages implements chunk.IndexClient.
-func (s *StorageClient) QueryPages(ctx context.Context, queries []chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) bool) error {
-	return util.DoParallelQueries(ctx, s.query, queries, callback)
-}
-
-func (s *StorageClient) query(ctx context.Context, query chunk.IndexQuery, callback util.Callback) error {
-	if s.querySemaphore != nil {
-		if err := s.querySemaphore.Acquire(ctx, 1); err != nil {
-			return err
-		}
-		defer s.querySemaphore.Release(1)
-	}
-
-	var q *gocql.Query
-
-	switch {
-	case len(query.RangeValuePrefix) > 0 && query.ValueEqual == nil:
-		q = s.readSession.Query(fmt.Sprintf("SELECT range, value FROM %s WHERE hash = ? AND range >= ? AND range < ?",
-			query.TableName), query.HashValue, query.RangeValuePrefix, append(query.RangeValuePrefix, '\xff'))
-
-	case len(query.RangeValuePrefix) > 0 && query.ValueEqual != nil:
-		q = s.readSession.Query(fmt.Sprintf("SELECT range, value FROM %s WHERE hash = ? AND range >= ? AND range < ? AND value = ? 
ALLOW FILTERING", - query.TableName), query.HashValue, query.RangeValuePrefix, append(query.RangeValuePrefix, '\xff'), query.ValueEqual) - - case len(query.RangeValueStart) > 0 && query.ValueEqual == nil: - q = s.readSession.Query(fmt.Sprintf("SELECT range, value FROM %s WHERE hash = ? AND range >= ?", - query.TableName), query.HashValue, query.RangeValueStart) - - case len(query.RangeValueStart) > 0 && query.ValueEqual != nil: - q = s.readSession.Query(fmt.Sprintf("SELECT range, value FROM %s WHERE hash = ? AND range >= ? AND value = ? ALLOW FILTERING", - query.TableName), query.HashValue, query.RangeValueStart, query.ValueEqual) - - case query.ValueEqual == nil: - q = s.readSession.Query(fmt.Sprintf("SELECT range, value FROM %s WHERE hash = ?", - query.TableName), query.HashValue) - - case query.ValueEqual != nil: - q = s.readSession.Query(fmt.Sprintf("SELECT range, value FROM %s WHERE hash = ? AND value = ? ALLOW FILTERING", - query.TableName), query.HashValue, query.ValueEqual) - } - - iter := q.WithContext(ctx).Iter() - defer iter.Close() - scanner := iter.Scanner() - for scanner.Next() { - b := &readBatch{} - if err := scanner.Scan(&b.rangeValue, &b.value); err != nil { - return errors.WithStack(err) - } - if !callback(query, b) { - return nil - } - } - return errors.WithStack(scanner.Err()) -} - -// Allow other packages to interact with Cassandra directly -func (s *StorageClient) GetReadSession() *gocql.Session { - return s.readSession -} - -// readBatch represents a batch of rows read from Cassandra. -type readBatch struct { - rangeValue []byte - value []byte -} - -func (r *readBatch) Iterator() chunk.ReadBatchIterator { - return &readBatchIter{ - readBatch: r, - } -} - -type readBatchIter struct { - consumed bool - *readBatch -} - -func (b *readBatchIter) Next() bool { - if b.consumed { - return false - } - b.consumed = true - return true -} - -func (b *readBatchIter) RangeValue() []byte { - return b.rangeValue -} - -func (b *readBatchIter) Value() []byte { - return b.value -} - -// ObjectClient implements chunk.ObjectClient for Cassandra. -type ObjectClient struct { - cfg Config - schemaCfg chunk.SchemaConfig - readSession *gocql.Session - writeSession *gocql.Session - querySemaphore *semaphore.Weighted -} - -// NewObjectClient returns a new ObjectClient. -func NewObjectClient(cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*ObjectClient, error) { - readSession, err := cfg.session("chunks-read", registerer) - if err != nil { - return nil, errors.WithStack(err) - } - - writeSession, err := cfg.session("chunks-write", registerer) - if err != nil { - return nil, errors.WithStack(err) - } - - var querySemaphore *semaphore.Weighted - if cfg.QueryConcurrency > 0 { - querySemaphore = semaphore.NewWeighted(int64(cfg.QueryConcurrency)) - } - - client := &ObjectClient{ - cfg: cfg, - schemaCfg: schemaCfg, - readSession: readSession, - writeSession: writeSession, - querySemaphore: querySemaphore, - } - return client, nil -} - -// PutChunks implements chunk.ObjectClient. -func (s *ObjectClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { - for i := range chunks { - buf, err := chunks[i].Encoded() - if err != nil { - return errors.WithStack(err) - } - key := chunks[i].ExternalKey() - tableName, err := s.schemaCfg.ChunkTableFor(chunks[i].From) - if err != nil { - return err - } - - // Must provide a range key, even though its not useds - hence 0x00. 
-		q := s.writeSession.Query(fmt.Sprintf("INSERT INTO %s (hash, range, value) VALUES (?, 0x00, ?)",
-			tableName), key, buf)
-		if err := q.WithContext(ctx).Exec(); err != nil {
-			return errors.WithStack(err)
-		}
-	}
-
-	return nil
-}
-
-// GetChunks implements chunk.ObjectClient.
-func (s *ObjectClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) {
-	return util.GetParallelChunks(ctx, input, s.getChunk)
-}
-
-func (s *ObjectClient) getChunk(ctx context.Context, decodeContext *chunk.DecodeContext, input chunk.Chunk) (chunk.Chunk, error) {
-	if s.querySemaphore != nil {
-		if err := s.querySemaphore.Acquire(ctx, 1); err != nil {
-			return input, err
-		}
-		defer s.querySemaphore.Release(1)
-	}
-
-	tableName, err := s.schemaCfg.ChunkTableFor(input.From)
-	if err != nil {
-		return input, err
-	}
-
-	var buf []byte
-	if err := s.readSession.Query(fmt.Sprintf("SELECT value FROM %s WHERE hash = ?", tableName), input.ExternalKey()).
-		WithContext(ctx).Scan(&buf); err != nil {
-		return input, errors.WithStack(err)
-	}
-	err = input.Decode(decodeContext, buf)
-	return input, err
-}
-
-func (s *ObjectClient) DeleteChunk(ctx context.Context, userID, chunkID string) error {
-	chunkRef, err := chunk.ParseExternalKey(userID, chunkID)
-	if err != nil {
-		return err
-	}
-
-	tableName, err := s.schemaCfg.ChunkTableFor(chunkRef.From)
-	if err != nil {
-		return err
-	}
-
-	q := s.writeSession.Query(fmt.Sprintf("DELETE FROM %s WHERE hash = ?",
-		tableName), chunkID)
-	if err := q.WithContext(ctx).Exec(); err != nil {
-		return errors.WithStack(err)
-	}
-
-	return nil
-}
-
-// Stop implements chunk.ObjectClient.
-func (s *ObjectClient) Stop() {
-	s.readSession.Close()
-	s.writeSession.Close()
-}
-
-type noopConvictionPolicy struct{}
-
-// AddFailure should return `true` if the host should be convicted, `false` otherwise.
-// Convicted means connections are removed - we don't want that.
-// Implements gocql.ConvictionPolicy.
-func (noopConvictionPolicy) AddFailure(err error, host *gocql.HostInfo) bool {
-	level.Error(util_log.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String())
-	return false
-}
-
-// Implements gocql.ConvictionPolicy.
-func (noopConvictionPolicy) Reset(host *gocql.HostInfo) {}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go
deleted file mode 100644
index fc269e264..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package cassandra
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/gocql/gocql"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-type tableClient struct {
-	cfg     Config
-	session *gocql.Session
-}
-
-// NewTableClient returns a new TableClient.
-func NewTableClient(ctx context.Context, cfg Config, registerer prometheus.Registerer) (chunk.TableClient, error) { - session, err := cfg.session("table-manager", registerer) - if err != nil { - return nil, errors.WithStack(err) - } - return &tableClient{ - cfg: cfg, - session: session, - }, nil -} - -func (c *tableClient) ListTables(ctx context.Context) ([]string, error) { - md, err := c.session.KeyspaceMetadata(c.cfg.Keyspace) - if err != nil { - return nil, errors.WithStack(err) - } - result := []string{} - for name := range md.Tables { - result = append(result, name) - } - return result, nil -} - -func (c *tableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error { - query := c.getCreateTableQuery(&desc) - err := c.session.Query(query).WithContext(ctx).Exec() - return errors.WithStack(err) -} - -func (c *tableClient) DeleteTable(ctx context.Context, name string) error { - err := c.session.Query(fmt.Sprintf(` - DROP TABLE IF EXISTS %s;`, name)).WithContext(ctx).Exec() - return errors.WithStack(err) -} - -func (c *tableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { - return chunk.TableDesc{ - Name: name, - }, true, nil -} - -func (c *tableClient) UpdateTable(ctx context.Context, current, expected chunk.TableDesc) error { - return nil -} - -func (c *tableClient) Stop() { - c.session.Close() -} - -func (c *tableClient) getCreateTableQuery(desc *chunk.TableDesc) (query string) { - query = fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - hash text, - range blob, - value blob, - PRIMARY KEY (hash, range) - )`, desc.Name) - if c.cfg.TableOptions != "" { - query = fmt.Sprintf("%s WITH %s", query, c.cfg.TableOptions) - } - return -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.pb.go deleted file mode 100644 index 9ff2df445..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.pb.go +++ /dev/null @@ -1,6481 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: grpc.proto - -package grpc - -import ( - bytes "bytes" - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - empty "github.com/golang/protobuf/ptypes/empty" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
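[Editor's note — not part of the patch: the constant below is the usual generated-code version guard; it compiles only if the vendored proto runtime defines GoGoProtoPackageIsVersion3, so an out-of-date runtime fails the build immediately instead of misbehaving at runtime. A generic sketch of the pattern, with hypothetical names:

	const requiredFeature = true // defined only by new-enough libraries
	const _ = requiredFeature    // fails to compile against older versions
]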
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type PutChunksRequest struct { - Chunks []*Chunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` -} - -func (m *PutChunksRequest) Reset() { *m = PutChunksRequest{} } -func (*PutChunksRequest) ProtoMessage() {} -func (*PutChunksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{0} -} -func (m *PutChunksRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PutChunksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PutChunksRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PutChunksRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutChunksRequest.Merge(m, src) -} -func (m *PutChunksRequest) XXX_Size() int { - return m.Size() -} -func (m *PutChunksRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PutChunksRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PutChunksRequest proto.InternalMessageInfo - -func (m *PutChunksRequest) GetChunks() []*Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -type GetChunksRequest struct { - Chunks []*Chunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` -} - -func (m *GetChunksRequest) Reset() { *m = GetChunksRequest{} } -func (*GetChunksRequest) ProtoMessage() {} -func (*GetChunksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{1} -} -func (m *GetChunksRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetChunksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetChunksRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetChunksRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetChunksRequest.Merge(m, src) -} -func (m *GetChunksRequest) XXX_Size() int { - return m.Size() -} -func (m *GetChunksRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetChunksRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetChunksRequest proto.InternalMessageInfo - -func (m *GetChunksRequest) GetChunks() []*Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -type GetChunksResponse struct { - Chunks []*Chunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` -} - -func (m *GetChunksResponse) Reset() { *m = GetChunksResponse{} } -func (*GetChunksResponse) ProtoMessage() {} -func (*GetChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{2} -} -func (m *GetChunksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetChunksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetChunksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetChunksResponse.Merge(m, src) -} -func (m *GetChunksResponse) XXX_Size() int { - return m.Size() -} -func (m *GetChunksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetChunksResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_GetChunksResponse proto.InternalMessageInfo - -func (m *GetChunksResponse) GetChunks() []*Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -type Chunk struct { - Encoded []byte `protobuf:"bytes,1,opt,name=encoded,proto3" json:"encoded,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - TableName string `protobuf:"bytes,3,opt,name=tableName,proto3" json:"tableName,omitempty"` -} - -func (m *Chunk) Reset() { *m = Chunk{} } -func (*Chunk) ProtoMessage() {} -func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{3} -} -func (m *Chunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Chunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_Chunk.Merge(m, src) -} -func (m *Chunk) XXX_Size() int { - return m.Size() -} -func (m *Chunk) XXX_DiscardUnknown() { - xxx_messageInfo_Chunk.DiscardUnknown(m) -} - -var xxx_messageInfo_Chunk proto.InternalMessageInfo - -func (m *Chunk) GetEncoded() []byte { - if m != nil { - return m.Encoded - } - return nil -} - -func (m *Chunk) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *Chunk) GetTableName() string { - if m != nil { - return m.TableName - } - return "" -} - -type ChunkID struct { - ChunkID string `protobuf:"bytes,1,opt,name=chunkID,proto3" json:"chunkID,omitempty"` -} - -func (m *ChunkID) Reset() { *m = ChunkID{} } -func (*ChunkID) ProtoMessage() {} -func (*ChunkID) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{4} -} -func (m *ChunkID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChunkID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChunkID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChunkID) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChunkID.Merge(m, src) -} -func (m *ChunkID) XXX_Size() int { - return m.Size() -} -func (m *ChunkID) XXX_DiscardUnknown() { - xxx_messageInfo_ChunkID.DiscardUnknown(m) -} - -var xxx_messageInfo_ChunkID proto.InternalMessageInfo - -func (m *ChunkID) GetChunkID() string { - if m != nil { - return m.ChunkID - } - return "" -} - -type DeleteTableRequest struct { - TableName string `protobuf:"bytes,1,opt,name=tableName,proto3" json:"tableName,omitempty"` -} - -func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } -func (*DeleteTableRequest) ProtoMessage() {} -func (*DeleteTableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{5} -} -func (m *DeleteTableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteTableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteTableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteTableRequest.Merge(m, src) -} -func (m *DeleteTableRequest) XXX_Size() int { 
- return m.Size() -} -func (m *DeleteTableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteTableRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteTableRequest proto.InternalMessageInfo - -func (m *DeleteTableRequest) GetTableName() string { - if m != nil { - return m.TableName - } - return "" -} - -type DescribeTableRequest struct { - TableName string `protobuf:"bytes,1,opt,name=tableName,proto3" json:"tableName,omitempty"` -} - -func (m *DescribeTableRequest) Reset() { *m = DescribeTableRequest{} } -func (*DescribeTableRequest) ProtoMessage() {} -func (*DescribeTableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{6} -} -func (m *DescribeTableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DescribeTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DescribeTableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DescribeTableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescribeTableRequest.Merge(m, src) -} -func (m *DescribeTableRequest) XXX_Size() int { - return m.Size() -} -func (m *DescribeTableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DescribeTableRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DescribeTableRequest proto.InternalMessageInfo - -func (m *DescribeTableRequest) GetTableName() string { - if m != nil { - return m.TableName - } - return "" -} - -type WriteBatch struct { - Writes []*IndexEntry `protobuf:"bytes,1,rep,name=writes,proto3" json:"writes,omitempty"` - Deletes []*IndexEntry `protobuf:"bytes,2,rep,name=deletes,proto3" json:"deletes,omitempty"` -} - -func (m *WriteBatch) Reset() { *m = WriteBatch{} } -func (*WriteBatch) ProtoMessage() {} -func (*WriteBatch) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{7} -} -func (m *WriteBatch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteBatch.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteBatch) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteBatch.Merge(m, src) -} -func (m *WriteBatch) XXX_Size() int { - return m.Size() -} -func (m *WriteBatch) XXX_DiscardUnknown() { - xxx_messageInfo_WriteBatch.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteBatch proto.InternalMessageInfo - -func (m *WriteBatch) GetWrites() []*IndexEntry { - if m != nil { - return m.Writes - } - return nil -} - -func (m *WriteBatch) GetDeletes() []*IndexEntry { - if m != nil { - return m.Deletes - } - return nil -} - -type WriteIndexRequest struct { - Writes []*IndexEntry `protobuf:"bytes,1,rep,name=writes,proto3" json:"writes,omitempty"` -} - -func (m *WriteIndexRequest) Reset() { *m = WriteIndexRequest{} } -func (*WriteIndexRequest) ProtoMessage() {} -func (*WriteIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{8} -} -func (m *WriteIndexRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteIndexRequest.Marshal(b, m, deterministic) - } else { - b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteIndexRequest.Merge(m, src) -} -func (m *WriteIndexRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteIndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteIndexRequest proto.InternalMessageInfo - -func (m *WriteIndexRequest) GetWrites() []*IndexEntry { - if m != nil { - return m.Writes - } - return nil -} - -type DeleteIndexRequest struct { - Deletes []*IndexEntry `protobuf:"bytes,1,rep,name=deletes,proto3" json:"deletes,omitempty"` -} - -func (m *DeleteIndexRequest) Reset() { *m = DeleteIndexRequest{} } -func (*DeleteIndexRequest) ProtoMessage() {} -func (*DeleteIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{9} -} -func (m *DeleteIndexRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteIndexRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteIndexRequest.Merge(m, src) -} -func (m *DeleteIndexRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteIndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteIndexRequest proto.InternalMessageInfo - -func (m *DeleteIndexRequest) GetDeletes() []*IndexEntry { - if m != nil { - return m.Deletes - } - return nil -} - -type QueryIndexResponse struct { - Rows []*Row `protobuf:"bytes,1,rep,name=rows,proto3" json:"rows,omitempty"` -} - -func (m *QueryIndexResponse) Reset() { *m = QueryIndexResponse{} } -func (*QueryIndexResponse) ProtoMessage() {} -func (*QueryIndexResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{10} -} -func (m *QueryIndexResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryIndexResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryIndexResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryIndexResponse.Merge(m, src) -} -func (m *QueryIndexResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryIndexResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryIndexResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryIndexResponse proto.InternalMessageInfo - -func (m *QueryIndexResponse) GetRows() []*Row { - if m != nil { - return m.Rows - } - return nil -} - -type Row struct { - RangeValue []byte `protobuf:"bytes,1,opt,name=rangeValue,proto3" json:"rangeValue,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Row) Reset() { *m = Row{} } -func (*Row) ProtoMessage() {} -func (*Row) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{11} -} -func (m *Row) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Row) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { - if deterministic { - return xxx_messageInfo_Row.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Row) XXX_Merge(src proto.Message) { - xxx_messageInfo_Row.Merge(m, src) -} -func (m *Row) XXX_Size() int { - return m.Size() -} -func (m *Row) XXX_DiscardUnknown() { - xxx_messageInfo_Row.DiscardUnknown(m) -} - -var xxx_messageInfo_Row proto.InternalMessageInfo - -func (m *Row) GetRangeValue() []byte { - if m != nil { - return m.RangeValue - } - return nil -} - -func (m *Row) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type IndexEntry struct { - TableName string `protobuf:"bytes,1,opt,name=tableName,proto3" json:"tableName,omitempty"` - HashValue string `protobuf:"bytes,2,opt,name=hashValue,proto3" json:"hashValue,omitempty"` - RangeValue []byte `protobuf:"bytes,3,opt,name=rangeValue,proto3" json:"rangeValue,omitempty"` - Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *IndexEntry) Reset() { *m = IndexEntry{} } -func (*IndexEntry) ProtoMessage() {} -func (*IndexEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{12} -} -func (m *IndexEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IndexEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_IndexEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *IndexEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexEntry.Merge(m, src) -} -func (m *IndexEntry) XXX_Size() int { - return m.Size() -} -func (m *IndexEntry) XXX_DiscardUnknown() { - xxx_messageInfo_IndexEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexEntry proto.InternalMessageInfo - -func (m *IndexEntry) GetTableName() string { - if m != nil { - return m.TableName - } - return "" -} - -func (m *IndexEntry) GetHashValue() string { - if m != nil { - return m.HashValue - } - return "" -} - -func (m *IndexEntry) GetRangeValue() []byte { - if m != nil { - return m.RangeValue - } - return nil -} - -func (m *IndexEntry) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type QueryIndexRequest struct { - TableName string `protobuf:"bytes,1,opt,name=tableName,proto3" json:"tableName,omitempty"` - HashValue string `protobuf:"bytes,2,opt,name=hashValue,proto3" json:"hashValue,omitempty"` - RangeValuePrefix []byte `protobuf:"bytes,3,opt,name=rangeValuePrefix,proto3" json:"rangeValuePrefix,omitempty"` - RangeValueStart []byte `protobuf:"bytes,4,opt,name=rangeValueStart,proto3" json:"rangeValueStart,omitempty"` - ValueEqual []byte `protobuf:"bytes,5,opt,name=valueEqual,proto3" json:"valueEqual,omitempty"` - Immutable bool `protobuf:"varint,6,opt,name=immutable,proto3" json:"immutable,omitempty"` -} - -func (m *QueryIndexRequest) Reset() { *m = QueryIndexRequest{} } -func (*QueryIndexRequest) ProtoMessage() {} -func (*QueryIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{13} -} -func (m *QueryIndexRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryIndexRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - 
n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryIndexRequest.Merge(m, src) -} -func (m *QueryIndexRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryIndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryIndexRequest proto.InternalMessageInfo - -func (m *QueryIndexRequest) GetTableName() string { - if m != nil { - return m.TableName - } - return "" -} - -func (m *QueryIndexRequest) GetHashValue() string { - if m != nil { - return m.HashValue - } - return "" -} - -func (m *QueryIndexRequest) GetRangeValuePrefix() []byte { - if m != nil { - return m.RangeValuePrefix - } - return nil -} - -func (m *QueryIndexRequest) GetRangeValueStart() []byte { - if m != nil { - return m.RangeValueStart - } - return nil -} - -func (m *QueryIndexRequest) GetValueEqual() []byte { - if m != nil { - return m.ValueEqual - } - return nil -} - -func (m *QueryIndexRequest) GetImmutable() bool { - if m != nil { - return m.Immutable - } - return false -} - -type UpdateTableRequest struct { - Current *TableDesc `protobuf:"bytes,1,opt,name=current,proto3" json:"current,omitempty"` - Expected *TableDesc `protobuf:"bytes,2,opt,name=expected,proto3" json:"expected,omitempty"` -} - -func (m *UpdateTableRequest) Reset() { *m = UpdateTableRequest{} } -func (*UpdateTableRequest) ProtoMessage() {} -func (*UpdateTableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{14} -} -func (m *UpdateTableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateTableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UpdateTableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateTableRequest.Merge(m, src) -} -func (m *UpdateTableRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateTableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateTableRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateTableRequest proto.InternalMessageInfo - -func (m *UpdateTableRequest) GetCurrent() *TableDesc { - if m != nil { - return m.Current - } - return nil -} - -func (m *UpdateTableRequest) GetExpected() *TableDesc { - if m != nil { - return m.Expected - } - return nil -} - -type DescribeTableResponse struct { - Desc *TableDesc `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc,omitempty"` - IsActive bool `protobuf:"varint,2,opt,name=isActive,proto3" json:"isActive,omitempty"` -} - -func (m *DescribeTableResponse) Reset() { *m = DescribeTableResponse{} } -func (*DescribeTableResponse) ProtoMessage() {} -func (*DescribeTableResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{15} -} -func (m *DescribeTableResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DescribeTableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DescribeTableResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DescribeTableResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_DescribeTableResponse.Merge(m, src) -} -func (m *DescribeTableResponse) XXX_Size() int { - return m.Size() -} -func (m *DescribeTableResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DescribeTableResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DescribeTableResponse proto.InternalMessageInfo - -func (m *DescribeTableResponse) GetDesc() *TableDesc { - if m != nil { - return m.Desc - } - return nil -} - -func (m *DescribeTableResponse) GetIsActive() bool { - if m != nil { - return m.IsActive - } - return false -} - -type CreateTableRequest struct { - Desc *TableDesc `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc,omitempty"` -} - -func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } -func (*CreateTableRequest) ProtoMessage() {} -func (*CreateTableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{16} -} -func (m *CreateTableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CreateTableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTableRequest.Merge(m, src) -} -func (m *CreateTableRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateTableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTableRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateTableRequest proto.InternalMessageInfo - -func (m *CreateTableRequest) GetDesc() *TableDesc { - if m != nil { - return m.Desc - } - return nil -} - -type TableDesc struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - UseOnDemandIOMode bool `protobuf:"varint,2,opt,name=useOnDemandIOMode,proto3" json:"useOnDemandIOMode,omitempty"` - ProvisionedRead int64 `protobuf:"varint,3,opt,name=provisionedRead,proto3" json:"provisionedRead,omitempty"` - ProvisionedWrite int64 `protobuf:"varint,4,opt,name=provisionedWrite,proto3" json:"provisionedWrite,omitempty"` - Tags map[string]string `protobuf:"bytes,5,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *TableDesc) Reset() { *m = TableDesc{} } -func (*TableDesc) ProtoMessage() {} -func (*TableDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{17} -} -func (m *TableDesc) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TableDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TableDesc.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TableDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_TableDesc.Merge(m, src) -} -func (m *TableDesc) XXX_Size() int { - return m.Size() -} -func (m *TableDesc) XXX_DiscardUnknown() { - xxx_messageInfo_TableDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_TableDesc proto.InternalMessageInfo - -func (m *TableDesc) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *TableDesc) GetUseOnDemandIOMode() bool { - if m != nil { - return m.UseOnDemandIOMode - } - return false -} - -func (m *TableDesc) GetProvisionedRead() int64 { - if m != nil { - 
return m.ProvisionedRead - } - return 0 -} - -func (m *TableDesc) GetProvisionedWrite() int64 { - if m != nil { - return m.ProvisionedWrite - } - return 0 -} - -func (m *TableDesc) GetTags() map[string]string { - if m != nil { - return m.Tags - } - return nil -} - -type ListTablesResponse struct { - TableNames []string `protobuf:"bytes,1,rep,name=tableNames,proto3" json:"tableNames,omitempty"` -} - -func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} } -func (*ListTablesResponse) ProtoMessage() {} -func (*ListTablesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{18} -} -func (m *ListTablesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListTablesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListTablesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListTablesResponse.Merge(m, src) -} -func (m *ListTablesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListTablesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListTablesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListTablesResponse proto.InternalMessageInfo - -func (m *ListTablesResponse) GetTableNames() []string { - if m != nil { - return m.TableNames - } - return nil -} - -type Labels struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Labels) Reset() { *m = Labels{} } -func (*Labels) ProtoMessage() {} -func (*Labels) Descriptor() ([]byte, []int) { - return fileDescriptor_bedfbfc9b54e5600, []int{19} -} -func (m *Labels) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Labels.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Labels) XXX_Merge(src proto.Message) { - xxx_messageInfo_Labels.Merge(m, src) -} -func (m *Labels) XXX_Size() int { - return m.Size() -} -func (m *Labels) XXX_DiscardUnknown() { - xxx_messageInfo_Labels.DiscardUnknown(m) -} - -var xxx_messageInfo_Labels proto.InternalMessageInfo - -func (m *Labels) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Labels) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func init() { - proto.RegisterType((*PutChunksRequest)(nil), "grpc.PutChunksRequest") - proto.RegisterType((*GetChunksRequest)(nil), "grpc.GetChunksRequest") - proto.RegisterType((*GetChunksResponse)(nil), "grpc.GetChunksResponse") - proto.RegisterType((*Chunk)(nil), "grpc.Chunk") - proto.RegisterType((*ChunkID)(nil), "grpc.ChunkID") - proto.RegisterType((*DeleteTableRequest)(nil), "grpc.DeleteTableRequest") - proto.RegisterType((*DescribeTableRequest)(nil), "grpc.DescribeTableRequest") - proto.RegisterType((*WriteBatch)(nil), "grpc.WriteBatch") - proto.RegisterType((*WriteIndexRequest)(nil), "grpc.WriteIndexRequest") - proto.RegisterType((*DeleteIndexRequest)(nil), "grpc.DeleteIndexRequest") - proto.RegisterType((*QueryIndexResponse)(nil), "grpc.QueryIndexResponse") - proto.RegisterType((*Row)(nil), "grpc.Row") - 
proto.RegisterType((*IndexEntry)(nil), "grpc.IndexEntry") - proto.RegisterType((*QueryIndexRequest)(nil), "grpc.QueryIndexRequest") - proto.RegisterType((*UpdateTableRequest)(nil), "grpc.UpdateTableRequest") - proto.RegisterType((*DescribeTableResponse)(nil), "grpc.DescribeTableResponse") - proto.RegisterType((*CreateTableRequest)(nil), "grpc.CreateTableRequest") - proto.RegisterType((*TableDesc)(nil), "grpc.TableDesc") - proto.RegisterMapType((map[string]string)(nil), "grpc.TableDesc.TagsEntry") - proto.RegisterType((*ListTablesResponse)(nil), "grpc.ListTablesResponse") - proto.RegisterType((*Labels)(nil), "grpc.Labels") -} - -func init() { proto.RegisterFile("grpc.proto", fileDescriptor_bedfbfc9b54e5600) } - -var fileDescriptor_bedfbfc9b54e5600 = []byte{ - // 921 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x93, 0xd4, 0x44, - 0x14, 0x9f, 0x9e, 0x7f, 0xbb, 0x79, 0x03, 0xc5, 0x6e, 0x17, 0x42, 0x0c, 0x9a, 0xda, 0x0a, 0x97, - 0x11, 0x75, 0xb0, 0x86, 0xb5, 0x40, 0x29, 0x44, 0x60, 0xb6, 0x74, 0xab, 0x50, 0xa0, 0x45, 0xf4, - 0x66, 0x65, 0x92, 0xc7, 0x6c, 0x8a, 0x99, 0x64, 0x48, 0x3a, 0xfb, 0xe7, 0x62, 0x79, 0xf7, 0xe2, - 0xc7, 0xf0, 0xa3, 0x78, 0xdc, 0x23, 0x47, 0x77, 0xf6, 0xe2, 0x91, 0x8f, 0x60, 0xa5, 0x3b, 0x9d, - 0x64, 0x92, 0x09, 0xbb, 0x7a, 0xeb, 0xfe, 0xbd, 0x7f, 0xbf, 0xd7, 0xaf, 0xdf, 0x7b, 0x00, 0x93, - 0x70, 0xee, 0x0c, 0xe6, 0x61, 0xc0, 0x03, 0xda, 0x4e, 0xce, 0xc6, 0xb5, 0x49, 0x10, 0x4c, 0xa6, - 0x78, 0x53, 0x60, 0xe3, 0xf8, 0xe5, 0x4d, 0x9c, 0xcd, 0xf9, 0x91, 0x54, 0xb1, 0x6e, 0xc3, 0xc6, - 0xd3, 0x98, 0x3f, 0xda, 0x8b, 0xfd, 0x57, 0x11, 0xc3, 0xd7, 0x31, 0x46, 0x9c, 0x5e, 0x87, 0xae, - 0x23, 0x00, 0x9d, 0x6c, 0xb5, 0xfa, 0xbd, 0x61, 0x6f, 0x20, 0x7c, 0x0a, 0x25, 0x96, 0x8a, 0x12, - 0xc3, 0x6f, 0xf0, 0xff, 0x18, 0xde, 0x81, 0xcd, 0x82, 0x61, 0x34, 0x0f, 0xfc, 0x08, 0xcf, 0x67, - 0xf9, 0x0c, 0x3a, 0x02, 0xa0, 0x3a, 0xac, 0xa1, 0xef, 0x04, 0x2e, 0xba, 0x3a, 0xd9, 0x22, 0xfd, - 0x0b, 0x4c, 0x5d, 0xe9, 0x06, 0xb4, 0x5e, 0xe1, 0x91, 0xde, 0xdc, 0x22, 0x7d, 0x8d, 0x25, 0x47, - 0xfa, 0x01, 0x68, 0xdc, 0x1e, 0x4f, 0xf1, 0x7b, 0x7b, 0x86, 0x7a, 0x4b, 0xe0, 0x39, 0x60, 0x5d, - 0x87, 0x35, 0xe1, 0x72, 0x77, 0x94, 0x38, 0x75, 0xe4, 0x51, 0x38, 0xd5, 0x98, 0xba, 0x5a, 0x43, - 0xa0, 0x23, 0x9c, 0x22, 0xc7, 0xe7, 0x89, 0x9d, 0x4a, 0x76, 0xc9, 0x31, 0x29, 0x3b, 0xde, 0x86, - 0xcb, 0x23, 0x8c, 0x9c, 0xd0, 0x1b, 0xff, 0x17, 0xab, 0x31, 0xc0, 0x4f, 0xa1, 0xc7, 0xf1, 0xa1, - 0xcd, 0x9d, 0x3d, 0xda, 0x87, 0xee, 0x41, 0x72, 0x53, 0x8f, 0xb2, 0x21, 0x1f, 0x65, 0xd7, 0x77, - 0xf1, 0x70, 0xc7, 0xe7, 0xe1, 0x11, 0x4b, 0xe5, 0xf4, 0x06, 0xac, 0xb9, 0x82, 0x61, 0xa4, 0x37, - 0x6b, 0x54, 0x95, 0x82, 0x75, 0x0f, 0x36, 0x45, 0x0c, 0x21, 0x53, 0xb4, 0xce, 0x1d, 0xca, 0xfa, - 0x5a, 0x3d, 0xc6, 0x92, 0x7d, 0x81, 0x00, 0x39, 0x8b, 0xc0, 0x2d, 0xa0, 0xcf, 0x62, 0x0c, 0x8f, - 0x52, 0x07, 0xe9, 0x0f, 0xf8, 0x10, 0xda, 0x61, 0x70, 0xa0, 0xcc, 0x35, 0x69, 0xce, 0x82, 0x03, - 0x26, 0x60, 0xeb, 0x2e, 0xb4, 0x58, 0x70, 0x40, 0x4d, 0x80, 0xd0, 0xf6, 0x27, 0xf8, 0xc2, 0x9e, - 0xc6, 0x98, 0x16, 0xbf, 0x80, 0xd0, 0xcb, 0xd0, 0xd9, 0x17, 0xa2, 0xa6, 0x10, 0xc9, 0x8b, 0xf5, - 0x2b, 0x40, 0x4e, 0xe4, 0xdd, 0x25, 0x48, 0xa4, 0x7b, 0x76, 0xb4, 0xf7, 0x22, 0xf3, 0xa2, 0xb1, - 0x1c, 0x28, 0xc5, 0x6f, 0xd5, 0xc7, 0x6f, 0x17, 0xe3, 0x9f, 0x12, 0xd8, 0x2c, 0xa6, 0x7c, 0x8e, - 0xaf, 0x70, 0x06, 0x8f, 0x1b, 0xb0, 0x91, 0x47, 0x7d, 0x1a, 0xe2, 0x4b, 0xef, 0x30, 0x65, 0x53, - 0xc1, 0x69, 0x1f, 0x2e, 0xe5, 0xd8, 0x0f, 0xdc, 0x0e, 0x79, 0xca, 0xae, 0x0c, 0x27, 0xd9, 
0x09, - 0xc2, 0x3b, 0xaf, 0x63, 0x7b, 0xaa, 0x77, 0x64, 0x76, 0x39, 0x92, 0x70, 0xf2, 0x66, 0xb3, 0x58, - 0x90, 0xd4, 0xbb, 0x5b, 0xa4, 0xbf, 0xce, 0x72, 0xc0, 0x9a, 0x02, 0xfd, 0x71, 0xee, 0xda, 0xa5, - 0x36, 0xf9, 0x08, 0xd6, 0x9c, 0x38, 0x0c, 0xd1, 0xe7, 0x22, 0xc7, 0xde, 0xf0, 0x92, 0x2c, 0xad, - 0x50, 0x4a, 0x5a, 0x84, 0x29, 0x39, 0xfd, 0x18, 0xd6, 0xf1, 0x70, 0x8e, 0x0e, 0x47, 0x57, 0x64, - 0xbc, 0x42, 0x37, 0x53, 0xb0, 0x7e, 0x86, 0xf7, 0x4a, 0x0d, 0x96, 0x8d, 0x92, 0xb6, 0x8b, 0x91, - 0x53, 0x17, 0x4d, 0x08, 0xa9, 0x01, 0xeb, 0x5e, 0xf4, 0xc0, 0xe1, 0xde, 0xbe, 0x7c, 0xdc, 0x75, - 0x96, 0xdd, 0xad, 0x2f, 0x80, 0x3e, 0x0a, 0xb1, 0x9c, 0xc7, 0x79, 0xdc, 0x5a, 0xbf, 0x37, 0x41, - 0xcb, 0x30, 0x4a, 0xa1, 0xed, 0xe7, 0xb5, 0x15, 0x67, 0xfa, 0x09, 0x6c, 0xc6, 0x11, 0x3e, 0xf1, - 0x47, 0x38, 0xb3, 0x7d, 0x77, 0xf7, 0xc9, 0x77, 0x81, 0xab, 0x18, 0x54, 0x05, 0x49, 0xe9, 0xe6, - 0x61, 0xb0, 0xef, 0x45, 0x5e, 0xe0, 0xa3, 0xcb, 0xd0, 0x76, 0x45, 0x95, 0x5b, 0xac, 0x0c, 0x27, - 0x1f, 0xa2, 0x00, 0x89, 0x06, 0x17, 0x55, 0x6e, 0xb1, 0x0a, 0x4e, 0x3f, 0x85, 0x36, 0xb7, 0x27, - 0x91, 0xde, 0x11, 0xad, 0xf6, 0x7e, 0x29, 0x95, 0xc1, 0x73, 0x7b, 0x12, 0xc9, 0x96, 0x15, 0x6a, - 0xc6, 0xed, 0x24, 0xa7, 0x14, 0x52, 0x03, 0x96, 0xe4, 0x03, 0x76, 0xa9, 0xe5, 0xb4, 0xf4, 0xcb, - 0x7f, 0xd9, 0xbc, 0x43, 0xac, 0x6d, 0xa0, 0x8f, 0xbd, 0x88, 0x0b, 0xcf, 0xf9, 0xa8, 0x37, 0x01, - 0xb2, 0x5f, 0x2e, 0xdb, 0x5d, 0x63, 0x05, 0xc4, 0x1a, 0x42, 0xf7, 0xb1, 0x3d, 0xc6, 0x69, 0xb4, - 0xf2, 0xfd, 0x56, 0x46, 0x1b, 0x1e, 0x77, 0xe4, 0xde, 0xfb, 0x25, 0xe2, 0x41, 0x88, 0xf4, 0x5e, - 0x3a, 0x46, 0x45, 0xbb, 0xd1, 0xab, 0x32, 0xc1, 0xca, 0xd0, 0x33, 0xae, 0x0c, 0xe4, 0x66, 0x1c, - 0xa8, 0xcd, 0x38, 0xd8, 0x49, 0x36, 0x23, 0x7d, 0x00, 0x90, 0x77, 0xab, 0x32, 0xaf, 0xf4, 0xaf, - 0xa1, 0x57, 0x05, 0x32, 0xc5, 0xcf, 0x08, 0xbd, 0x0f, 0xbd, 0xc2, 0x94, 0xa4, 0xa9, 0x6a, 0x75, - 0x70, 0xd6, 0x72, 0xb8, 0x0b, 0x5a, 0xb6, 0x97, 0xe9, 0x15, 0x69, 0x5e, 0x5e, 0xd4, 0xb5, 0xc6, - 0x5f, 0x81, 0x96, 0xad, 0x58, 0x65, 0x5c, 0x5e, 0xd6, 0xc6, 0xd5, 0x0a, 0x9e, 0xb1, 0xff, 0x1c, - 0x2e, 0x48, 0xaa, 0xa9, 0x8b, 0x8b, 0x85, 0x6d, 0xbc, 0x3b, 0x7a, 0x47, 0x58, 0xc8, 0xeb, 0x4d, - 0x6b, 0xb4, 0xd4, 0xb3, 0xad, 0xf8, 0x19, 0xf7, 0xa1, 0x57, 0x68, 0x3c, 0xf5, 0x68, 0xd5, 0x5e, - 0xac, 0x25, 0x90, 0xbd, 0xfa, 0x92, 0x83, 0xea, 0xee, 0xae, 0x75, 0xf0, 0x2d, 0x5c, 0x5c, 0x1a, - 0x2a, 0xd4, 0x50, 0x2e, 0xaa, 0xab, 0xdc, 0xb8, 0xb6, 0x52, 0x96, 0xe7, 0x52, 0x18, 0x86, 0x8a, - 0x4a, 0x75, 0x3e, 0xd6, 0x51, 0x79, 0xb8, 0x7d, 0x7c, 0x62, 0x36, 0xde, 0x9c, 0x98, 0x8d, 0xb7, - 0x27, 0x26, 0xf9, 0x6d, 0x61, 0x92, 0x3f, 0x17, 0x26, 0xf9, 0x6b, 0x61, 0x92, 0xe3, 0x85, 0x49, - 0xfe, 0x5e, 0x98, 0xe4, 0x9f, 0x85, 0xd9, 0x78, 0xbb, 0x30, 0xc9, 0x1f, 0xa7, 0x66, 0xe3, 0xf8, - 0xd4, 0x6c, 0xbc, 0x39, 0x35, 0x1b, 0xe3, 0xae, 0xf0, 0x72, 0xeb, 0xdf, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x42, 0x8c, 0xec, 0xe3, 0x06, 0x0a, 0x00, 0x00, -} - -func (this *PutChunksRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PutChunksRequest) - if !ok { - that2, ok := that.(PutChunksRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(that1.Chunks[i]) { - return false - } - } - return true -} -func (this *GetChunksRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok 
:= that.(*GetChunksRequest) - if !ok { - that2, ok := that.(GetChunksRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(that1.Chunks[i]) { - return false - } - } - return true -} -func (this *GetChunksResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetChunksResponse) - if !ok { - that2, ok := that.(GetChunksResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(that1.Chunks[i]) { - return false - } - } - return true -} -func (this *Chunk) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Chunk) - if !ok { - that2, ok := that.(Chunk) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Encoded, that1.Encoded) { - return false - } - if this.Key != that1.Key { - return false - } - if this.TableName != that1.TableName { - return false - } - return true -} -func (this *ChunkID) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChunkID) - if !ok { - that2, ok := that.(ChunkID) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ChunkID != that1.ChunkID { - return false - } - return true -} -func (this *DeleteTableRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DeleteTableRequest) - if !ok { - that2, ok := that.(DeleteTableRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.TableName != that1.TableName { - return false - } - return true -} -func (this *DescribeTableRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DescribeTableRequest) - if !ok { - that2, ok := that.(DescribeTableRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.TableName != that1.TableName { - return false - } - return true -} -func (this *WriteBatch) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteBatch) - if !ok { - that2, ok := that.(WriteBatch) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Writes) != len(that1.Writes) { - return false - } - for i := range this.Writes { - if !this.Writes[i].Equal(that1.Writes[i]) { - return false - } - } - if len(this.Deletes) != len(that1.Deletes) { - return false - } - for i := range this.Deletes { - if !this.Deletes[i].Equal(that1.Deletes[i]) { - return false - } - } - return true -} -func (this *WriteIndexRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteIndexRequest) - if !ok { - that2, ok := 
that.(WriteIndexRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Writes) != len(that1.Writes) { - return false - } - for i := range this.Writes { - if !this.Writes[i].Equal(that1.Writes[i]) { - return false - } - } - return true -} -func (this *DeleteIndexRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DeleteIndexRequest) - if !ok { - that2, ok := that.(DeleteIndexRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Deletes) != len(that1.Deletes) { - return false - } - for i := range this.Deletes { - if !this.Deletes[i].Equal(that1.Deletes[i]) { - return false - } - } - return true -} -func (this *QueryIndexResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryIndexResponse) - if !ok { - that2, ok := that.(QueryIndexResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Rows) != len(that1.Rows) { - return false - } - for i := range this.Rows { - if !this.Rows[i].Equal(that1.Rows[i]) { - return false - } - } - return true -} -func (this *Row) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Row) - if !ok { - that2, ok := that.(Row) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.RangeValue, that1.RangeValue) { - return false - } - if !bytes.Equal(this.Value, that1.Value) { - return false - } - return true -} -func (this *IndexEntry) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*IndexEntry) - if !ok { - that2, ok := that.(IndexEntry) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.TableName != that1.TableName { - return false - } - if this.HashValue != that1.HashValue { - return false - } - if !bytes.Equal(this.RangeValue, that1.RangeValue) { - return false - } - if !bytes.Equal(this.Value, that1.Value) { - return false - } - return true -} -func (this *QueryIndexRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryIndexRequest) - if !ok { - that2, ok := that.(QueryIndexRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.TableName != that1.TableName { - return false - } - if this.HashValue != that1.HashValue { - return false - } - if !bytes.Equal(this.RangeValuePrefix, that1.RangeValuePrefix) { - return false - } - if !bytes.Equal(this.RangeValueStart, that1.RangeValueStart) { - return false - } - if !bytes.Equal(this.ValueEqual, that1.ValueEqual) { - return false - } - if this.Immutable != that1.Immutable { - return false - } - return true -} -func (this *UpdateTableRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UpdateTableRequest) - if !ok { - that2, ok := that.(UpdateTableRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - 
return this == nil - } else if this == nil { - return false - } - if !this.Current.Equal(that1.Current) { - return false - } - if !this.Expected.Equal(that1.Expected) { - return false - } - return true -} -func (this *DescribeTableResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DescribeTableResponse) - if !ok { - that2, ok := that.(DescribeTableResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Desc.Equal(that1.Desc) { - return false - } - if this.IsActive != that1.IsActive { - return false - } - return true -} -func (this *CreateTableRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CreateTableRequest) - if !ok { - that2, ok := that.(CreateTableRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Desc.Equal(that1.Desc) { - return false - } - return true -} -func (this *TableDesc) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TableDesc) - if !ok { - that2, ok := that.(TableDesc) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.UseOnDemandIOMode != that1.UseOnDemandIOMode { - return false - } - if this.ProvisionedRead != that1.ProvisionedRead { - return false - } - if this.ProvisionedWrite != that1.ProvisionedWrite { - return false - } - if len(this.Tags) != len(that1.Tags) { - return false - } - for i := range this.Tags { - if this.Tags[i] != that1.Tags[i] { - return false - } - } - return true -} -func (this *ListTablesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ListTablesResponse) - if !ok { - that2, ok := that.(ListTablesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.TableNames) != len(that1.TableNames) { - return false - } - for i := range this.TableNames { - if this.TableNames[i] != that1.TableNames[i] { - return false - } - } - return true -} -func (this *Labels) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Labels) - if !ok { - that2, ok := that.(Labels) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.Value != that1.Value { - return false - } - return true -} -func (this *PutChunksRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.PutChunksRequest{") - if this.Chunks != nil { - s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetChunksRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.GetChunksRequest{") - if this.Chunks != nil { - s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetChunksResponse) GoString() string { - if this == nil { - 
return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.GetChunksResponse{") - if this.Chunks != nil { - s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Chunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&grpc.Chunk{") - s = append(s, "Encoded: "+fmt.Sprintf("%#v", this.Encoded)+",\n") - s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") - s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ChunkID) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.ChunkID{") - s = append(s, "ChunkID: "+fmt.Sprintf("%#v", this.ChunkID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DeleteTableRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.DeleteTableRequest{") - s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescribeTableRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.DescribeTableRequest{") - s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *WriteBatch) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&grpc.WriteBatch{") - if this.Writes != nil { - s = append(s, "Writes: "+fmt.Sprintf("%#v", this.Writes)+",\n") - } - if this.Deletes != nil { - s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *WriteIndexRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.WriteIndexRequest{") - if this.Writes != nil { - s = append(s, "Writes: "+fmt.Sprintf("%#v", this.Writes)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DeleteIndexRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.DeleteIndexRequest{") - if this.Deletes != nil { - s = append(s, "Deletes: "+fmt.Sprintf("%#v", this.Deletes)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryIndexResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.QueryIndexResponse{") - if this.Rows != nil { - s = append(s, "Rows: "+fmt.Sprintf("%#v", this.Rows)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Row) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&grpc.Row{") - s = append(s, "RangeValue: "+fmt.Sprintf("%#v", this.RangeValue)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *IndexEntry) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&grpc.IndexEntry{") - s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") - s = append(s, "HashValue: "+fmt.Sprintf("%#v", this.HashValue)+",\n") - s = append(s, "RangeValue: "+fmt.Sprintf("%#v", this.RangeValue)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", 
this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryIndexRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&grpc.QueryIndexRequest{") - s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") - s = append(s, "HashValue: "+fmt.Sprintf("%#v", this.HashValue)+",\n") - s = append(s, "RangeValuePrefix: "+fmt.Sprintf("%#v", this.RangeValuePrefix)+",\n") - s = append(s, "RangeValueStart: "+fmt.Sprintf("%#v", this.RangeValueStart)+",\n") - s = append(s, "ValueEqual: "+fmt.Sprintf("%#v", this.ValueEqual)+",\n") - s = append(s, "Immutable: "+fmt.Sprintf("%#v", this.Immutable)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UpdateTableRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&grpc.UpdateTableRequest{") - if this.Current != nil { - s = append(s, "Current: "+fmt.Sprintf("%#v", this.Current)+",\n") - } - if this.Expected != nil { - s = append(s, "Expected: "+fmt.Sprintf("%#v", this.Expected)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescribeTableResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&grpc.DescribeTableResponse{") - if this.Desc != nil { - s = append(s, "Desc: "+fmt.Sprintf("%#v", this.Desc)+",\n") - } - s = append(s, "IsActive: "+fmt.Sprintf("%#v", this.IsActive)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CreateTableRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.CreateTableRequest{") - if this.Desc != nil { - s = append(s, "Desc: "+fmt.Sprintf("%#v", this.Desc)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TableDesc) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&grpc.TableDesc{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "UseOnDemandIOMode: "+fmt.Sprintf("%#v", this.UseOnDemandIOMode)+",\n") - s = append(s, "ProvisionedRead: "+fmt.Sprintf("%#v", this.ProvisionedRead)+",\n") - s = append(s, "ProvisionedWrite: "+fmt.Sprintf("%#v", this.ProvisionedWrite)+",\n") - keysForTags := make([]string, 0, len(this.Tags)) - for k, _ := range this.Tags { - keysForTags = append(keysForTags, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForTags) - mapStringForTags := "map[string]string{" - for _, k := range keysForTags { - mapStringForTags += fmt.Sprintf("%#v: %#v,", k, this.Tags[k]) - } - mapStringForTags += "}" - if this.Tags != nil { - s = append(s, "Tags: "+mapStringForTags+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ListTablesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&grpc.ListTablesResponse{") - s = append(s, "TableNames: "+fmt.Sprintf("%#v", this.TableNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Labels) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&grpc.Labels{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringGrpc(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := 
reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// GrpcStoreClient is the client API for GrpcStore service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GrpcStoreClient interface { - /// WriteIndex writes batch of indexes to the index tables. - WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) - /// QueryIndex reads the indexes required for given query & sends back the batch of rows - /// in rpc streams - QueryIndex(ctx context.Context, in *QueryIndexRequest, opts ...grpc.CallOption) (GrpcStore_QueryIndexClient, error) - /// DeleteIndex deletes the batch of index entries from the index tables - DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) - /// PutChunks saves the batch of chunks into the chunk tables. - PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*empty.Empty, error) - /// GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams - /// batching needs to be performed at server level as per requirement instead of sending single chunk per stream. - /// In GetChunks rpc request send buf as nil - GetChunks(ctx context.Context, in *GetChunksRequest, opts ...grpc.CallOption) (GrpcStore_GetChunksClient, error) - /// DeleteChunks deletes the chunks based on chunkID. - DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*empty.Empty, error) - /// Lists all the tables that exists in the database. - ListTables(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) - /// Creates a table with provided name & attributes. - CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) - // Deletes a table using table name provided. - DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) - // Describes a table information for the provided table. - DescribeTable(ctx context.Context, in *DescribeTableRequest, opts ...grpc.CallOption) (*DescribeTableResponse, error) - // Update a table with newly provided table information. - UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) -} - -type grpcStoreClient struct { - cc *grpc.ClientConn -} - -func NewGrpcStoreClient(cc *grpc.ClientConn) GrpcStoreClient { - return &grpcStoreClient{cc} -} - -func (c *grpcStoreClient) WriteIndex(ctx context.Context, in *WriteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/WriteIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) QueryIndex(ctx context.Context, in *QueryIndexRequest, opts ...grpc.CallOption) (GrpcStore_QueryIndexClient, error) { - stream, err := c.cc.NewStream(ctx, &_GrpcStore_serviceDesc.Streams[0], "/grpc.grpc_store/QueryIndex", opts...) 
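// Server-streaming call pattern used by the generated client: NewStream
// opens the stream declared in _GrpcStore_serviceDesc.Streams, SendMsg
// writes the single QueryIndexRequest, and CloseSend half-closes the
// sending side so the server can start streaming QueryIndexResponse
// batches back; callers then loop on Recv() until it returns io.EOF.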
- if err != nil { - return nil, err - } - x := &grpcStoreQueryIndexClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type GrpcStore_QueryIndexClient interface { - Recv() (*QueryIndexResponse, error) - grpc.ClientStream -} - -type grpcStoreQueryIndexClient struct { - grpc.ClientStream -} - -func (x *grpcStoreQueryIndexClient) Recv() (*QueryIndexResponse, error) { - m := new(QueryIndexResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *grpcStoreClient) DeleteIndex(ctx context.Context, in *DeleteIndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) PutChunks(ctx context.Context, in *PutChunksRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/PutChunks", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) GetChunks(ctx context.Context, in *GetChunksRequest, opts ...grpc.CallOption) (GrpcStore_GetChunksClient, error) { - stream, err := c.cc.NewStream(ctx, &_GrpcStore_serviceDesc.Streams[1], "/grpc.grpc_store/GetChunks", opts...) - if err != nil { - return nil, err - } - x := &grpcStoreGetChunksClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type GrpcStore_GetChunksClient interface { - Recv() (*GetChunksResponse, error) - grpc.ClientStream -} - -type grpcStoreGetChunksClient struct { - grpc.ClientStream -} - -func (x *grpcStoreGetChunksClient) Recv() (*GetChunksResponse, error) { - m := new(GetChunksResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *grpcStoreClient) DeleteChunks(ctx context.Context, in *ChunkID, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteChunks", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) ListTables(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListTablesResponse, error) { - out := new(ListTablesResponse) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/ListTables", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/CreateTable", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/DeleteTable", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) DescribeTable(ctx context.Context, in *DescribeTableRequest, opts ...grpc.CallOption) (*DescribeTableResponse, error) { - out := new(DescribeTableResponse) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/DescribeTable", in, out, opts...) 
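// Unary call pattern used by the generated client: cc.Invoke performs a
// blocking request/response exchange against the full method name and
// unmarshals the reply into out. DescribeTable and ListTables return
// typed responses; every other unary method replies with empty.Empty.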
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *grpcStoreClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/grpc.grpc_store/UpdateTable", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// GrpcStoreServer is the server API for GrpcStore service. -type GrpcStoreServer interface { - /// WriteIndex writes batch of indexes to the index tables. - WriteIndex(context.Context, *WriteIndexRequest) (*empty.Empty, error) - /// QueryIndex reads the indexes required for given query & sends back the batch of rows - /// in rpc streams - QueryIndex(*QueryIndexRequest, GrpcStore_QueryIndexServer) error - /// DeleteIndex deletes the batch of index entries from the index tables - DeleteIndex(context.Context, *DeleteIndexRequest) (*empty.Empty, error) - /// PutChunks saves the batch of chunks into the chunk tables. - PutChunks(context.Context, *PutChunksRequest) (*empty.Empty, error) - /// GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams - /// batching needs to be performed at server level as per requirement instead of sending single chunk per stream. - /// In GetChunks rpc request send buf as nil - GetChunks(*GetChunksRequest, GrpcStore_GetChunksServer) error - /// DeleteChunks deletes the chunks based on chunkID. - DeleteChunks(context.Context, *ChunkID) (*empty.Empty, error) - /// Lists all the tables that exists in the database. - ListTables(context.Context, *empty.Empty) (*ListTablesResponse, error) - /// Creates a table with provided name & attributes. - CreateTable(context.Context, *CreateTableRequest) (*empty.Empty, error) - // Deletes a table using table name provided. - DeleteTable(context.Context, *DeleteTableRequest) (*empty.Empty, error) - // Describes a table information for the provided table. - DescribeTable(context.Context, *DescribeTableRequest) (*DescribeTableResponse, error) - // Update a table with newly provided table information. - UpdateTable(context.Context, *UpdateTableRequest) (*empty.Empty, error) -} - -// UnimplementedGrpcStoreServer can be embedded to have forward compatible implementations. 
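// A minimal sketch (not part of the generated file) of how a backend
// might satisfy GrpcStoreServer: embed UnimplementedGrpcStoreServer so
// any RPC you do not override returns codes.Unimplemented, implement the
// methods you support with the signatures above, and register the value
// with RegisterGrpcStoreServer. The type name myStore and the table name
// "chunk_index" below are illustrative assumptions only.
//
//	type myStore struct {
//		UnimplementedGrpcStoreServer
//	}
//
//	func (s *myStore) ListTables(ctx context.Context, _ *empty.Empty) (*ListTablesResponse, error) {
//		// A real backend would enumerate its tables here.
//		return &ListTablesResponse{TableNames: []string{"chunk_index"}}, nil
//	}
//
//	func register(s *grpc.Server) {
//		RegisterGrpcStoreServer(s, &myStore{})
//	}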
-type UnimplementedGrpcStoreServer struct { -} - -func (*UnimplementedGrpcStoreServer) WriteIndex(ctx context.Context, req *WriteIndexRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method WriteIndex not implemented") -} -func (*UnimplementedGrpcStoreServer) QueryIndex(req *QueryIndexRequest, srv GrpcStore_QueryIndexServer) error { - return status.Errorf(codes.Unimplemented, "method QueryIndex not implemented") -} -func (*UnimplementedGrpcStoreServer) DeleteIndex(ctx context.Context, req *DeleteIndexRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteIndex not implemented") -} -func (*UnimplementedGrpcStoreServer) PutChunks(ctx context.Context, req *PutChunksRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method PutChunks not implemented") -} -func (*UnimplementedGrpcStoreServer) GetChunks(req *GetChunksRequest, srv GrpcStore_GetChunksServer) error { - return status.Errorf(codes.Unimplemented, "method GetChunks not implemented") -} -func (*UnimplementedGrpcStoreServer) DeleteChunks(ctx context.Context, req *ChunkID) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteChunks not implemented") -} -func (*UnimplementedGrpcStoreServer) ListTables(ctx context.Context, req *empty.Empty) (*ListTablesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListTables not implemented") -} -func (*UnimplementedGrpcStoreServer) CreateTable(ctx context.Context, req *CreateTableRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateTable not implemented") -} -func (*UnimplementedGrpcStoreServer) DeleteTable(ctx context.Context, req *DeleteTableRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteTable not implemented") -} -func (*UnimplementedGrpcStoreServer) DescribeTable(ctx context.Context, req *DescribeTableRequest) (*DescribeTableResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DescribeTable not implemented") -} -func (*UnimplementedGrpcStoreServer) UpdateTable(ctx context.Context, req *UpdateTableRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateTable not implemented") -} - -func RegisterGrpcStoreServer(s *grpc.Server, srv GrpcStoreServer) { - s.RegisterService(&_GrpcStore_serviceDesc, srv) -} - -func _GrpcStore_WriteIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WriteIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).WriteIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/WriteIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).WriteIndex(ctx, req.(*WriteIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_QueryIndex_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(QueryIndexRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(GrpcStoreServer).QueryIndex(m, &grpcStoreQueryIndexServer{stream}) -} - -type GrpcStore_QueryIndexServer interface { - Send(*QueryIndexResponse) error - grpc.ServerStream -} - -type grpcStoreQueryIndexServer struct { - grpc.ServerStream -} - -func (x 
*grpcStoreQueryIndexServer) Send(m *QueryIndexResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _GrpcStore_DeleteIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).DeleteIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/DeleteIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).DeleteIndex(ctx, req.(*DeleteIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_PutChunks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PutChunksRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).PutChunks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/PutChunks", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).PutChunks(ctx, req.(*PutChunksRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_GetChunks_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetChunksRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(GrpcStoreServer).GetChunks(m, &grpcStoreGetChunksServer{stream}) -} - -type GrpcStore_GetChunksServer interface { - Send(*GetChunksResponse) error - grpc.ServerStream -} - -type grpcStoreGetChunksServer struct { - grpc.ServerStream -} - -func (x *grpcStoreGetChunksServer) Send(m *GetChunksResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _GrpcStore_DeleteChunks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ChunkID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).DeleteChunks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/DeleteChunks", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).DeleteChunks(ctx, req.(*ChunkID)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_ListTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).ListTables(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/ListTables", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).ListTables(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_CreateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateTableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).CreateTable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: 
srv, - FullMethod: "/grpc.grpc_store/CreateTable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).CreateTable(ctx, req.(*CreateTableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_DeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteTableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).DeleteTable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/DeleteTable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).DeleteTable(ctx, req.(*DeleteTableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_DescribeTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DescribeTableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).DescribeTable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/DescribeTable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).DescribeTable(ctx, req.(*DescribeTableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _GrpcStore_UpdateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateTableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GrpcStoreServer).UpdateTable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.grpc_store/UpdateTable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GrpcStoreServer).UpdateTable(ctx, req.(*UpdateTableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _GrpcStore_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.grpc_store", - HandlerType: (*GrpcStoreServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "WriteIndex", - Handler: _GrpcStore_WriteIndex_Handler, - }, - { - MethodName: "DeleteIndex", - Handler: _GrpcStore_DeleteIndex_Handler, - }, - { - MethodName: "PutChunks", - Handler: _GrpcStore_PutChunks_Handler, - }, - { - MethodName: "DeleteChunks", - Handler: _GrpcStore_DeleteChunks_Handler, - }, - { - MethodName: "ListTables", - Handler: _GrpcStore_ListTables_Handler, - }, - { - MethodName: "CreateTable", - Handler: _GrpcStore_CreateTable_Handler, - }, - { - MethodName: "DeleteTable", - Handler: _GrpcStore_DeleteTable_Handler, - }, - { - MethodName: "DescribeTable", - Handler: _GrpcStore_DescribeTable_Handler, - }, - { - MethodName: "UpdateTable", - Handler: _GrpcStore_UpdateTable_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "QueryIndex", - Handler: _GrpcStore_QueryIndex_Handler, - ServerStreams: true, - }, - { - StreamName: "GetChunks", - Handler: _GrpcStore_GetChunks_Handler, - ServerStreams: true, - }, - }, - Metadata: "grpc.proto", -} - -func (m *PutChunksRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err 
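// Generated gogo/protobuf marshalling convention, shared by every message
// in this file: Size() computes the exact encoded length, Marshal
// allocates a buffer of that size, and MarshalToSizedBuffer fills it from
// the end toward the front, returning the number of bytes written so
// Marshal can slice off any unused prefix with dAtA[:n].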
- } - return dAtA[:n], nil -} - -func (m *PutChunksRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PutChunksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *GetChunksRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetChunksRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetChunksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *GetChunksResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetChunksResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetChunksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Chunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TableName) > 0 { - i -= len(m.TableName) - copy(dAtA[i:], m.TableName) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.TableName))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Encoded) > 0 { - i -= len(m.Encoded) - copy(dAtA[i:], m.Encoded) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Encoded))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ChunkID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChunkID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChunkID) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChunkID) > 0 { - i -= len(m.ChunkID) - copy(dAtA[i:], m.ChunkID) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.ChunkID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteTableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteTableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteTableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TableName) > 0 { - i -= len(m.TableName) - copy(dAtA[i:], m.TableName) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.TableName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DescribeTableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DescribeTableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DescribeTableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TableName) > 0 { - i -= len(m.TableName) - copy(dAtA[i:], m.TableName) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.TableName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WriteBatch) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteBatch) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Deletes) > 0 { - for iNdEx := len(m.Deletes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Deletes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Writes) > 0 { - for iNdEx := len(m.Writes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Writes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WriteIndexRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteIndexRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteIndexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Writes) > 0 { - for iNdEx := len(m.Writes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Writes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DeleteIndexRequest) Marshal() (dAtA []byte, err error) { 
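// Field framing in these back-to-front writers: each element is copied,
// its length is prepended as a varint by encodeVarintGrpc, and finally
// the key byte is written, where key = (field number << 3) | wire type.
// So 0xa is field 1, 0x12 field 2, and 0x1a field 3 with wire type 2
// (length-delimited), while 0x10, 0x18, and 0x20 carry varint fields 2-4
// such as the TableDesc flags and provisioned throughput numbers.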
[... extraction-garbled hunk: the remaining generated Marshal, MarshalTo, and MarshalToSizedBuffer implementations (DeleteIndexRequest, QueryIndexResponse, Row, IndexEntry, QueryIndexRequest, UpdateTableRequest, DescribeTableResponse, CreateTableRequest, TableDesc, ListTablesResponse, Labels) and the encodeVarintGrpc helper, all deleted ...]
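For readers skimming the deletion: every generated marshaler above filled a presized buffer from the end toward the front, writing each field's payload first, then its varint-encoded length, then the tag byte. A minimal, self-contained sketch of that pattern under a hypothetical single-field message (not the deleted API itself):

package main

import (
	"fmt"
	"math/bits"
)

// sov reports how many bytes the varint encoding of x occupies,
// mirroring the generated sovGrpc helper.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes x immediately before offset in dAtA, exactly like
// the deleted encodeVarintGrpc: it steps back by sov(x), then emits
// little-endian base-128 groups with the high bit as a continuation flag.
func encodeVarint(dAtA []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		dAtA[offset] = uint8(x&0x7f | 0x80)
		x >>= 7
		offset++
	}
	dAtA[offset] = uint8(x)
	return base
}

// marshalBytesField encodes field number 1, wire type 2 (length-delimited),
// back to front: payload, then its length, then the tag byte 0x0a.
func marshalBytesField(value []byte) []byte {
	size := 1 + len(value) + sov(uint64(len(value)))
	dAtA := make([]byte, size)
	i := size
	i -= len(value)
	copy(dAtA[i:], value)
	i = encodeVarint(dAtA, i, uint64(len(value)))
	i--
	dAtA[i] = 0xa // tag: (field 1 << 3) | wire type 2
	return dAtA[i:]
}

func main() {
	fmt.Printf("%x\n", marshalBytesField([]byte("chunk"))) // 0a056368756e6b
}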
[... extraction-garbled hunk: the generated Size() methods for every message, the varint-width helpers sovGrpc and sozGrpc, and the first String() debug formatters (PutChunksRequest, GetChunksRequest, GetChunksResponse, Chunk), all deleted ...]
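The Size() methods priced each field with the sovGrpc/sozGrpc helpers before marshaling. A small sketch of how those two behave, with names mirrored from the deleted code and trimmed to essentials:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovGrpc: the byte length of x as an unsigned varint.
// The x|1 guards the x == 0 case, which still needs one byte.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// soz mirrors sozGrpc: zigzag-encode a signed value so small negative
// numbers stay small, then measure it as an unsigned varint.
func soz(x int64) int { return sov(uint64((x << 1) ^ (x >> 63))) }

func main() {
	fmt.Println(sov(0), sov(127), sov(128)) // 1 1 2
	fmt.Println(soz(-1), soz(64))           // 1 2
}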
[... extraction-garbled hunk: the remaining generated String() formatters (ChunkID through Labels), the valueToStringGrpc reflection helper, and the beginning of the generated Unmarshal implementations, all deleted ...]
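Every deleted Unmarshal below shares one skeleton: read a varint tag, split it into field number and wire type, then switch on the field. The generated code inlines the varint read at each field; a reduced sketch with a hypothetical helper:

package main

import (
	"errors"
	"fmt"
)

// readUvarint is the varint loop the generated decoders inline everywhere:
// 7 payload bits per byte, high bit set while more bytes follow.
func readUvarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var x uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, iNdEx, nil
		}
	}
}

func main() {
	// 0x0a is the tag for field 1, wire type 2, as in the deleted decoders.
	wire, next, _ := readUvarint([]byte{0x0a, 0x05}, 0)
	fmt.Println(wire>>3, wire&0x7, next) // field=1 wiretype=2 next=1
}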
[... extraction-garbled hunk: generated Unmarshal implementations deleted for PutChunksRequest, GetChunksRequest, GetChunksResponse, Chunk, ChunkID, DeleteTableRequest, DescribeTableRequest, and WriteBatch ...]
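Length-delimited fields (bytes, strings, and nested messages) follow the tag with a varint length, then that many payload bytes; repeated message fields like WriteBatch.Writes append a fresh element and recursively unmarshal the sub-slice. A sketch of the payload extraction with the same bounds checks the generated decoders emit:

package main

import (
	"errors"
	"fmt"
)

// readBytesField consumes one length-delimited payload starting at iNdEx.
// The declared length must be non-negative, must not overflow the index,
// and must not run past the buffer — exactly the generated checks.
func readBytesField(dAtA []byte, iNdEx int, msglen int) ([]byte, int, error) {
	if msglen < 0 {
		return nil, 0, errors.New("invalid length")
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // guards against integer overflow of iNdEx+msglen
		return nil, 0, errors.New("invalid length")
	}
	if postIndex > len(dAtA) {
		return nil, 0, errors.New("unexpected EOF")
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	payload, next, err := readBytesField([]byte("chunk"), 0, 5)
	fmt.Println(string(payload), next, err) // chunk 5 <nil>
}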
[... extraction-garbled hunk: generated Unmarshal implementations deleted for WriteIndexRequest, DeleteIndexRequest, QueryIndexResponse, Row, IndexEntry, and QueryIndexRequest ...]
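Scalar fields in the deleted decoders are plain varints: booleans like Immutable and IsActive become "v != 0", and counters like ProvisionedRead accumulate 7 bits per byte straight into an int64. A sketch of both payload reads (bounds checks elided for brevity; the generated code performs them at every byte):

package main

import "fmt"

// decodeScalars reads a bool varint payload followed by an int64 varint
// payload, in the style of the deleted TableDesc/QueryIndexRequest fields.
func decodeScalars(dAtA []byte) (bool, int64) {
	var v int
	var n int64
	i := 0
	for shift := uint(0); ; shift += 7 { // bool field payload
		b := dAtA[i]
		i++
		v |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	for shift := uint(0); ; shift += 7 { // int64 field payload
		b := dAtA[i]
		i++
		n |= int64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v != 0, n
}

func main() {
	ok, reads := decodeScalars([]byte{0x01, 0xac, 0x02}) // 1, then 300
	fmt.Println(ok, reads)                               // true 300
}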
[... extraction-garbled hunk: generated Unmarshal implementations deleted for UpdateTableRequest, DescribeTableResponse, and CreateTableRequest ...]
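The deleted TableDesc decoder also rebuilt its Tags map from repeated entry messages: field 1 is the key, field 2 the value, both length-delimited. A reduced sketch of that wire shape, simplified to single-byte tags and lengths (which holds for short strings; the generated code reads full varints and bounds-checks every step):

package main

import "fmt"

// decodeMapEntry parses one map<string,string> entry payload in the
// shape used by the deleted TableDesc.Tags decoding.
func decodeMapEntry(dAtA []byte) (key, value string) {
	i := 0
	for i < len(dAtA) {
		wire := int(dAtA[i]) // single-byte tag (fields 1 and 2)
		i++
		strLen := int(dAtA[i]) // single-byte length
		i++
		s := string(dAtA[i : i+strLen])
		i += strLen
		switch wire >> 3 {
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value
}

func main() {
	// One Tags entry: key "env" (tag 0x0a), value "prod" (tag 0x12).
	entry := []byte{0x0a, 3, 'e', 'n', 'v', 0x12, 4, 'p', 'r', 'o', 'd'}
	k, v := decodeMapEntry(entry)
	fmt.Println(k, v) // env prod
}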
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UseOnDemandIOMode = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProvisionedRead", wireType) - } - m.ProvisionedRead = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ProvisionedRead |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProvisionedWrite", wireType) - } - m.ProvisionedWrite = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ProvisionedWrite |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tags == nil { - m.Tags = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGrpc - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGrpc - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGrpc - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGrpc - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tags[mapkey] = mapvalue - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListTablesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListTablesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TableNames = append(m.TableNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Labels) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Labels: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Labels: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGrpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGrpc - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthGrpc - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGrpc(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthGrpc - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGrpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGrpc = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.proto b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.proto deleted file mode 100644 index 3eecd3584..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc.proto +++ /dev/null @@ -1,142 +0,0 @@ -syntax = "proto3"; - -package grpc; - -import "google/protobuf/empty.proto"; - -service grpc_store { - /// index-client - - /// WriteIndex writes batch of indexes to the index tables. 
- rpc WriteIndex(WriteIndexRequest) returns (google.protobuf.Empty); - /// QueryIndex reads the indexes required for given query & sends back the batch of rows - /// in rpc streams - rpc QueryIndex(QueryIndexRequest) returns (stream QueryIndexResponse); - /// DeleteIndex deletes the batch of index entries from the index tables - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty); - - /// storage-client - - /// PutChunks saves the batch of chunks into the chunk tables. - rpc PutChunks(PutChunksRequest) returns (google.protobuf.Empty); - /// GetChunks requests for batch of chunks and the batch of chunks are sent back in rpc streams - /// batching needs to be performed at server level as per requirement instead of sending single chunk per stream. - /// In GetChunks rpc request send buf as nil - rpc GetChunks(GetChunksRequest) returns (stream GetChunksResponse); - /// DeleteChunks deletes the chunks based on chunkID. - rpc DeleteChunks(ChunkID) returns (google.protobuf.Empty); - - /// table-client - - /// Lists all the tables that exists in the database. - rpc ListTables(google.protobuf.Empty) returns (ListTablesResponse); - /// Creates a table with provided name & attributes. - rpc CreateTable(CreateTableRequest) returns (google.protobuf.Empty); - // Deletes a table using table name provided. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty); - // Describes a table information for the provided table. - rpc DescribeTable(DescribeTableRequest) returns (DescribeTableResponse); - // Update a table with newly provided table information. - rpc UpdateTable(UpdateTableRequest) returns (google.protobuf.Empty); -} - -message PutChunksRequest { - repeated Chunk chunks = 1; -} - -message GetChunksRequest { - repeated Chunk chunks = 1; -} - -message GetChunksResponse { - repeated Chunk chunks = 1; -} - -message Chunk { - bytes encoded = 1; - string key = 2; - string tableName = 3; -} - -message ChunkID { - string chunkID = 1; -} - -message DeleteTableRequest { - string tableName = 1; -} - -message DescribeTableRequest { - string tableName = 1; -} - -message WriteBatch { - repeated IndexEntry writes = 1; - repeated IndexEntry deletes = 2; -} - -message WriteIndexRequest { - repeated IndexEntry writes = 1; -} - -message DeleteIndexRequest { - repeated IndexEntry deletes = 1; -} - -message QueryIndexResponse { - repeated Row rows = 1; -} - -message Row { - bytes rangeValue = 1; - bytes value = 2; -} - -message IndexEntry { - string tableName = 1; - string hashValue = 2; - bytes rangeValue = 3; - bytes value = 4; -} - -message QueryIndexRequest { - string tableName = 1; - string hashValue = 2; - bytes rangeValuePrefix = 3; - bytes rangeValueStart = 4; - bytes valueEqual = 5; - bool immutable = 6; -} - -message UpdateTableRequest { - TableDesc current = 1; - TableDesc expected = 2; -} - -message DescribeTableResponse { - TableDesc desc = 1; - bool isActive = 2; -} - -message CreateTableRequest { - TableDesc desc = 1; -} - -message TableDesc { - string name = 1; - bool useOnDemandIOMode = 2; - int64 provisionedRead = 3; - int64 provisionedWrite = 4; - map<string, string> tags = 5; -} - -message ListTablesResponse { - repeated string tableNames = 1; -} - -message Labels { - string name = 1; - string value = 2; -} - - diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go deleted file mode 100644 index fbcba9c9d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go
+++ /dev/null @@ -1,35 +0,0 @@ -package grpc - -import ( - "flag" - "time" - - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" -) - -// Config for a StorageClient -type Config struct { - Address string `yaml:"server_address,omitempty"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Address, "grpc-store.server-address", "", "Hostname or IP of the gRPC store instance.") -} - -func connectToGrpcServer(serverAddress string) (GrpcStoreClient, *grpc.ClientConn, error) { - params := keepalive.ClientParameters{ - Time: time.Second * 20, - Timeout: time.Second * 10, - PermitWithoutStream: true, - } - param := grpc.WithKeepaliveParams(params) - cc, err := grpc.Dial(serverAddress, param, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to dial grpc-store %s", serverAddress) - } - return NewGrpcStoreClient(cc), cc, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/index_client.go deleted file mode 100644 index 1bc0f31b1..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/index_client.go +++ /dev/null @@ -1,107 +0,0 @@ -package grpc - -import ( - "context" - "io" - - "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/util" -) - -func (w *WriteBatch) Add(tableName, hashValue string, rangeValue []byte, value []byte) { - w.Writes = append(w.Writes, &IndexEntry{ - TableName: tableName, - HashValue: hashValue, - RangeValue: rangeValue, - Value: value, - }) -} - -func (w *WriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { - w.Deletes = append(w.Deletes, &IndexEntry{ - TableName: tableName, - HashValue: hashValue, - RangeValue: rangeValue, - }) -} - -func (s *StorageClient) NewWriteBatch() chunk.WriteBatch { - return &WriteBatch{} -} - -func (s *StorageClient) BatchWrite(c context.Context, batch chunk.WriteBatch) error { - writeBatch := batch.(*WriteBatch) - batchWrites := &WriteIndexRequest{Writes: writeBatch.Writes} - _, err := s.client.WriteIndex(context.Background(), batchWrites) - if err != nil { - return errors.WithStack(err) - } - - batchDeletes := &DeleteIndexRequest{Deletes: writeBatch.Deletes} - _, err = s.client.DeleteIndex(context.Background(), batchDeletes) - if err != nil { - return errors.WithStack(err) - } - - return nil -} - -func (s *StorageClient) QueryPages(ctx context.Context, queries []chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) (shouldContinue bool)) error { - return util.DoParallelQueries(ctx, s.query, queries, callback) -} - -func (s *StorageClient) query(ctx context.Context, query chunk.IndexQuery, callback util.Callback) error { - indexQuery := &QueryIndexRequest{ - TableName: query.TableName, - HashValue: query.HashValue, - RangeValuePrefix: query.RangeValuePrefix, - RangeValueStart: query.RangeValueStart, - ValueEqual: query.ValueEqual, - Immutable: query.Immutable, - } - streamer, err := s.client.QueryIndex(ctx, indexQuery) - if err != nil { - return errors.WithStack(err) - } - for { - readBatch, err := streamer.Recv() - if err == io.EOF { - break - } - if err != nil { - return errors.WithStack(err) - } - if !callback(query, readBatch) { - return nil - } - } - - return nil -} - -func (r 
*QueryIndexResponse) Iterator() chunk.ReadBatchIterator { - return &grpcIter{ - i: -1, - QueryIndexResponse: r, - } -} - -type grpcIter struct { - i int - *QueryIndexResponse -} - -func (b *grpcIter) Next() bool { - b.i++ - return b.i < len(b.Rows) -} - -func (b *grpcIter) RangeValue() []byte { - return b.Rows[b.i].RangeValue -} - -func (b *grpcIter) Value() []byte { - return b.Rows[b.i].Value -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/storage_client.go deleted file mode 100644 index 99595f8c3..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/storage_client.go +++ /dev/null @@ -1,118 +0,0 @@ -package grpc - -import ( - "context" - "io" - - "github.com/pkg/errors" - "google.golang.org/grpc" - - "github.com/cortexproject/cortex/pkg/chunk" -) - -type StorageClient struct { - schemaCfg chunk.SchemaConfig - client GrpcStoreClient - connection *grpc.ClientConn -} - -// NewStorageClient returns a new StorageClient. -func NewStorageClient(cfg Config, schemaCfg chunk.SchemaConfig) (*StorageClient, error) { - grpcClient, conn, err := connectToGrpcServer(cfg.Address) - if err != nil { - return nil, err - } - client := &StorageClient{ - schemaCfg: schemaCfg, - client: grpcClient, - connection: conn, - } - return client, nil -} - -func (s *StorageClient) Stop() { - s.connection.Close() -} - -// PutChunks implements chunk.ObjectClient. -func (s *StorageClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { - req := &PutChunksRequest{} - for i := range chunks { - buf, err := chunks[i].Encoded() - if err != nil { - return errors.WithStack(err) - } - - key := chunks[i].ExternalKey() - tableName, err := s.schemaCfg.ChunkTableFor(chunks[i].From) - if err != nil { - return errors.WithStack(err) - } - writeChunk := &Chunk{ - Encoded: buf, - Key: key, - TableName: tableName, - } - - req.Chunks = append(req.Chunks, writeChunk) - } - - _, err := s.client.PutChunks(ctx, req) - if err != nil { - return errors.WithStack(err) - } - - return nil -} - -func (s *StorageClient) DeleteChunk(ctx context.Context, userID, chunkID string) error { - chunkInfo := &ChunkID{ChunkID: chunkID} - _, err := s.client.DeleteChunks(ctx, chunkInfo) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -func (s *StorageClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) { - req := &GetChunksRequest{} - req.Chunks = []*Chunk{} - var err error - for _, inputInfo := range input { - chunkInfo := &Chunk{} - // send the table name from upstream gRPC client as gRPC server is unaware of schema - chunkInfo.TableName, err = s.schemaCfg.ChunkTableFor(inputInfo.From) - if err != nil { - return nil, errors.WithStack(err) - } - chunkInfo.Key = inputInfo.ExternalKey() - req.Chunks = append(req.Chunks, chunkInfo) - } - streamer, err := s.client.GetChunks(ctx, req) - if err != nil { - return nil, errors.WithStack(err) - } - var result []chunk.Chunk - decodeContext := chunk.NewDecodeContext() - for { - receivedChunks, err := streamer.Recv() - if err == io.EOF { - break - } - if err != nil { - return nil, errors.WithStack(err) - } - for _, chunkResponse := range receivedChunks.GetChunks() { - var c chunk.Chunk - if chunkResponse != nil { - err = c.Decode(decodeContext, chunkResponse.Encoded) - if err != nil { - return result, err - } - } - result = append(result, c) - } - } - - return result, err -} diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/table_client.go deleted file mode 100644 index 9e7d201f5..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/table_client.go +++ /dev/null @@ -1,107 +0,0 @@ -package grpc - -import ( - "context" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/pkg/errors" - "google.golang.org/grpc" - - "github.com/cortexproject/cortex/pkg/chunk" -) - -type TableClient struct { - client GrpcStoreClient - conn *grpc.ClientConn -} - -// NewTableClient returns a new TableClient. -func NewTableClient(cfg Config) (*TableClient, error) { - grpcClient, conn, err := connectToGrpcServer(cfg.Address) - if err != nil { - return nil, err - } - client := &TableClient{ - client: grpcClient, - conn: conn, - } - return client, nil -} - -func (c *TableClient) ListTables(ctx context.Context) ([]string, error) { - tables, err := c.client.ListTables(ctx, &empty.Empty{}) - if err != nil { - return nil, errors.WithStack(err) - } - return tables.TableNames, nil -} - -func (c *TableClient) DeleteTable(ctx context.Context, name string) error { - tableName := &DeleteTableRequest{TableName: name} - _, err := c.client.DeleteTable(ctx, tableName) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -func (c *TableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { - tableName := &DescribeTableRequest{TableName: name} - tableDesc, err := c.client.DescribeTable(ctx, tableName) - if err != nil { - return desc, false, errors.WithStack(err) - } - desc.Name = tableDesc.Desc.Name - desc.ProvisionedRead = tableDesc.Desc.ProvisionedRead - desc.ProvisionedWrite = tableDesc.Desc.ProvisionedWrite - desc.UseOnDemandIOMode = tableDesc.Desc.UseOnDemandIOMode - desc.Tags = tableDesc.Desc.Tags - return desc, tableDesc.IsActive, nil -} - -func (c *TableClient) UpdateTable(ctx context.Context, current, expected chunk.TableDesc) error { - currentTable := &TableDesc{} - expectedTable := &TableDesc{} - - currentTable.Name = current.Name - currentTable.UseOnDemandIOMode = current.UseOnDemandIOMode - currentTable.ProvisionedWrite = current.ProvisionedWrite - currentTable.ProvisionedRead = current.ProvisionedRead - currentTable.Tags = current.Tags - - expectedTable.Name = expected.Name - expectedTable.UseOnDemandIOMode = expected.UseOnDemandIOMode - expectedTable.ProvisionedWrite = expected.ProvisionedWrite - expectedTable.ProvisionedRead = expected.ProvisionedRead - expectedTable.Tags = expected.Tags - - updateTableRequest := &UpdateTableRequest{ - Current: currentTable, - Expected: expectedTable, - } - _, err := c.client.UpdateTable(ctx, updateTableRequest) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -func (c *TableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error { - req := &CreateTableRequest{} - req.Desc = &TableDesc{} - req.Desc.Name = desc.Name - req.Desc.ProvisionedRead = desc.ProvisionedRead - req.Desc.ProvisionedWrite = desc.ProvisionedWrite - req.Desc.Tags = desc.Tags - req.Desc.UseOnDemandIOMode = desc.UseOnDemandIOMode - - _, err := c.client.CreateTable(ctx, req) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -func (c *TableClient) Stop() { - c.conn.Close() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go deleted file mode 100644 
index d93ca487a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go +++ /dev/null @@ -1,366 +0,0 @@ -package local - -import ( - "bytes" - "context" - "errors" - "flag" - "fmt" - "os" - "path" - "path/filepath" - "sync" - "time" - - "github.com/go-kit/log/level" - "go.etcd.io/bbolt" - - "github.com/cortexproject/cortex/pkg/chunk" - chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -var ( - bucketName = []byte("index") - ErrUnexistentBoltDB = errors.New("boltdb file does not exist") -) - -const ( - separator = "\000" - dbReloadPeriod = 10 * time.Minute - - DBOperationRead = iota - DBOperationWrite - - openBoltDBFileTimeout = 5 * time.Second -) - -// BoltDBConfig for a BoltDB index client. -type BoltDBConfig struct { - Directory string `yaml:"directory"` -} - -// RegisterFlags registers flags. -func (cfg *BoltDBConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Directory, "boltdb.dir", "", "Location of BoltDB index files.") -} - -type BoltIndexClient struct { - cfg BoltDBConfig - - dbsMtx sync.RWMutex - dbs map[string]*bbolt.DB - done chan struct{} - wait sync.WaitGroup -} - -// NewBoltDBIndexClient creates a new IndexClient that used BoltDB. -func NewBoltDBIndexClient(cfg BoltDBConfig) (*BoltIndexClient, error) { - if err := chunk_util.EnsureDirectory(cfg.Directory); err != nil { - return nil, err - } - - indexClient := &BoltIndexClient{ - cfg: cfg, - dbs: map[string]*bbolt.DB{}, - done: make(chan struct{}), - } - - indexClient.wait.Add(1) - go indexClient.loop() - return indexClient, nil -} - -func (b *BoltIndexClient) loop() { - defer b.wait.Done() - - ticker := time.NewTicker(dbReloadPeriod) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - b.reload() - case <-b.done: - return - } - } -} - -func (b *BoltIndexClient) reload() { - b.dbsMtx.RLock() - - removedDBs := []string{} - for name := range b.dbs { - if _, err := os.Stat(path.Join(b.cfg.Directory, name)); err != nil && os.IsNotExist(err) { - removedDBs = append(removedDBs, name) - level.Debug(util_log.Logger).Log("msg", "boltdb file got removed", "filename", name) - continue - } - } - b.dbsMtx.RUnlock() - - if len(removedDBs) != 0 { - b.dbsMtx.Lock() - defer b.dbsMtx.Unlock() - - for _, name := range removedDBs { - if err := b.dbs[name].Close(); err != nil { - level.Error(util_log.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err) - continue - } - delete(b.dbs, name) - } - } - -} - -func (b *BoltIndexClient) Stop() { - close(b.done) - - b.dbsMtx.Lock() - defer b.dbsMtx.Unlock() - for _, db := range b.dbs { - db.Close() - } - - b.wait.Wait() -} - -func (b *BoltIndexClient) NewWriteBatch() chunk.WriteBatch { - return &BoltWriteBatch{ - Writes: map[string]TableWrites{}, - } -} - -// GetDB should always return a db for write operation unless an error occurs while doing so. 
-// While for read operation it should throw ErrUnexistentBoltDB error if file does not exist for reading -func (b *BoltIndexClient) GetDB(name string, operation int) (*bbolt.DB, error) { - b.dbsMtx.RLock() - db, ok := b.dbs[name] - b.dbsMtx.RUnlock() - if ok { - return db, nil - } - - // we do not want to create a new db for reading if it does not exist - if operation == DBOperationRead { - if _, err := os.Stat(path.Join(b.cfg.Directory, name)); err != nil { - if os.IsNotExist(err) { - return nil, ErrUnexistentBoltDB - } - return nil, err - } - } - - b.dbsMtx.Lock() - defer b.dbsMtx.Unlock() - db, ok = b.dbs[name] - if ok { - return db, nil - } - - // Open the database. - // Set Timeout to avoid obtaining file lock wait indefinitely. - db, err := bbolt.Open(path.Join(b.cfg.Directory, name), 0666, &bbolt.Options{Timeout: openBoltDBFileTimeout}) - if err != nil { - return nil, err - } - - b.dbs[name] = db - return db, nil -} - -func (b *BoltIndexClient) WriteToDB(ctx context.Context, db *bbolt.DB, writes TableWrites) error { - return db.Update(func(tx *bbolt.Tx) error { - var b *bbolt.Bucket - - // a bucket should already exist for deletes, for other writes we create one otherwise. - if len(writes.deletes) != 0 { - b = tx.Bucket(bucketName) - if b == nil { - return fmt.Errorf("bucket %s not found in table %s", bucketName, filepath.Base(db.Path())) - } - } else { - var err error - b, err = tx.CreateBucketIfNotExists(bucketName) - if err != nil { - return err - } - } - - for key, value := range writes.puts { - if err := b.Put([]byte(key), value); err != nil { - return err - } - } - - for key := range writes.deletes { - if err := b.Delete([]byte(key)); err != nil { - return err - } - } - - return nil - }) -} - -func (b *BoltIndexClient) BatchWrite(ctx context.Context, batch chunk.WriteBatch) error { - for table, writes := range batch.(*BoltWriteBatch).Writes { - db, err := b.GetDB(table, DBOperationWrite) - if err != nil { - return err - } - - err = b.WriteToDB(ctx, db, writes) - if err != nil { - return err - } - } - - return nil -} - -func (b *BoltIndexClient) QueryPages(ctx context.Context, queries []chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) (shouldContinue bool)) error { - return chunk_util.DoParallelQueries(ctx, b.query, queries, callback) -} - -func (b *BoltIndexClient) query(ctx context.Context, query chunk.IndexQuery, callback chunk_util.Callback) error { - db, err := b.GetDB(query.TableName, DBOperationRead) - if err != nil { - if err == ErrUnexistentBoltDB { - return nil - } - - return err - } - - return b.QueryDB(ctx, db, query, callback) -} - -func (b *BoltIndexClient) QueryDB(ctx context.Context, db *bbolt.DB, query chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) (shouldContinue bool)) error { - return db.View(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(bucketName) - if bucket == nil { - return nil - } - - return b.QueryWithCursor(ctx, bucket.Cursor(), query, callback) - }) -} - -func (b *BoltIndexClient) QueryWithCursor(_ context.Context, c *bbolt.Cursor, query chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) (shouldContinue bool)) error { - var start []byte - if len(query.RangeValuePrefix) > 0 { - start = []byte(query.HashValue + separator + string(query.RangeValuePrefix)) - } else if len(query.RangeValueStart) > 0 { - start = []byte(query.HashValue + separator + string(query.RangeValueStart)) - } else { - start = []byte(query.HashValue + separator) - } - - rowPrefix := []byte(query.HashValue + separator) - - var 
batch boltReadBatch - - for k, v := c.Seek(start); k != nil; k, v = c.Next() { - if !bytes.HasPrefix(k, rowPrefix) { - break - } - - if len(query.RangeValuePrefix) > 0 && !bytes.HasPrefix(k, start) { - break - } - if len(query.ValueEqual) > 0 && !bytes.Equal(v, query.ValueEqual) { - continue - } - - // make a copy since k, v are only valid for the life of the transaction. - // See: https://godoc.org/github.com/boltdb/bolt#Cursor.Seek - batch.rangeValue = make([]byte, len(k)-len(rowPrefix)) - copy(batch.rangeValue, k[len(rowPrefix):]) - - batch.value = make([]byte, len(v)) - copy(batch.value, v) - - if !callback(query, &batch) { - break - } - } - - return nil -} - -type TableWrites struct { - puts map[string][]byte - deletes map[string]struct{} -} - -type BoltWriteBatch struct { - Writes map[string]TableWrites -} - -func (b *BoltWriteBatch) getOrCreateTableWrites(tableName string) TableWrites { - writes, ok := b.Writes[tableName] - if !ok { - writes = TableWrites{ - puts: map[string][]byte{}, - deletes: map[string]struct{}{}, - } - b.Writes[tableName] = writes - } - - return writes -} - -func (b *BoltWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { - writes := b.getOrCreateTableWrites(tableName) - - key := hashValue + separator + string(rangeValue) - writes.deletes[key] = struct{}{} -} - -func (b *BoltWriteBatch) Add(tableName, hashValue string, rangeValue []byte, value []byte) { - writes := b.getOrCreateTableWrites(tableName) - - key := hashValue + separator + string(rangeValue) - writes.puts[key] = value -} - -type boltReadBatch struct { - rangeValue []byte - value []byte -} - -func (b boltReadBatch) Iterator() chunk.ReadBatchIterator { - return &boltReadBatchIterator{ - boltReadBatch: b, - } -} - -type boltReadBatchIterator struct { - consumed bool - boltReadBatch -} - -func (b *boltReadBatchIterator) Next() bool { - if b.consumed { - return false - } - b.consumed = true - return true -} - -func (b *boltReadBatchIterator) RangeValue() []byte { - return b.rangeValue -} - -func (b *boltReadBatchIterator) Value() []byte { - return b.value -} - -// Open the database. -// Set Timeout to avoid obtaining file lock wait indefinitely. -func OpenBoltdbFile(path string) (*bbolt.DB, error) { - return bbolt.Open(path, 0666, &bbolt.Options{Timeout: 5 * time.Second}) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go deleted file mode 100644 index bb3d6f57d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go +++ /dev/null @@ -1,61 +0,0 @@ -package local - -import ( - "context" - "os" - "path/filepath" - - "github.com/cortexproject/cortex/pkg/chunk" -) - -type TableClient struct { - directory string -} - -// NewTableClient returns a new TableClient. 
-func NewTableClient(directory string) (chunk.TableClient, error) { - return &TableClient{directory: directory}, nil -} - -func (c *TableClient) ListTables(ctx context.Context) ([]string, error) { - boltDbFiles := []string{} - err := filepath.Walk(c.directory, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - boltDbFiles = append(boltDbFiles, info.Name()) - } - return nil - }) - - if err != nil { - return nil, err - } - return boltDbFiles, nil -} - -func (c *TableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error { - file, err := os.OpenFile(filepath.Join(c.directory, desc.Name), os.O_CREATE|os.O_RDONLY, 0666) - if err != nil { - return err - } - - return file.Close() -} - -func (c *TableClient) DeleteTable(ctx context.Context, name string) error { - return os.Remove(filepath.Join(c.directory, name)) -} - -func (c *TableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { - return chunk.TableDesc{ - Name: name, - }, true, nil -} - -func (c *TableClient) UpdateTable(ctx context.Context, current, expected chunk.TableDesc) error { - return nil -} - -func (*TableClient) Stop() {} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go deleted file mode 100644 index 5e0bc9f9c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go +++ /dev/null @@ -1,80 +0,0 @@ -package local - -import ( - "io" - "io/ioutil" - "os" - "time" - - "github.com/prometheus/common/model" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/objectclient" - "github.com/cortexproject/cortex/pkg/chunk/testutils" -) - -type fixture struct { - name string - dirname string -} - -func (f *fixture) Name() string { - return f.name -} - -func (f *fixture) Clients() ( - indexClient chunk.IndexClient, chunkClient chunk.Client, tableClient chunk.TableClient, - schemaConfig chunk.SchemaConfig, closer io.Closer, err error, -) { - f.dirname, err = ioutil.TempDir(os.TempDir(), "boltdb") - if err != nil { - return - } - - indexClient, err = NewBoltDBIndexClient(BoltDBConfig{ - Directory: f.dirname, - }) - if err != nil { - return - } - - oClient, err := NewFSObjectClient(FSConfig{Directory: f.dirname}) - if err != nil { - return - } - - chunkClient = objectclient.NewClient(oClient, objectclient.Base64Encoder) - - tableClient, err = NewTableClient(f.dirname) - if err != nil { - return - } - - schemaConfig = chunk.SchemaConfig{ - Configs: []chunk.PeriodConfig{{ - IndexType: "boltdb", - From: chunk.DayTime{Time: model.Now()}, - ChunkTables: chunk.PeriodicTableConfig{ - Prefix: "chunks", - Period: 10 * time.Minute, - }, - IndexTables: chunk.PeriodicTableConfig{ - Prefix: "index", - Period: 10 * time.Minute, - }, - }}, - } - - closer = testutils.CloserFunc(func() error { - return os.RemoveAll(f.dirname) - }) - - return -} - -// Fixtures for unit testing GCP storage. 
-var Fixtures = []testutils.Fixture{ - &fixture{ - name: "boltdb", - }, -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go deleted file mode 100644 index 27ce8e1f7..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go +++ /dev/null @@ -1,211 +0,0 @@ -package local - -import ( - "context" - "flag" - "fmt" - "io" - "os" - "path/filepath" - "time" - - "github.com/go-kit/log/level" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/runutil" -) - -// FSConfig is the config for a FSObjectClient. -type FSConfig struct { - Directory string `yaml:"directory"` -} - -// RegisterFlags registers flags. -func (cfg *FSConfig) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("", f) -} - -// RegisterFlags registers flags with prefix. -func (cfg *FSConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.Directory, prefix+"local.chunk-directory", "", "Directory to store chunks in.") -} - -// FSObjectClient holds config for filesystem as object store -type FSObjectClient struct { - cfg FSConfig - pathSeparator string -} - -// NewFSObjectClient makes a chunk.Client which stores chunks as files in the local filesystem. -func NewFSObjectClient(cfg FSConfig) (*FSObjectClient, error) { - // filepath.Clean cleans up the path by removing unwanted duplicate slashes, dots etc. - // This is needed because DeleteObject works on paths which are already cleaned up and it - // checks whether it is about to delete the configured directory when it becomes empty - cfg.Directory = filepath.Clean(cfg.Directory) - if err := util.EnsureDirectory(cfg.Directory); err != nil { - return nil, err - } - - return &FSObjectClient{ - cfg: cfg, - pathSeparator: string(os.PathSeparator), - }, nil -} - -// Stop implements ObjectClient -func (FSObjectClient) Stop() {} - -// GetObject from the store -func (f *FSObjectClient) GetObject(_ context.Context, objectKey string) (io.ReadCloser, error) { - fl, err := os.Open(filepath.Join(f.cfg.Directory, filepath.FromSlash(objectKey))) - if err != nil && os.IsNotExist(err) { - return nil, chunk.ErrStorageObjectNotFound - } - - return fl, err -} - -// PutObject into the store -func (f *FSObjectClient) PutObject(_ context.Context, objectKey string, object io.ReadSeeker) error { - fullPath := filepath.Join(f.cfg.Directory, filepath.FromSlash(objectKey)) - err := util.EnsureDirectory(filepath.Dir(fullPath)) - if err != nil { - return err - } - - fl, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - - defer runutil.CloseWithLogOnErr(util_log.Logger, fl, "fullPath: %s", fullPath) - - _, err = io.Copy(fl, object) - if err != nil { - return err - } - - err = fl.Sync() - if err != nil { - return err - } - - return fl.Close() -} - -// List implements chunk.ObjectClient. -// FSObjectClient assumes that prefix is a directory, and only supports "" and "/" delimiters. 
-func (f *FSObjectClient) List(ctx context.Context, prefix, delimiter string) ([]chunk.StorageObject, []chunk.StorageCommonPrefix, error) { - if delimiter != "" && delimiter != "/" { - return nil, nil, fmt.Errorf("unsupported delimiter: %q", delimiter) - } - - folderPath := filepath.Join(f.cfg.Directory, filepath.FromSlash(prefix)) - - info, err := os.Stat(folderPath) - if err != nil { - if os.IsNotExist(err) { - return nil, nil, nil - } - return nil, nil, err - } - if !info.IsDir() { - // When listing single file, return this file only. - return []chunk.StorageObject{{Key: info.Name(), ModifiedAt: info.ModTime()}}, nil, nil - } - - var storageObjects []chunk.StorageObject - var commonPrefixes []chunk.StorageCommonPrefix - - err = filepath.Walk(folderPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Ignore starting folder itself. - if path == folderPath { - return nil - } - - relPath, err := filepath.Rel(f.cfg.Directory, path) - if err != nil { - return err - } - - relPath = filepath.ToSlash(relPath) - - if info.IsDir() { - if delimiter == "" { - // Go into directory - return nil - } - - empty, err := isDirEmpty(path) - if err != nil { - return err - } - - if !empty { - commonPrefixes = append(commonPrefixes, chunk.StorageCommonPrefix(relPath+delimiter)) - } - return filepath.SkipDir - } - - storageObjects = append(storageObjects, chunk.StorageObject{Key: relPath, ModifiedAt: info.ModTime()}) - return nil - }) - - return storageObjects, commonPrefixes, err -} - -func (f *FSObjectClient) DeleteObject(ctx context.Context, objectKey string) error { - // inspired from https://github.com/thanos-io/thanos/blob/55cb8ca38b3539381dc6a781e637df15c694e50a/pkg/objstore/filesystem/filesystem.go#L195 - file := filepath.Join(f.cfg.Directory, filepath.FromSlash(objectKey)) - - for file != f.cfg.Directory { - if err := os.Remove(file); err != nil { - return err - } - - file = filepath.Dir(file) - empty, err := isDirEmpty(file) - if err != nil { - return err - } - - if !empty { - break - } - } - - return nil -} - -// DeleteChunksBefore implements BucketClient -func (f *FSObjectClient) DeleteChunksBefore(ctx context.Context, ts time.Time) error { - return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, err error) error { - if !info.IsDir() && info.ModTime().Before(ts) { - level.Info(util_log.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name()) - if err := os.Remove(path); err != nil { - return err - } - } - return nil - }) -} - -// copied from https://github.com/thanos-io/thanos/blob/55cb8ca38b3539381dc6a781e637df15c694e50a/pkg/objstore/filesystem/filesystem.go#L181 -func isDirEmpty(name string) (ok bool, err error) { - f, err := os.Open(name) - if err != nil { - return false, err - } - defer runutil.CloseWithErrCapture(&err, f, "dir open") - - if _, err = f.Readdir(1); err == io.EOF { - return true, nil - } - return false, err -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.pb.go deleted file mode 100644 index 5646b2b4e..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.pb.go +++ /dev/null @@ -1,1353 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: delete_plan.proto - -package purger - -import ( - fmt "fmt" - _ "github.com/cortexproject/cortex/pkg/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// DeletePlan holds all the chunks that are supposed to be deleted within an interval(usually a day) -// This Proto file is used just for storing Delete Plans in proto format. -type DeletePlan struct { - PlanInterval *Interval `protobuf:"bytes,1,opt,name=plan_interval,json=planInterval,proto3" json:"plan_interval,omitempty"` - ChunksGroup []ChunksGroup `protobuf:"bytes,2,rep,name=chunks_group,json=chunksGroup,proto3" json:"chunks_group"` -} - -func (m *DeletePlan) Reset() { *m = DeletePlan{} } -func (*DeletePlan) ProtoMessage() {} -func (*DeletePlan) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{0} -} -func (m *DeletePlan) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeletePlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeletePlan.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeletePlan) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeletePlan.Merge(m, src) -} -func (m *DeletePlan) XXX_Size() int { - return m.Size() -} -func (m *DeletePlan) XXX_DiscardUnknown() { - xxx_messageInfo_DeletePlan.DiscardUnknown(m) -} - -var xxx_messageInfo_DeletePlan proto.InternalMessageInfo - -func (m *DeletePlan) GetPlanInterval() *Interval { - if m != nil { - return m.PlanInterval - } - return nil -} - -func (m *DeletePlan) GetChunksGroup() []ChunksGroup { - if m != nil { - return m.ChunksGroup - } - return nil -} - -// ChunksGroup holds ChunkDetails and Labels for a group of chunks which have same series ID -type ChunksGroup struct { - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"labels"` - Chunks []ChunkDetails `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` -} - -func (m *ChunksGroup) Reset() { *m = ChunksGroup{} } -func (*ChunksGroup) ProtoMessage() {} -func (*ChunksGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{1} -} -func (m *ChunksGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChunksGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChunksGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChunksGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChunksGroup.Merge(m, src) -} -func (m *ChunksGroup) XXX_Size() 
int { - return m.Size() -} -func (m *ChunksGroup) XXX_DiscardUnknown() { - xxx_messageInfo_ChunksGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_ChunksGroup proto.InternalMessageInfo - -func (m *ChunksGroup) GetChunks() []ChunkDetails { - if m != nil { - return m.Chunks - } - return nil -} - -type ChunkDetails struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - PartiallyDeletedInterval *Interval `protobuf:"bytes,2,opt,name=partially_deleted_interval,json=partiallyDeletedInterval,proto3" json:"partially_deleted_interval,omitempty"` -} - -func (m *ChunkDetails) Reset() { *m = ChunkDetails{} } -func (*ChunkDetails) ProtoMessage() {} -func (*ChunkDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{2} -} -func (m *ChunkDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChunkDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ChunkDetails.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ChunkDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChunkDetails.Merge(m, src) -} -func (m *ChunkDetails) XXX_Size() int { - return m.Size() -} -func (m *ChunkDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ChunkDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_ChunkDetails proto.InternalMessageInfo - -func (m *ChunkDetails) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *ChunkDetails) GetPartiallyDeletedInterval() *Interval { - if m != nil { - return m.PartiallyDeletedInterval - } - return nil -} - -type Interval struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` -} - -func (m *Interval) Reset() { *m = Interval{} } -func (*Interval) ProtoMessage() {} -func (*Interval) Descriptor() ([]byte, []int) { - return fileDescriptor_c38868cf63b27372, []int{3} -} -func (m *Interval) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Interval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Interval.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Interval) XXX_Merge(src proto.Message) { - xxx_messageInfo_Interval.Merge(m, src) -} -func (m *Interval) XXX_Size() int { - return m.Size() -} -func (m *Interval) XXX_DiscardUnknown() { - xxx_messageInfo_Interval.DiscardUnknown(m) -} - -var xxx_messageInfo_Interval proto.InternalMessageInfo - -func (m *Interval) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *Interval) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func init() { - proto.RegisterType((*DeletePlan)(nil), "purgeplan.DeletePlan") - proto.RegisterType((*ChunksGroup)(nil), "purgeplan.ChunksGroup") - proto.RegisterType((*ChunkDetails)(nil), "purgeplan.ChunkDetails") - proto.RegisterType((*Interval)(nil), "purgeplan.Interval") -} - -func init() { proto.RegisterFile("delete_plan.proto", fileDescriptor_c38868cf63b27372) } - -var fileDescriptor_c38868cf63b27372 = []byte{ - // 446 bytes 
of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x8b, 0xd4, 0x30, - 0x18, 0x6d, 0xba, 0x52, 0xdc, 0x74, 0x5c, 0xd6, 0x2c, 0x68, 0x99, 0x43, 0x76, 0xe9, 0x69, 0x0e, - 0xda, 0x81, 0x15, 0x41, 0x41, 0x90, 0x1d, 0x0b, 0x32, 0xa0, 0xb0, 0x16, 0x4f, 0x5e, 0x4a, 0xda, - 0xc6, 0x6e, 0xdd, 0xb4, 0x89, 0x69, 0x2a, 0x7a, 0xf3, 0xe6, 0xd5, 0x9f, 0xe1, 0x0f, 0xf0, 0x47, - 0xec, 0x71, 0x8e, 0x8b, 0x87, 0xc1, 0xe9, 0x5c, 0x3c, 0xce, 0x4f, 0x90, 0xa6, 0xed, 0x4c, 0x15, - 0x3c, 0x78, 0xcb, 0xfb, 0xde, 0x7b, 0xc9, 0xcb, 0x4b, 0xe0, 0xed, 0x84, 0x32, 0xaa, 0x68, 0x28, - 0x18, 0x29, 0x3c, 0x21, 0xb9, 0xe2, 0x68, 0x5f, 0x54, 0x32, 0xa5, 0xcd, 0x60, 0x7c, 0x3f, 0xcd, - 0xd4, 0x45, 0x15, 0x79, 0x31, 0xcf, 0xa7, 0x29, 0x4f, 0xf9, 0x54, 0x2b, 0xa2, 0xea, 0xad, 0x46, - 0x1a, 0xe8, 0x55, 0xeb, 0x1c, 0x3f, 0x1e, 0xc8, 0x63, 0x2e, 0x15, 0xfd, 0x28, 0x24, 0x7f, 0x47, - 0x63, 0xd5, 0xa1, 0xa9, 0xb8, 0x4c, 0x7b, 0x22, 0xea, 0x16, 0xad, 0xd5, 0xfd, 0x02, 0x20, 0xf4, - 0x75, 0x94, 0x73, 0x46, 0x0a, 0xf4, 0x08, 0xde, 0x6a, 0x02, 0x84, 0x59, 0xa1, 0xa8, 0xfc, 0x40, - 0x98, 0x03, 0x4e, 0xc0, 0xc4, 0x3e, 0x3d, 0xf2, 0xb6, 0xd9, 0xbc, 0x79, 0x47, 0x05, 0xa3, 0x06, - 0xf6, 0x08, 0x3d, 0x85, 0xa3, 0xf8, 0xa2, 0x2a, 0x2e, 0xcb, 0x30, 0x95, 0xbc, 0x12, 0x8e, 0x79, - 0xb2, 0x37, 0xb1, 0x4f, 0xef, 0x0c, 0x8c, 0xcf, 0x34, 0xfd, 0xbc, 0x61, 0x67, 0x37, 0xae, 0x96, - 0xc7, 0x46, 0x60, 0xc7, 0xbb, 0x91, 0xfb, 0x1d, 0x40, 0x7b, 0x20, 0x41, 0x05, 0xb4, 0x18, 0x89, - 0x28, 0x2b, 0x1d, 0xa0, 0xb7, 0x3a, 0xf2, 0xfa, 0x1b, 0x78, 0x2f, 0x9a, 0xf9, 0x39, 0xc9, 0xe4, - 0xec, 0xac, 0xd9, 0xe7, 0xc7, 0xf2, 0xf8, 0xbf, 0x1a, 0x68, 0xfd, 0x67, 0x09, 0x11, 0x8a, 0xca, - 0xa0, 0x3b, 0x05, 0x3d, 0x84, 0x56, 0x1b, 0xa7, 0x8b, 0x7e, 0xf7, 0xef, 0xe8, 0x3e, 0x55, 0x24, - 0x63, 0x65, 0x97, 0xbd, 0x13, 0xbb, 0xef, 0xe1, 0x68, 0xc8, 0xa2, 0x03, 0x68, 0xce, 0x7d, 0x5d, - 0xdb, 0x7e, 0x60, 0xce, 0x7d, 0xf4, 0x0a, 0x8e, 0x05, 0x91, 0x2a, 0x23, 0x8c, 0x7d, 0x0a, 0xdb, - 0x47, 0x4f, 0x76, 0xf5, 0x9a, 0xff, 0xae, 0xd7, 0xd9, 0xda, 0xda, 0xf7, 0x49, 0x7a, 0xc6, 0x8d, - 0xe0, 0xcd, 0x6d, 0xed, 0xf7, 0x20, 0x2a, 0x15, 0x91, 0x2a, 0x54, 0x59, 0x4e, 0x4b, 0x45, 0x72, - 0x11, 0xe6, 0xa5, 0x3e, 0x7e, 0x2f, 0x38, 0xd4, 0xcc, 0xeb, 0x9e, 0x78, 0x59, 0xa2, 0x09, 0x3c, - 0xa4, 0x45, 0xf2, 0xa7, 0xd6, 0xd4, 0xda, 0x03, 0x5a, 0x24, 0x03, 0xe5, 0xec, 0xc9, 0x62, 0x85, - 0x8d, 0xeb, 0x15, 0x36, 0x36, 0x2b, 0x0c, 0x3e, 0xd7, 0x18, 0x7c, 0xab, 0x31, 0xb8, 0xaa, 0x31, - 0x58, 0xd4, 0x18, 0xfc, 0xac, 0x31, 0xf8, 0x55, 0x63, 0x63, 0x53, 0x63, 0xf0, 0x75, 0x8d, 0x8d, - 0xc5, 0x1a, 0x1b, 0xd7, 0x6b, 0x6c, 0xbc, 0xb1, 0xf4, 0x3d, 0x64, 0x64, 0xe9, 0xcf, 0xf5, 0xe0, - 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x46, 0x96, 0xf6, 0xe6, 0x02, 0x00, 0x00, -} - -func (this *DeletePlan) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DeletePlan) - if !ok { - that2, ok := that.(DeletePlan) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.PlanInterval.Equal(that1.PlanInterval) { - return false - } - if len(this.ChunksGroup) != len(that1.ChunksGroup) { - return false - } - for i := range this.ChunksGroup { - if !this.ChunksGroup[i].Equal(&that1.ChunksGroup[i]) { - return false - } - } - return true -} -func (this *ChunksGroup) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChunksGroup) - if !ok { - that2, ok := 
that.(ChunksGroup) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(&that1.Chunks[i]) { - return false - } - } - return true -} -func (this *ChunkDetails) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ChunkDetails) - if !ok { - that2, ok := that.(ChunkDetails) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ID != that1.ID { - return false - } - if !this.PartiallyDeletedInterval.Equal(that1.PartiallyDeletedInterval) { - return false - } - return true -} -func (this *Interval) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Interval) - if !ok { - that2, ok := that.(Interval) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - return true -} -func (this *DeletePlan) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.DeletePlan{") - if this.PlanInterval != nil { - s = append(s, "PlanInterval: "+fmt.Sprintf("%#v", this.PlanInterval)+",\n") - } - if this.ChunksGroup != nil { - vs := make([]*ChunksGroup, len(this.ChunksGroup)) - for i := range vs { - vs[i] = &this.ChunksGroup[i] - } - s = append(s, "ChunksGroup: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ChunksGroup) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.ChunksGroup{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Chunks != nil { - vs := make([]*ChunkDetails, len(this.Chunks)) - for i := range vs { - vs[i] = &this.Chunks[i] - } - s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ChunkDetails) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.ChunkDetails{") - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - if this.PartiallyDeletedInterval != nil { - s = append(s, "PartiallyDeletedInterval: "+fmt.Sprintf("%#v", this.PartiallyDeletedInterval)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Interval) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&purger.Interval{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringDeletePlan(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *DeletePlan) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeletePlan) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeletePlan) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChunksGroup) > 0 { - for iNdEx := len(m.ChunksGroup) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChunksGroup[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.PlanInterval != nil { - { - size, err := m.PlanInterval.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ChunksGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChunksGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChunksGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ChunkDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChunkDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChunkDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PartiallyDeletedInterval != nil { - { - size, err := m.PartiallyDeletedInterval.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeletePlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintDeletePlan(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Interval) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Interval) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Interval) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.EndTimestampMs != 0 { - i = encodeVarintDeletePlan(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if 
m.StartTimestampMs != 0 { - i = encodeVarintDeletePlan(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintDeletePlan(dAtA []byte, offset int, v uint64) int { - offset -= sovDeletePlan(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DeletePlan) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PlanInterval != nil { - l = m.PlanInterval.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - if len(m.ChunksGroup) > 0 { - for _, e := range m.ChunksGroup { - l = e.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - } - return n -} - -func (m *ChunksGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - } - return n -} - -func (m *ChunkDetails) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovDeletePlan(uint64(l)) - } - if m.PartiallyDeletedInterval != nil { - l = m.PartiallyDeletedInterval.Size() - n += 1 + l + sovDeletePlan(uint64(l)) - } - return n -} - -func (m *Interval) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovDeletePlan(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovDeletePlan(uint64(m.EndTimestampMs)) - } - return n -} - -func sovDeletePlan(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDeletePlan(x uint64) (n int) { - return sovDeletePlan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *DeletePlan) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunksGroup := "[]ChunksGroup{" - for _, f := range this.ChunksGroup { - repeatedStringForChunksGroup += strings.Replace(strings.Replace(f.String(), "ChunksGroup", "ChunksGroup", 1), `&`, ``, 1) + "," - } - repeatedStringForChunksGroup += "}" - s := strings.Join([]string{`&DeletePlan{`, - `PlanInterval:` + strings.Replace(this.PlanInterval.String(), "Interval", "Interval", 1) + `,`, - `ChunksGroup:` + repeatedStringForChunksGroup + `,`, - `}`, - }, "") - return s -} -func (this *ChunksGroup) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunks := "[]ChunkDetails{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "ChunkDetails", "ChunkDetails", 1), `&`, ``, 1) + "," - } - repeatedStringForChunks += "}" - s := strings.Join([]string{`&ChunksGroup{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + repeatedStringForChunks + `,`, - `}`, - }, "") - return s -} -func (this *ChunkDetails) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ChunkDetails{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `PartiallyDeletedInterval:` + strings.Replace(this.PartiallyDeletedInterval.String(), "Interval", "Interval", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Interval) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Interval{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `}`, - }, "") - return s -} -func 
valueToStringDeletePlan(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *DeletePlan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeletePlan: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeletePlan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PlanInterval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PlanInterval == nil { - m.PlanInterval = &Interval{} - } - if err := m.PlanInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChunksGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChunksGroup = append(m.ChunksGroup, ChunksGroup{}) - if err := m.ChunksGroup[len(m.ChunksGroup)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChunksGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChunksGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChunksGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, ChunkDetails{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChunkDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChunkDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChunkDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartiallyDeletedInterval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeletePlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeletePlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PartiallyDeletedInterval == nil { - m.PartiallyDeletedInterval = &Interval{} - } - if err := m.PartiallyDeletedInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Interval) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Interval: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Interval: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeletePlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthDeletePlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDeletePlan(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDeletePlan - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthDeletePlan - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeletePlan - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipDeletePlan(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthDeletePlan - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthDeletePlan = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDeletePlan = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.proto b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.proto deleted file mode 100644 index 834fc0874..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_plan.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package purgeplan; - -option go_package = "purger"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// DeletePlan holds all the chunks that are supposed to be deleted within an interval(usually a day) -// This Proto file is used just for storing Delete Plans in proto format. 
-message DeletePlan { - Interval plan_interval = 1; - repeated ChunksGroup chunks_group = 2 [(gogoproto.nullable) = false]; -} - -// ChunksGroup holds ChunkDetails and Labels for a group of chunks which have same series ID -message ChunksGroup { - repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"]; - repeated ChunkDetails chunks = 2 [(gogoproto.nullable) = false]; -} - -message ChunkDetails { - string ID = 1; - Interval partially_deleted_interval = 2; -} - -message Interval { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go deleted file mode 100644 index f3ec1edbc..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go +++ /dev/null @@ -1,394 +0,0 @@ -package purger - -import ( - "context" - "encoding/binary" - "encoding/hex" - "errors" - "flag" - "fmt" - "hash/fnv" - "strconv" - "strings" - "time" - - "github.com/cortexproject/cortex/pkg/chunk" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" -) - -type ( - DeleteRequestStatus string - CacheKind string - indexType string -) - -const ( - StatusReceived DeleteRequestStatus = "received" - StatusBuildingPlan DeleteRequestStatus = "buildingPlan" - StatusDeleting DeleteRequestStatus = "deleting" - StatusProcessed DeleteRequestStatus = "processed" - - separator = "\000" // separator for series selectors in delete requests - - // CacheKindStore is for cache gen number for store cache - CacheKindStore CacheKind = "store" - // CacheKindResults is for cache gen number for results cache - CacheKindResults CacheKind = "results" - - deleteRequestID indexType = "1" - deleteRequestDetails indexType = "2" - cacheGenNum indexType = "3" -) - -var ( - pendingDeleteRequestStatuses = []DeleteRequestStatus{StatusReceived, StatusBuildingPlan, StatusDeleting} - - ErrDeleteRequestNotFound = errors.New("could not find matching delete request") -) - -// DeleteRequest holds all the details about a delete request. -type DeleteRequest struct { - RequestID string `json:"request_id"` - UserID string `json:"-"` - StartTime model.Time `json:"start_time"` - EndTime model.Time `json:"end_time"` - Selectors []string `json:"selectors"` - Status DeleteRequestStatus `json:"status"` - Matchers [][]*labels.Matcher `json:"-"` - CreatedAt model.Time `json:"created_at"` -} - -// cacheGenNumbers holds store and results cache gen numbers for a user. -type cacheGenNumbers struct { - store, results string -} - -// DeleteStore provides all the methods required to manage lifecycle of delete request and things related to it. -type DeleteStore struct { - cfg DeleteStoreConfig - indexClient chunk.IndexClient -} - -// DeleteStoreConfig holds configuration for delete store. -type DeleteStoreConfig struct { - Store string `yaml:"store"` - RequestsTableName string `yaml:"requests_table_name"` - ProvisionConfig TableProvisioningConfig `yaml:"table_provisioning"` -} - -// RegisterFlags adds the flags required to configure this flag set. 
-func (cfg *DeleteStoreConfig) RegisterFlags(f *flag.FlagSet) { - cfg.ProvisionConfig.RegisterFlags("deletes.table", f) - f.StringVar(&cfg.Store, "deletes.store", "", "Store for keeping delete request") - f.StringVar(&cfg.RequestsTableName, "deletes.requests-table-name", "delete_requests", "Name of the table which stores delete requests") -} - -// NewDeleteStore creates a store for managing delete requests. -func NewDeleteStore(cfg DeleteStoreConfig, indexClient chunk.IndexClient) (*DeleteStore, error) { - ds := DeleteStore{ - cfg: cfg, - indexClient: indexClient, - } - - return &ds, nil -} - -// Add creates entries for a new delete request. -func (ds *DeleteStore) AddDeleteRequest(ctx context.Context, userID string, startTime, endTime model.Time, selectors []string) error { - return ds.addDeleteRequest(ctx, userID, model.Now(), startTime, endTime, selectors) - -} - -// addDeleteRequest is also used for tests to create delete requests with different createdAt time. -func (ds *DeleteStore) addDeleteRequest(ctx context.Context, userID string, createdAt, startTime, endTime model.Time, selectors []string) error { - requestID := generateUniqueID(userID, selectors) - - for { - _, err := ds.GetDeleteRequest(ctx, userID, string(requestID)) - if err != nil { - if err == ErrDeleteRequestNotFound { - break - } - return err - } - - // we have a collision here, lets recreate a new requestID and check for collision - time.Sleep(time.Millisecond) - requestID = generateUniqueID(userID, selectors) - } - - // userID, requestID - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - // Add an entry with userID, requestID as range key and status as value to make it easy to manage and lookup status - // We don't want to set anything in hash key here since we would want to find delete requests by just status - writeBatch := ds.indexClient.NewWriteBatch() - writeBatch.Add(ds.cfg.RequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID), []byte(StatusReceived)) - - // Add another entry with additional details like creation time, time range of delete request and selectors in value - rangeValue := fmt.Sprintf("%x:%x:%x", int64(createdAt), int64(startTime), int64(endTime)) - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s", deleteRequestDetails, userIDAndRequestID), - []byte(rangeValue), []byte(strings.Join(selectors, separator))) - - // we update only cache gen number because only query responses are changing at this stage. - // we still have to query data from store for doing query time filtering and we don't want to invalidate its results now. - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, CacheKindResults), - []byte{}, []byte(strconv.FormatInt(time.Now().Unix(), 10))) - - return ds.indexClient.BatchWrite(ctx, writeBatch) -} - -// GetDeleteRequestsByStatus returns all delete requests for given status. -func (ds *DeleteStore) GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - ValueEqual: []byte(status), - }) -} - -// GetDeleteRequestsForUserByStatus returns all delete requests for a user with given status. 
-func (ds *DeleteStore) GetDeleteRequestsForUserByStatus(ctx context.Context, userID string, status DeleteRequestStatus) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - RangeValuePrefix: []byte(userID), - ValueEqual: []byte(status), - }) -} - -// GetAllDeleteRequestsForUser returns all delete requests for a user. -func (ds *DeleteStore) GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - RangeValuePrefix: []byte(userID), - }) -} - -// UpdateStatus updates status of a delete request. -func (ds *DeleteStore) UpdateStatus(ctx context.Context, userID, requestID string, newStatus DeleteRequestStatus) error { - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - writeBatch := ds.indexClient.NewWriteBatch() - writeBatch.Add(ds.cfg.RequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID), []byte(newStatus)) - - if newStatus == StatusProcessed { - // we have deleted data from store so invalidate cache only for store since we don't have to do runtime filtering anymore. - // we don't have to change cache gen number because we were anyways doing runtime filtering - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, CacheKindStore), []byte{}, []byte(strconv.FormatInt(time.Now().Unix(), 10))) - } - - return ds.indexClient.BatchWrite(ctx, writeBatch) -} - -// GetDeleteRequest returns delete request with given requestID. -func (ds *DeleteStore) GetDeleteRequest(ctx context.Context, userID, requestID string) (*DeleteRequest, error) { - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - deleteRequests, err := ds.queryDeleteRequests(ctx, chunk.IndexQuery{ - TableName: ds.cfg.RequestsTableName, - HashValue: string(deleteRequestID), - RangeValuePrefix: []byte(userIDAndRequestID), - }) - - if err != nil { - return nil, err - } - - if len(deleteRequests) == 0 { - return nil, ErrDeleteRequestNotFound - } - - return &deleteRequests[0], nil -} - -// GetPendingDeleteRequestsForUser returns all delete requests for a user which are not processed. -func (ds *DeleteStore) GetPendingDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { - pendingDeleteRequests := []DeleteRequest{} - for _, status := range pendingDeleteRequestStatuses { - deleteRequests, err := ds.GetDeleteRequestsForUserByStatus(ctx, userID, status) - if err != nil { - return nil, err - } - - pendingDeleteRequests = append(pendingDeleteRequests, deleteRequests...) - } - - return pendingDeleteRequests, nil -} - -func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery chunk.IndexQuery) ([]DeleteRequest, error) { - deleteRequests := []DeleteRequest{} - // No need to lock inside the callback since we run a single index query. 
- err := ds.indexClient.QueryPages(ctx, []chunk.IndexQuery{deleteQuery}, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - for itr.Next() { - userID, requestID := splitUserIDAndRequestID(string(itr.RangeValue())) - - deleteRequests = append(deleteRequests, DeleteRequest{ - UserID: userID, - RequestID: requestID, - Status: DeleteRequestStatus(itr.Value()), - }) - } - return true - }) - if err != nil { - return nil, err - } - - for i, deleteRequest := range deleteRequests { - deleteRequestQuery := []chunk.IndexQuery{ - { - TableName: ds.cfg.RequestsTableName, - HashValue: fmt.Sprintf("%s:%s:%s", deleteRequestDetails, deleteRequest.UserID, deleteRequest.RequestID), - }, - } - - var parseError error - err := ds.indexClient.QueryPages(ctx, deleteRequestQuery, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - itr.Next() - - deleteRequest, err = parseDeleteRequestTimestamps(itr.RangeValue(), deleteRequest) - if err != nil { - parseError = err - return false - } - - deleteRequest.Selectors = strings.Split(string(itr.Value()), separator) - deleteRequests[i] = deleteRequest - - return true - }) - - if err != nil { - return nil, err - } - - if parseError != nil { - return nil, parseError - } - } - - return deleteRequests, nil -} - -// getCacheGenerationNumbers returns cache gen numbers for a user. -func (ds *DeleteStore) getCacheGenerationNumbers(ctx context.Context, userID string) (*cacheGenNumbers, error) { - storeCacheGen, err := ds.queryCacheGenerationNumber(ctx, userID, CacheKindStore) - if err != nil { - return nil, err - } - - resultsCacheGen, err := ds.queryCacheGenerationNumber(ctx, userID, CacheKindResults) - if err != nil { - return nil, err - } - - return &cacheGenNumbers{storeCacheGen, resultsCacheGen}, nil -} - -func (ds *DeleteStore) queryCacheGenerationNumber(ctx context.Context, userID string, kind CacheKind) (string, error) { - query := chunk.IndexQuery{TableName: ds.cfg.RequestsTableName, HashValue: fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, kind)} - - genNumber := "" - err := ds.indexClient.QueryPages(ctx, []chunk.IndexQuery{query}, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { - itr := batch.Iterator() - for itr.Next() { - genNumber = string(itr.Value()) - break - } - return false - }) - - if err != nil { - return "", err - } - - return genNumber, nil -} - -// RemoveDeleteRequest removes a delete request and increments cache gen number -func (ds *DeleteStore) RemoveDeleteRequest(ctx context.Context, userID, requestID string, createdAt, startTime, endTime model.Time) error { - userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - - writeBatch := ds.indexClient.NewWriteBatch() - writeBatch.Delete(ds.cfg.RequestsTableName, string(deleteRequestID), []byte(userIDAndRequestID)) - - // Add another entry with additional details like creation time, time range of delete request and selectors in value - rangeValue := fmt.Sprintf("%x:%x:%x", int64(createdAt), int64(startTime), int64(endTime)) - writeBatch.Delete(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s", deleteRequestDetails, userIDAndRequestID), - []byte(rangeValue)) - - // we need to invalidate results cache since removal of delete request would cause query results to change - writeBatch.Add(ds.cfg.RequestsTableName, fmt.Sprintf("%s:%s:%s", cacheGenNum, userID, CacheKindResults), - []byte{}, []byte(strconv.FormatInt(time.Now().Unix(), 10))) - - return ds.indexClient.BatchWrite(ctx, 
writeBatch) -} - -func parseDeleteRequestTimestamps(rangeValue []byte, deleteRequest DeleteRequest) (DeleteRequest, error) { - hexParts := strings.Split(string(rangeValue), ":") - if len(hexParts) != 3 { - return deleteRequest, errors.New("invalid key in parsing delete request lookup response") - } - - createdAt, err := strconv.ParseInt(hexParts[0], 16, 64) - if err != nil { - return deleteRequest, err - } - - from, err := strconv.ParseInt(hexParts[1], 16, 64) - if err != nil { - return deleteRequest, err - - } - through, err := strconv.ParseInt(hexParts[2], 16, 64) - if err != nil { - return deleteRequest, err - - } - - deleteRequest.CreatedAt = model.Time(createdAt) - deleteRequest.StartTime = model.Time(from) - deleteRequest.EndTime = model.Time(through) - - return deleteRequest, nil -} - -// An id is useful in managing delete requests -func generateUniqueID(orgID string, selectors []string) []byte { - uniqueID := fnv.New32() - _, _ = uniqueID.Write([]byte(orgID)) - - timeNow := make([]byte, 8) - binary.LittleEndian.PutUint64(timeNow, uint64(time.Now().UnixNano())) - _, _ = uniqueID.Write(timeNow) - - for _, selector := range selectors { - _, _ = uniqueID.Write([]byte(selector)) - } - - return encodeUniqueID(uniqueID.Sum32()) -} - -func encodeUniqueID(t uint32) []byte { - throughBytes := make([]byte, 4) - binary.BigEndian.PutUint32(throughBytes, t) - encodedThroughBytes := make([]byte, 8) - hex.Encode(encodedThroughBytes, throughBytes) - return encodedThroughBytes -} - -func splitUserIDAndRequestID(rangeValue string) (userID, requestID string) { - lastIndex := strings.LastIndex(rangeValue, ":") - - userID = rangeValue[:lastIndex] - requestID = rangeValue[lastIndex+1:] - - return -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go deleted file mode 100644 index 7c37c2930..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go +++ /dev/null @@ -1,828 +0,0 @@ -package purger - -import ( - "bytes" - "context" - "flag" - "fmt" - "io/ioutil" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" - "github.com/weaveworks/common/user" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/cortexpb" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/services" -) - -const ( - millisecondPerDay = int64(24 * time.Hour / time.Millisecond) - statusSuccess = "success" - statusFail = "fail" - loadRequestsInterval = time.Hour - retryFailedRequestsInterval = 15 * time.Minute -) - -type purgerMetrics struct { - deleteRequestsProcessedTotal *prometheus.CounterVec - deleteRequestsChunksSelectedTotal *prometheus.CounterVec - deleteRequestsProcessingFailures *prometheus.CounterVec - loadPendingRequestsAttempsTotal *prometheus.CounterVec - oldestPendingDeleteRequestAgeSeconds prometheus.Gauge - pendingDeleteRequestsCount prometheus.Gauge -} - -func newPurgerMetrics(r prometheus.Registerer) *purgerMetrics { - m := purgerMetrics{} - - m.deleteRequestsProcessedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: 
"purger_delete_requests_processed_total", - Help: "Number of delete requests processed per user", - }, []string{"user"}) - m.deleteRequestsChunksSelectedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_delete_requests_chunks_selected_total", - Help: "Number of chunks selected while building delete plans per user", - }, []string{"user"}) - m.deleteRequestsProcessingFailures = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_delete_requests_processing_failures_total", - Help: "Number of delete requests processing failures per user", - }, []string{"user"}) - m.loadPendingRequestsAttempsTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_load_pending_requests_attempts_total", - Help: "Number of attempts that were made to load pending requests with status", - }, []string{"status"}) - m.oldestPendingDeleteRequestAgeSeconds = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "purger_oldest_pending_delete_request_age_seconds", - Help: "Age of oldest pending delete request in seconds, since they are over their cancellation period", - }) - m.pendingDeleteRequestsCount = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "purger_pending_delete_requests_count", - Help: "Count of delete requests which are over their cancellation period and have not finished processing yet", - }) - - return &m -} - -type deleteRequestWithLogger struct { - DeleteRequest - logger log.Logger // logger is initialized with userID and requestID to add context to every log generated using this -} - -// Config holds config for chunks Purger -type Config struct { - Enable bool `yaml:"enable"` - NumWorkers int `yaml:"num_workers"` - ObjectStoreType string `yaml:"object_store_type"` - DeleteRequestCancelPeriod time.Duration `yaml:"delete_request_cancel_period"` -} - -// RegisterFlags registers CLI flags for Config -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.BoolVar(&cfg.Enable, "purger.enable", false, "Enable purger to allow deletion of series. Be aware that Delete series feature is still experimental") - f.IntVar(&cfg.NumWorkers, "purger.num-workers", 2, "Number of workers executing delete plans in parallel") - f.StringVar(&cfg.ObjectStoreType, "purger.object-store-type", "", "Name of the object store to use for storing delete plans") - f.DurationVar(&cfg.DeleteRequestCancelPeriod, "purger.delete-request-cancel-period", 24*time.Hour, "Allow cancellation of delete request until duration after they are created. Data would be deleted only after delete requests have been older than this duration. Ideally this should be set to at least 24h.") -} - -type workerJob struct { - planNo int - userID string - deleteRequestID string - logger log.Logger -} - -// Purger does the purging of data which is requested to be deleted. Purger only works for chunks. 
-type Purger struct { - services.Service - - cfg Config - deleteStore *DeleteStore - chunkStore chunk.Store - objectClient chunk.ObjectClient - metrics *purgerMetrics - - executePlansChan chan deleteRequestWithLogger - workerJobChan chan workerJob - - // we would only allow processing of singe delete request at a time since delete requests touching same chunks could change the chunk IDs of partially deleted chunks - // and break the purge plan for other requests - inProcessRequests *inProcessRequestsCollection - - // We do not want to limit pulling new delete requests to a fixed interval which otherwise would limit number of delete requests we process per user. - // While loading delete requests if we find more requests from user pending to be processed, we just set their id in usersWithPendingRequests and - // when a user's delete request gets processed we just check this map to see whether we want to load more requests without waiting for next ticker to load new batch. - usersWithPendingRequests map[string]struct{} - usersWithPendingRequestsMtx sync.Mutex - pullNewRequestsChan chan struct{} - - pendingPlansCount map[string]int // per request pending plan count - pendingPlansCountMtx sync.Mutex - - wg sync.WaitGroup -} - -// NewPurger creates a new Purger -func NewPurger(cfg Config, deleteStore *DeleteStore, chunkStore chunk.Store, storageClient chunk.ObjectClient, registerer prometheus.Registerer) (*Purger, error) { - util_log.WarnExperimentalUse("Delete series API") - - purger := Purger{ - cfg: cfg, - deleteStore: deleteStore, - chunkStore: chunkStore, - objectClient: storageClient, - metrics: newPurgerMetrics(registerer), - pullNewRequestsChan: make(chan struct{}, 1), - executePlansChan: make(chan deleteRequestWithLogger, 50), - workerJobChan: make(chan workerJob, 50), - inProcessRequests: newInProcessRequestsCollection(), - usersWithPendingRequests: map[string]struct{}{}, - pendingPlansCount: map[string]int{}, - } - - purger.Service = services.NewBasicService(purger.init, purger.loop, purger.stop) - return &purger, nil -} - -// init starts workers, scheduler and then loads in process delete requests -func (p *Purger) init(ctx context.Context) error { - for i := 0; i < p.cfg.NumWorkers; i++ { - p.wg.Add(1) - go p.worker() - } - - p.wg.Add(1) - go p.jobScheduler(ctx) - - return p.loadInprocessDeleteRequests() -} - -func (p *Purger) loop(ctx context.Context) error { - loadRequests := func() { - status := statusSuccess - - err := p.pullDeleteRequestsToPlanDeletes() - if err != nil { - status = statusFail - level.Error(util_log.Logger).Log("msg", "error pulling delete requests for building plans", "err", err) - } - - p.metrics.loadPendingRequestsAttempsTotal.WithLabelValues(status).Inc() - } - - // load requests on startup instead of waiting for first ticker - loadRequests() - - loadRequestsTicker := time.NewTicker(loadRequestsInterval) - defer loadRequestsTicker.Stop() - - retryFailedRequestsTicker := time.NewTicker(retryFailedRequestsInterval) - defer retryFailedRequestsTicker.Stop() - - for { - select { - case <-loadRequestsTicker.C: - loadRequests() - case <-p.pullNewRequestsChan: - loadRequests() - case <-retryFailedRequestsTicker.C: - p.retryFailedRequests() - case <-ctx.Done(): - return nil - } - } -} - -// Stop waits until all background tasks stop. 
-func (p *Purger) stop(_ error) error { - p.wg.Wait() - return nil -} - -func (p *Purger) retryFailedRequests() { - userIDsWithFailedRequest := p.inProcessRequests.listUsersWithFailedRequest() - - for _, userID := range userIDsWithFailedRequest { - deleteRequest := p.inProcessRequests.get(userID) - if deleteRequest == nil { - level.Error(util_log.Logger).Log("msg", "expected an in-process delete request", "user", userID) - continue - } - - p.inProcessRequests.unsetFailedRequestForUser(userID) - err := p.resumeStalledRequest(*deleteRequest) - if err != nil { - reqWithLogger := makeDeleteRequestWithLogger(*deleteRequest, util_log.Logger) - level.Error(reqWithLogger.logger).Log("msg", "failed to resume failed request", "err", err) - } - } -} - -func (p *Purger) workerJobCleanup(job workerJob) { - err := p.removeDeletePlan(context.Background(), job.userID, job.deleteRequestID, job.planNo) - if err != nil { - level.Error(job.logger).Log("msg", "error removing delete plan", - "plan_no", job.planNo, "err", err) - return - } - - p.pendingPlansCountMtx.Lock() - p.pendingPlansCount[job.deleteRequestID]-- - - if p.pendingPlansCount[job.deleteRequestID] == 0 { - level.Info(job.logger).Log("msg", "finished execution of all plans, cleaning up and updating status of request") - - err := p.deleteStore.UpdateStatus(context.Background(), job.userID, job.deleteRequestID, StatusProcessed) - if err != nil { - level.Error(job.logger).Log("msg", "error updating delete request status to process", "err", err) - } - - p.metrics.deleteRequestsProcessedTotal.WithLabelValues(job.userID).Inc() - delete(p.pendingPlansCount, job.deleteRequestID) - p.pendingPlansCountMtx.Unlock() - - p.inProcessRequests.remove(job.userID) - - // request loading of more delete request if - // - user has more pending requests and - // - we do not have a pending request to load more requests - p.usersWithPendingRequestsMtx.Lock() - defer p.usersWithPendingRequestsMtx.Unlock() - if _, ok := p.usersWithPendingRequests[job.userID]; ok { - delete(p.usersWithPendingRequests, job.userID) - select { - case p.pullNewRequestsChan <- struct{}{}: - // sent - default: - // already sent - } - } else if len(p.usersWithPendingRequests) == 0 { - // there are no pending requests from any of the users, set the oldest pending request and number of pending requests to 0 - p.metrics.oldestPendingDeleteRequestAgeSeconds.Set(0) - p.metrics.pendingDeleteRequestsCount.Set(0) - } - } else { - p.pendingPlansCountMtx.Unlock() - } -} - -// we send all the delete plans to workerJobChan -func (p *Purger) jobScheduler(ctx context.Context) { - defer p.wg.Done() - - for { - select { - case req := <-p.executePlansChan: - numPlans := numPlans(req.StartTime, req.EndTime) - level.Info(req.logger).Log("msg", "sending jobs to workers for purging data", "num_jobs", numPlans) - - p.pendingPlansCountMtx.Lock() - p.pendingPlansCount[req.RequestID] = numPlans - p.pendingPlansCountMtx.Unlock() - - for i := 0; i < numPlans; i++ { - p.workerJobChan <- workerJob{planNo: i, userID: req.UserID, - deleteRequestID: req.RequestID, logger: req.logger} - } - case <-ctx.Done(): - close(p.workerJobChan) - return - } - } -} - -func (p *Purger) worker() { - defer p.wg.Done() - - for job := range p.workerJobChan { - err := p.executePlan(job.userID, job.deleteRequestID, job.planNo, job.logger) - if err != nil { - p.metrics.deleteRequestsProcessingFailures.WithLabelValues(job.userID).Inc() - level.Error(job.logger).Log("msg", "error executing delete plan", - "plan_no", job.planNo, "err", err) - continue 
- } - - p.workerJobCleanup(job) - } -} - -func (p *Purger) executePlan(userID, requestID string, planNo int, logger log.Logger) (err error) { - logger = log.With(logger, "plan_no", planNo) - - defer func() { - if err != nil { - p.inProcessRequests.setFailedRequestForUser(userID) - } - }() - - plan, err := p.getDeletePlan(context.Background(), userID, requestID, planNo) - if err != nil { - if err == chunk.ErrStorageObjectNotFound { - level.Info(logger).Log("msg", "plan not found, must have been executed already") - // this means plan was already executed and got removed. Do nothing. - return nil - } - return err - } - - level.Info(logger).Log("msg", "executing plan") - - ctx := user.InjectOrgID(context.Background(), userID) - - for i := range plan.ChunksGroup { - level.Debug(logger).Log("msg", "deleting chunks", "labels", plan.ChunksGroup[i].Labels) - - for _, chunkDetails := range plan.ChunksGroup[i].Chunks { - chunkRef, err := chunk.ParseExternalKey(userID, chunkDetails.ID) - if err != nil { - return err - } - - var partiallyDeletedInterval *model.Interval = nil - if chunkDetails.PartiallyDeletedInterval != nil { - partiallyDeletedInterval = &model.Interval{ - Start: model.Time(chunkDetails.PartiallyDeletedInterval.StartTimestampMs), - End: model.Time(chunkDetails.PartiallyDeletedInterval.EndTimestampMs), - } - } - - err = p.chunkStore.DeleteChunk(ctx, chunkRef.From, chunkRef.Through, chunkRef.UserID, - chunkDetails.ID, cortexpb.FromLabelAdaptersToLabels(plan.ChunksGroup[i].Labels), partiallyDeletedInterval) - if err != nil { - if isMissingChunkErr(err) { - level.Error(logger).Log("msg", "chunk not found for deletion. We may have already deleted it", - "chunk_id", chunkDetails.ID) - continue - } - return err - } - } - - level.Debug(logger).Log("msg", "deleting series", "labels", plan.ChunksGroup[i].Labels) - - // this is mostly required to clean up series ids from series store - err := p.chunkStore.DeleteSeriesIDs(ctx, model.Time(plan.PlanInterval.StartTimestampMs), model.Time(plan.PlanInterval.EndTimestampMs), - userID, cortexpb.FromLabelAdaptersToLabels(plan.ChunksGroup[i].Labels)) - if err != nil { - return err - } - } - - level.Info(logger).Log("msg", "finished execution of plan") - - return -} - -// we need to load all in process delete requests on startup to finish them first -func (p *Purger) loadInprocessDeleteRequests() error { - inprocessRequests, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusBuildingPlan) - if err != nil { - return err - } - - requestsWithDeletingStatus, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusDeleting) - if err != nil { - return err - } - - inprocessRequests = append(inprocessRequests, requestsWithDeletingStatus...) 
- - for i := range inprocessRequests { - deleteRequest := inprocessRequests[i] - p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) - req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - - level.Info(req.logger).Log("msg", "resuming in process delete requests", "status", deleteRequest.Status) - err = p.resumeStalledRequest(deleteRequest) - if err != nil { - level.Error(req.logger).Log("msg", "failed to resume stalled request", "err", err) - } - - } - - return nil -} - -func (p *Purger) resumeStalledRequest(deleteRequest DeleteRequest) error { - req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - - if deleteRequest.Status == StatusBuildingPlan { - err := p.buildDeletePlan(req) - if err != nil { - p.metrics.deleteRequestsProcessingFailures.WithLabelValues(deleteRequest.UserID).Inc() - return errors.Wrap(err, "failed to build delete plan") - } - - deleteRequest.Status = StatusDeleting - } - - if deleteRequest.Status == StatusDeleting { - level.Info(req.logger).Log("msg", "sending delete request for execution") - p.executePlansChan <- req - } - - return nil -} - -// pullDeleteRequestsToPlanDeletes pulls delete requests which do not have their delete plans built yet and sends them for building delete plans -// after pulling delete requests for building plans, it updates its status to StatusBuildingPlan status to avoid picking this up again next time -func (p *Purger) pullDeleteRequestsToPlanDeletes() error { - deleteRequests, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived) - if err != nil { - return err - } - - pendingDeleteRequestsCount := p.inProcessRequests.len() - now := model.Now() - oldestPendingRequestCreatedAt := model.Time(0) - - // requests which are still being processed are also considered pending - if pendingDeleteRequestsCount != 0 { - oldestInProcessRequest := p.inProcessRequests.getOldest() - if oldestInProcessRequest != nil { - oldestPendingRequestCreatedAt = oldestInProcessRequest.CreatedAt - } - } - - for i := range deleteRequests { - deleteRequest := deleteRequests[i] - - // adding an extra minute here to avoid a race between cancellation of request and picking of the request for processing - if deleteRequest.CreatedAt.Add(p.cfg.DeleteRequestCancelPeriod).Add(time.Minute).After(model.Now()) { - continue - } - - pendingDeleteRequestsCount++ - if oldestPendingRequestCreatedAt == 0 || deleteRequest.CreatedAt.Before(oldestPendingRequestCreatedAt) { - oldestPendingRequestCreatedAt = deleteRequest.CreatedAt - } - - if inprocessDeleteRequest := p.inProcessRequests.get(deleteRequest.UserID); inprocessDeleteRequest != nil { - p.usersWithPendingRequestsMtx.Lock() - p.usersWithPendingRequests[deleteRequest.UserID] = struct{}{} - p.usersWithPendingRequestsMtx.Unlock() - - level.Debug(util_log.Logger).Log("msg", "skipping delete request processing for now since another request from same user is already in process", - "inprocess_request_id", inprocessDeleteRequest.RequestID, - "skipped_request_id", deleteRequest.RequestID, "user_id", deleteRequest.UserID) - continue - } - - err = p.deleteStore.UpdateStatus(context.Background(), deleteRequest.UserID, deleteRequest.RequestID, StatusBuildingPlan) - if err != nil { - return err - } - - deleteRequest.Status = StatusBuildingPlan - p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) - req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) - - level.Info(req.logger).Log("msg", "building plan for a new delete request") - - err := 
p.buildDeletePlan(req) - if err != nil { - p.metrics.deleteRequestsProcessingFailures.WithLabelValues(deleteRequest.UserID).Inc() - - // We do not want to remove this delete request from inProcessRequests to make sure - // we do not move multiple deleting requests in deletion process. - // None of the other delete requests from the user would be considered for processing until then. - level.Error(req.logger).Log("msg", "error building delete plan", "err", err) - return err - } - - level.Info(req.logger).Log("msg", "sending delete request for execution") - p.executePlansChan <- req - } - - // track age of oldest delete request since they are over their cancellation period - oldestPendingRequestAge := time.Duration(0) - if oldestPendingRequestCreatedAt != 0 { - oldestPendingRequestAge = now.Sub(oldestPendingRequestCreatedAt.Add(p.cfg.DeleteRequestCancelPeriod)) - } - p.metrics.oldestPendingDeleteRequestAgeSeconds.Set(float64(oldestPendingRequestAge / time.Second)) - p.metrics.pendingDeleteRequestsCount.Set(float64(pendingDeleteRequestsCount)) - - return nil -} - -// buildDeletePlan builds per day delete plan for given delete requests. -// A days plan will include chunk ids and labels of all the chunks which are supposed to be deleted. -// Chunks are grouped together by labels to avoid storing labels repetitively. -// After building delete plans it updates status of delete request to StatusDeleting and sends it for execution -func (p *Purger) buildDeletePlan(req deleteRequestWithLogger) (err error) { - ctx := context.Background() - ctx = user.InjectOrgID(ctx, req.UserID) - - defer func() { - if err != nil { - p.inProcessRequests.setFailedRequestForUser(req.UserID) - } else { - req.Status = StatusDeleting - p.inProcessRequests.set(req.UserID, &req.DeleteRequest) - } - }() - - perDayTimeRange := splitByDay(req.StartTime, req.EndTime) - level.Info(req.logger).Log("msg", "building delete plan", "num_plans", len(perDayTimeRange)) - - plans := make([][]byte, len(perDayTimeRange)) - includedChunkIDs := map[string]struct{}{} - - for i, planRange := range perDayTimeRange { - chunksGroups := []ChunksGroup{} - - for _, selector := range req.Selectors { - matchers, err := parser.ParseMetricSelector(selector) - if err != nil { - return err - } - - chunks, err := p.chunkStore.Get(ctx, req.UserID, planRange.Start, planRange.End, matchers...) - if err != nil { - return err - } - - var cg []ChunksGroup - cg, includedChunkIDs = groupChunks(chunks, req.StartTime, req.EndTime, includedChunkIDs) - - if len(cg) != 0 { - chunksGroups = append(chunksGroups, cg...) 
- } - } - - plan := DeletePlan{ - PlanInterval: &Interval{ - StartTimestampMs: int64(planRange.Start), - EndTimestampMs: int64(planRange.End), - }, - ChunksGroup: chunksGroups, - } - - pb, err := proto.Marshal(&plan) - if err != nil { - return err - } - - plans[i] = pb - } - - err = p.putDeletePlans(ctx, req.UserID, req.RequestID, plans) - if err != nil { - return - } - - err = p.deleteStore.UpdateStatus(ctx, req.UserID, req.RequestID, StatusDeleting) - if err != nil { - return - } - - p.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(req.UserID).Add(float64(len(includedChunkIDs))) - - level.Info(req.logger).Log("msg", "built delete plans", "num_plans", len(perDayTimeRange)) - - return -} - -func (p *Purger) putDeletePlans(ctx context.Context, userID, requestID string, plans [][]byte) error { - for i, plan := range plans { - objectKey := buildObjectKeyForPlan(userID, requestID, i) - - err := p.objectClient.PutObject(ctx, objectKey, bytes.NewReader(plan)) - if err != nil { - return err - } - } - - return nil -} - -func (p *Purger) getDeletePlan(ctx context.Context, userID, requestID string, planNo int) (*DeletePlan, error) { - objectKey := buildObjectKeyForPlan(userID, requestID, planNo) - - readCloser, err := p.objectClient.GetObject(ctx, objectKey) - if err != nil { - return nil, err - } - - defer readCloser.Close() - - buf, err := ioutil.ReadAll(readCloser) - if err != nil { - return nil, err - } - - var plan DeletePlan - err = proto.Unmarshal(buf, &plan) - if err != nil { - return nil, err - } - - return &plan, nil -} - -func (p *Purger) removeDeletePlan(ctx context.Context, userID, requestID string, planNo int) error { - objectKey := buildObjectKeyForPlan(userID, requestID, planNo) - return p.objectClient.DeleteObject(ctx, objectKey) -} - -// returns interval per plan -func splitByDay(start, end model.Time) []model.Interval { - numOfDays := numPlans(start, end) - - perDayTimeRange := make([]model.Interval, numOfDays) - startOfNextDay := model.Time(((int64(start) / millisecondPerDay) + 1) * millisecondPerDay) - perDayTimeRange[0] = model.Interval{Start: start, End: startOfNextDay - 1} - - for i := 1; i < numOfDays; i++ { - interval := model.Interval{Start: startOfNextDay} - startOfNextDay += model.Time(millisecondPerDay) - interval.End = startOfNextDay - 1 - perDayTimeRange[i] = interval - } - - perDayTimeRange[numOfDays-1].End = end - - return perDayTimeRange -} - -func numPlans(start, end model.Time) int { - // rounding down start to start of the day - if start%model.Time(millisecondPerDay) != 0 { - start = model.Time((int64(start) / millisecondPerDay) * millisecondPerDay) - } - - // rounding up end to end of the day - if end%model.Time(millisecondPerDay) != 0 { - end = model.Time((int64(end)/millisecondPerDay)*millisecondPerDay + millisecondPerDay) - } - - return int(int64(end-start) / millisecondPerDay) -} - -// groups chunks together by unique label sets i.e all the chunks with same labels would be stored in a group -// chunk details are stored in groups for each unique label set to avoid storing them repetitively for each chunk -func groupChunks(chunks []chunk.Chunk, deleteFrom, deleteThrough model.Time, includedChunkIDs map[string]struct{}) ([]ChunksGroup, map[string]struct{}) { - metricToChunks := make(map[string]ChunksGroup) - - for _, chk := range chunks { - chunkID := chk.ExternalKey() - - if _, ok := includedChunkIDs[chunkID]; ok { - continue - } - // chunk.Metric are assumed to be sorted which should give same value from String() for same series. 
- // If they stop being sorted then in the worst case we would lose the benefit of grouping chunks to avoid storing labels repetitively. - metricString := chk.Metric.String() - group, ok := metricToChunks[metricString] - if !ok { - group = ChunksGroup{Labels: cortexpb.FromLabelsToLabelAdapters(chk.Metric)} - } - - chunkDetails := ChunkDetails{ID: chunkID} - - if deleteFrom > chk.From || deleteThrough < chk.Through { - partiallyDeletedInterval := Interval{StartTimestampMs: int64(chk.From), EndTimestampMs: int64(chk.Through)} - - if deleteFrom > chk.From { - partiallyDeletedInterval.StartTimestampMs = int64(deleteFrom) - } - - if deleteThrough < chk.Through { - partiallyDeletedInterval.EndTimestampMs = int64(deleteThrough) - } - chunkDetails.PartiallyDeletedInterval = &partiallyDeletedInterval - } - - group.Chunks = append(group.Chunks, chunkDetails) - includedChunkIDs[chunkID] = struct{}{} - metricToChunks[metricString] = group - } - - chunksGroups := make([]ChunksGroup, 0, len(metricToChunks)) - - for _, group := range metricToChunks { - chunksGroups = append(chunksGroups, group) - } - - return chunksGroups, includedChunkIDs -} - -func isMissingChunkErr(err error) bool { - if err == chunk.ErrStorageObjectNotFound { - return true - } - if promqlStorageErr, ok := err.(promql.ErrStorage); ok && promqlStorageErr.Err == chunk.ErrStorageObjectNotFound { - return true - } - - return false -} - -func buildObjectKeyForPlan(userID, requestID string, planNo int) string { - return fmt.Sprintf("%s:%s/%d", userID, requestID, planNo) -} - -func makeDeleteRequestWithLogger(deleteRequest DeleteRequest, l log.Logger) deleteRequestWithLogger { - logger := log.With(l, "user_id", deleteRequest.UserID, "request_id", deleteRequest.RequestID) - return deleteRequestWithLogger{deleteRequest, logger} -} - -// inProcessRequestsCollection stores DeleteRequests which are in process by each user. -// Currently we only allow processing of one delete request per user so it stores single DeleteRequest per user. 
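An illustrative aside on the plan-splitting above: splitByDay and numPlans carve a delete request into inclusive per-day intervals by rounding the start down and the end up to day boundaries. Below is a minimal, self-contained sketch of the same arithmetic, under stated assumptions: plain int64 millisecond timestamps stand in for model.Time, and millisecondPerDay is taken to be 24h in milliseconds, matching the constant this file relies on. The inProcessRequestsCollection type introduced by the comment above follows right after it.

package main

import "fmt"

const millisecondPerDay = int64(24 * 60 * 60 * 1000) // assumed value of the purger's constant

type interval struct{ Start, End int64 }

func splitByDay(start, end int64) []interval {
	// Round start down and end up to whole days to count the plans,
	// mirroring numPlans above.
	from := (start / millisecondPerDay) * millisecondPerDay
	to := end
	if to%millisecondPerDay != 0 {
		to = (to/millisecondPerDay + 1) * millisecondPerDay
	}
	n := int((to - from) / millisecondPerDay)

	out := make([]interval, n)
	dayEnd := (start/millisecondPerDay + 1) * millisecondPerDay
	out[0] = interval{Start: start, End: dayEnd - 1} // ends are inclusive, hence the -1
	for i := 1; i < n; i++ {
		out[i] = interval{Start: dayEnd, End: dayEnd + millisecondPerDay - 1}
		dayEnd += millisecondPerDay
	}
	out[n-1].End = end // the last plan ends exactly at the request end
	return out
}

func main() {
	// A request spanning parts of three days produces three plans.
	for _, iv := range splitByDay(1_000_000, 200_000_000) {
		fmt.Printf("%d..%d\n", iv.Start, iv.End)
	}
}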
-type inProcessRequestsCollection struct { - requests map[string]*DeleteRequest - usersWithFailedRequests map[string]struct{} - mtx sync.RWMutex -} - -func newInProcessRequestsCollection() *inProcessRequestsCollection { - return &inProcessRequestsCollection{ - requests: map[string]*DeleteRequest{}, - usersWithFailedRequests: map[string]struct{}{}, - } -} - -func (i *inProcessRequestsCollection) set(userID string, request *DeleteRequest) { - i.mtx.Lock() - defer i.mtx.Unlock() - - i.requests[userID] = request -} - -func (i *inProcessRequestsCollection) get(userID string) *DeleteRequest { - i.mtx.RLock() - defer i.mtx.RUnlock() - - return i.requests[userID] -} - -func (i *inProcessRequestsCollection) remove(userID string) { - i.mtx.Lock() - defer i.mtx.Unlock() - - delete(i.requests, userID) -} - -func (i *inProcessRequestsCollection) len() int { - i.mtx.RLock() - defer i.mtx.RUnlock() - - return len(i.requests) -} - -func (i *inProcessRequestsCollection) getOldest() *DeleteRequest { - i.mtx.RLock() - defer i.mtx.RUnlock() - - var oldestRequest *DeleteRequest - for _, request := range i.requests { - if oldestRequest == nil || request.CreatedAt.Before(oldestRequest.CreatedAt) { - oldestRequest = request - } - } - - return oldestRequest -} - -func (i *inProcessRequestsCollection) setFailedRequestForUser(userID string) { - i.mtx.Lock() - defer i.mtx.Unlock() - - i.usersWithFailedRequests[userID] = struct{}{} -} - -func (i *inProcessRequestsCollection) unsetFailedRequestForUser(userID string) { - i.mtx.Lock() - defer i.mtx.Unlock() - - delete(i.usersWithFailedRequests, userID) -} - -func (i *inProcessRequestsCollection) listUsersWithFailedRequest() []string { - i.mtx.RLock() - defer i.mtx.RUnlock() - - userIDs := make([]string, 0, len(i.usersWithFailedRequests)) - for userID := range i.usersWithFailedRequests { - userIDs = append(userIDs, userID) - } - - return userIDs -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go deleted file mode 100644 index d9657b3ee..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go +++ /dev/null @@ -1,183 +0,0 @@ -package purger - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/go-kit/log/level" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql/parser" - - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -type deleteRequestHandlerMetrics struct { - deleteRequestsReceivedTotal *prometheus.CounterVec -} - -func newDeleteRequestHandlerMetrics(r prometheus.Registerer) *deleteRequestHandlerMetrics { - m := deleteRequestHandlerMetrics{} - - m.deleteRequestsReceivedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "purger_delete_requests_received_total", - Help: "Number of delete requests received per user", - }, []string{"user"}) - - return &m -} - -// DeleteRequestHandler provides handlers for delete requests -type DeleteRequestHandler struct { - deleteStore *DeleteStore - metrics *deleteRequestHandlerMetrics - deleteRequestCancelPeriod time.Duration -} - -// NewDeleteRequestHandler creates a DeleteRequestHandler -func NewDeleteRequestHandler(deleteStore *DeleteStore, deleteRequestCancelPeriod 
time.Duration, registerer prometheus.Registerer) *DeleteRequestHandler { - deleteMgr := DeleteRequestHandler{ - deleteStore: deleteStore, - deleteRequestCancelPeriod: deleteRequestCancelPeriod, - metrics: newDeleteRequestHandlerMetrics(registerer), - } - - return &deleteMgr -} - -// AddDeleteRequestHandler handles addition of new delete request -func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - params := r.URL.Query() - match := params["match[]"] - if len(match) == 0 { - http.Error(w, "selectors not set", http.StatusBadRequest) - return - } - - for i := range match { - _, err := parser.ParseMetricSelector(match[i]) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - } - - startParam := params.Get("start") - startTime := int64(0) - if startParam != "" { - startTime, err = util.ParseTime(startParam) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - } - - endParam := params.Get("end") - endTime := int64(model.Now()) - - if endParam != "" { - endTime, err = util.ParseTime(endParam) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - if endTime > int64(model.Now()) { - http.Error(w, "deletes in future not allowed", http.StatusBadRequest) - return - } - } - - if startTime > endTime { - http.Error(w, "start time can't be greater than end time", http.StatusBadRequest) - return - } - - if err := dm.deleteStore.AddDeleteRequest(ctx, userID, model.Time(startTime), model.Time(endTime), match); err != nil { - level.Error(util_log.Logger).Log("msg", "error adding delete request to the store", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - dm.metrics.deleteRequestsReceivedTotal.WithLabelValues(userID).Inc() - w.WriteHeader(http.StatusNoContent) -} - -// GetAllDeleteRequestsHandler handles get all delete requests -func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - deleteRequests, err := dm.deleteStore.GetAllDeleteRequestsForUser(ctx, userID) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error getting delete requests from the store", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if err := json.NewEncoder(w).Encode(deleteRequests); err != nil { - level.Error(util_log.Logger).Log("msg", "error marshalling response", "err", err) - http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError) - } -} - -// CancelDeleteRequestHandler handles delete request cancellation -func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - params := r.URL.Query() - requestID := params.Get("request_id") - - deleteRequest, err := dm.deleteStore.GetDeleteRequest(ctx, userID, requestID) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error getting delete request from the store", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if deleteRequest == nil { - http.Error(w, 
"could not find delete request with given id", http.StatusBadRequest) - return - } - - if deleteRequest.Status != StatusReceived { - http.Error(w, "deletion of request which is in process or already processed is not allowed", http.StatusBadRequest) - return - } - - if deleteRequest.CreatedAt.Add(dm.deleteRequestCancelPeriod).Before(model.Now()) { - http.Error(w, fmt.Sprintf("deletion of request past the deadline of %s since its creation is not allowed", dm.deleteRequestCancelPeriod.String()), http.StatusBadRequest) - return - } - - if err := dm.deleteStore.RemoveDeleteRequest(ctx, userID, requestID, deleteRequest.CreatedAt, deleteRequest.StartTime, deleteRequest.EndTime); err != nil { - level.Error(util_log.Logger).Log("msg", "error cancelling the delete request", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusNoContent) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/table_provisioning.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/table_provisioning.go deleted file mode 100644 index e8ce5d636..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/table_provisioning.go +++ /dev/null @@ -1,30 +0,0 @@ -package purger - -import ( - "flag" - - "github.com/cortexproject/cortex/pkg/chunk" -) - -// TableProvisioningConfig holds config for table throuput and autoscaling. Currently only used by DynamoDB. -type TableProvisioningConfig struct { - chunk.ActiveTableProvisionConfig `yaml:",inline"` - TableTags chunk.Tags `yaml:"tags"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet. -// Adding a separate RegisterFlags here instead of using it from embedded chunk.ActiveTableProvisionConfig to be able to manage defaults separately. -// Defaults for WriteScale and ReadScale are shared for now to avoid adding further complexity since autoscaling is disabled anyways by default. -func (cfg *TableProvisioningConfig) RegisterFlags(argPrefix string, f *flag.FlagSet) { - // default values ActiveTableProvisionConfig - cfg.ProvisionedWriteThroughput = 1 - cfg.ProvisionedReadThroughput = 300 - cfg.ProvisionedThroughputOnDemandMode = false - - cfg.ActiveTableProvisionConfig.RegisterFlags(argPrefix, f) - f.Var(&cfg.TableTags, argPrefix+".tags", "Tag (of the form key=value) to be added to the tables. 
Supported by DynamoDB") -} - -func (cfg DeleteStoreConfig) GetTables() []chunk.TableDesc { - return []chunk.TableDesc{cfg.ProvisionConfig.BuildTableDesc(cfg.RequestsTableName, cfg.ProvisionConfig.TableTags)} -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tenant_deletion_api.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tenant_deletion_api.go deleted file mode 100644 index a8c6b8ff6..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tenant_deletion_api.go +++ /dev/null @@ -1,128 +0,0 @@ -package purger - -import ( - "context" - "net/http" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/objstore" - - "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" -) - -type TenantDeletionAPI struct { - bucketClient objstore.Bucket - logger log.Logger - cfgProvider bucket.TenantConfigProvider -} - -func NewTenantDeletionAPI(storageCfg cortex_tsdb.BlocksStorageConfig, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) (*TenantDeletionAPI, error) { - bucketClient, err := createBucketClient(storageCfg, logger, reg) - if err != nil { - return nil, err - } - - return newTenantDeletionAPI(bucketClient, cfgProvider, logger), nil -} - -func newTenantDeletionAPI(bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *TenantDeletionAPI { - return &TenantDeletionAPI{ - bucketClient: bkt, - cfgProvider: cfgProvider, - logger: logger, - } -} - -func (api *TenantDeletionAPI) DeleteTenant(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - // When Cortex is running, it uses Auth Middleware for checking X-Scope-OrgID and injecting tenant into context. - // Auth Middleware sends http.StatusUnauthorized if X-Scope-OrgID is missing, so we do too here, for consistency. 
- http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID, api.cfgProvider, cortex_tsdb.NewTenantDeletionMark(time.Now())) - if err != nil { - level.Error(api.logger).Log("msg", "failed to write tenant deletion mark", "user", userID, "err", err) - - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - level.Info(api.logger).Log("msg", "tenant deletion mark in blocks storage created", "user", userID) - - w.WriteHeader(http.StatusOK) -} - -type DeleteTenantStatusResponse struct { - TenantID string `json:"tenant_id"` - BlocksDeleted bool `json:"blocks_deleted"` -} - -func (api *TenantDeletionAPI) DeleteTenantStatus(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := tenant.TenantID(ctx) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - result := DeleteTenantStatusResponse{} - result.TenantID = userID - result.BlocksDeleted, err = api.isBlocksForUserDeleted(ctx, userID) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - util.WriteJSONResponse(w, result) -} - -func (api *TenantDeletionAPI) isBlocksForUserDeleted(ctx context.Context, userID string) (bool, error) { - var errBlockFound = errors.New("block found") - - userBucket := bucket.NewUserBucketClient(userID, api.bucketClient, api.cfgProvider) - err := userBucket.Iter(ctx, "", func(s string) error { - s = strings.TrimSuffix(s, "/") - - _, err := ulid.Parse(s) - if err != nil { - // not block, keep looking - return nil - } - - // Used as shortcut to stop iteration. - return errBlockFound - }) - - if errors.Is(err, errBlockFound) { - return false, nil - } - - if err != nil { - return false, err - } - - // No blocks found, all good. 
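An aside on the iteration pattern used by isBlocksForUserDeleted above (which, just below, returns true when no block was found): the callback returns a sentinel error, errBlockFound, purely to short-circuit objstore's Iter, and errors.Is then translates it back into a boolean. A standalone sketch of the same sentinel-error idiom, with a hypothetical looksLikeULID standing in for ulid.Parse:

package main

import (
	"errors"
	"fmt"
)

var errFound = errors.New("found")

// iter calls fn for each item, stopping at the first error, like objstore.Bucket.Iter.
func iter(items []string, fn func(string) error) error {
	for _, it := range items {
		if err := fn(it); err != nil {
			return err
		}
	}
	return nil
}

func containsBlockID(items []string) (bool, error) {
	err := iter(items, func(s string) error {
		if looksLikeULID(s) {
			return errFound // sentinel: used only to short-circuit the iteration
		}
		return nil // not a block, keep looking
	})
	if errors.Is(err, errFound) {
		return true, nil
	}
	return false, err // a real iteration error, or (false, nil) if nothing matched
}

func looksLikeULID(s string) bool { return len(s) == 26 } // stand-in for ulid.Parse

func main() {
	fmt.Println(containsBlockID([]string{"markers/", "01ARZ3NDEKTSV4RRFFQ69G5FAV"}))
}

Note the inverted meaning in the original: finding a block there means the tenant's blocks are not yet deleted, so it returns (false, nil) on the sentinel.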
- return true, nil -} - -func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { - bucketClient, err := bucket.NewClient(context.Background(), cfg.Bucket, "purger", logger, reg) - if err != nil { - return nil, errors.Wrap(err, "create bucket client") - } - - return bucketClient, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go deleted file mode 100644 index 00eeeee1d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go +++ /dev/null @@ -1,450 +0,0 @@ -package purger - -import ( - "context" - "sort" - "strconv" - "sync" - "time" - - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" - - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -const tombstonesReloadDuration = 5 * time.Minute - -type tombstonesLoaderMetrics struct { - cacheGenLoadFailures prometheus.Counter - deleteRequestsLoadFailures prometheus.Counter -} - -func newtombstonesLoaderMetrics(r prometheus.Registerer) *tombstonesLoaderMetrics { - m := tombstonesLoaderMetrics{} - - m.cacheGenLoadFailures = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "tombstones_loader_cache_gen_load_failures_total", - Help: "Total number of failures while loading cache generation number using tombstones loader", - }) - m.deleteRequestsLoadFailures = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "tombstones_loader_cache_delete_requests_load_failures_total", - Help: "Total number of failures while loading delete requests using tombstones loader", - }) - - return &m -} - -// TombstonesSet holds all the pending delete requests for a user -type TombstonesSet struct { - tombstones []DeleteRequest - oldestTombstoneStart, newestTombstoneEnd model.Time // Used as optimization to find whether we want to iterate over tombstones or not -} - -// Used for easier injection of mocks. -type DeleteStoreAPI interface { - getCacheGenerationNumbers(ctx context.Context, user string) (*cacheGenNumbers, error) - GetPendingDeleteRequestsForUser(ctx context.Context, id string) ([]DeleteRequest, error) -} - -// TombstonesLoader loads delete requests and gen numbers from store and keeps checking for updates. -// It keeps checking for changes in gen numbers, which also means changes in delete requests and reloads specific users delete requests. 
-type TombstonesLoader struct { - tombstones map[string]*TombstonesSet - tombstonesMtx sync.RWMutex - - cacheGenNumbers map[string]*cacheGenNumbers - cacheGenNumbersMtx sync.RWMutex - - deleteStore DeleteStoreAPI - metrics *tombstonesLoaderMetrics - quit chan struct{} -} - -// NewTombstonesLoader creates a TombstonesLoader -func NewTombstonesLoader(deleteStore DeleteStoreAPI, registerer prometheus.Registerer) *TombstonesLoader { - tl := TombstonesLoader{ - tombstones: map[string]*TombstonesSet{}, - cacheGenNumbers: map[string]*cacheGenNumbers{}, - deleteStore: deleteStore, - metrics: newtombstonesLoaderMetrics(registerer), - } - go tl.loop() - - return &tl -} - -// Stop stops TombstonesLoader -func (tl *TombstonesLoader) Stop() { - close(tl.quit) -} - -func (tl *TombstonesLoader) loop() { - if tl.deleteStore == nil { - return - } - - tombstonesReloadTimer := time.NewTicker(tombstonesReloadDuration) - for { - select { - case <-tombstonesReloadTimer.C: - err := tl.reloadTombstones() - if err != nil { - level.Error(util_log.Logger).Log("msg", "error reloading tombstones", "err", err) - } - case <-tl.quit: - return - } - } -} - -func (tl *TombstonesLoader) reloadTombstones() error { - updatedGenNumbers := make(map[string]*cacheGenNumbers) - tl.cacheGenNumbersMtx.RLock() - - // check for updates in loaded gen numbers - for userID, oldGenNumbers := range tl.cacheGenNumbers { - newGenNumbers, err := tl.deleteStore.getCacheGenerationNumbers(context.Background(), userID) - if err != nil { - tl.cacheGenNumbersMtx.RUnlock() - return err - } - - if *oldGenNumbers != *newGenNumbers { - updatedGenNumbers[userID] = newGenNumbers - } - } - - tl.cacheGenNumbersMtx.RUnlock() - - // in frontend we load only cache gen numbers so short circuit here if there are no loaded deleted requests - // first call to GetPendingTombstones would avoid doing this. 
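An aside on the loader's background refresh above (reloadTombstones continues just below): NewTombstonesLoader spawns loop, which wakes on a ticker every tombstonesReloadDuration (5 minutes) and exits when Stop closes the quit channel. A minimal sketch of that ticker-plus-quit shape; for tidiness it also initializes the quit channel explicitly in the constructor and stops the ticker on exit:

package main

import (
	"fmt"
	"time"
)

// loader mirrors the ticker-plus-quit-channel shape of TombstonesLoader.loop.
type loader struct{ quit chan struct{} }

func newLoader() *loader {
	l := &loader{quit: make(chan struct{})}
	go l.loop()
	return l
}

func (l *loader) loop() {
	t := time.NewTicker(10 * time.Millisecond) // stands in for the 5-minute reload interval
	defer t.Stop()
	for {
		select {
		case <-t.C:
			fmt.Println("reload") // stands in for reloadTombstones()
		case <-l.quit:
			return
		}
	}
}

// Stop closes quit, which unblocks the select and ends the goroutine.
func (l *loader) Stop() { close(l.quit) }

func main() {
	l := newLoader()
	time.Sleep(35 * time.Millisecond) // let a few reloads fire
	l.Stop()
	time.Sleep(5 * time.Millisecond) // give the goroutine time to exit
}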
- tl.tombstonesMtx.RLock() - if len(tl.tombstones) == 0 { - tl.tombstonesMtx.RUnlock() - return nil - } - tl.tombstonesMtx.RUnlock() - - // for all the updated gen numbers, reload delete requests - for userID, genNumbers := range updatedGenNumbers { - err := tl.loadPendingTombstones(userID) - if err != nil { - return err - } - - tl.cacheGenNumbersMtx.Lock() - tl.cacheGenNumbers[userID] = genNumbers - tl.cacheGenNumbersMtx.Unlock() - } - - return nil -} - -// GetPendingTombstones returns all pending tombstones -func (tl *TombstonesLoader) GetPendingTombstones(userID string) (*TombstonesSet, error) { - tl.tombstonesMtx.RLock() - - tombstoneSet, isOK := tl.tombstones[userID] - if isOK { - tl.tombstonesMtx.RUnlock() - return tombstoneSet, nil - } - - tl.tombstonesMtx.RUnlock() - err := tl.loadPendingTombstones(userID) - if err != nil { - return nil, err - } - - tl.tombstonesMtx.RLock() - defer tl.tombstonesMtx.RUnlock() - - return tl.tombstones[userID], nil -} - -// GetPendingTombstones returns all pending tombstones -func (tl *TombstonesLoader) GetPendingTombstonesForInterval(userID string, from, to model.Time) (*TombstonesSet, error) { - allTombstones, err := tl.GetPendingTombstones(userID) - if err != nil { - return nil, err - } - - if !allTombstones.HasTombstonesForInterval(from, to) { - return &TombstonesSet{}, nil - } - - filteredSet := TombstonesSet{oldestTombstoneStart: model.Now()} - - for _, tombstone := range allTombstones.tombstones { - if !intervalsOverlap(model.Interval{Start: from, End: to}, model.Interval{Start: tombstone.StartTime, End: tombstone.EndTime}) { - continue - } - - filteredSet.tombstones = append(filteredSet.tombstones, tombstone) - - if tombstone.StartTime < filteredSet.oldestTombstoneStart { - filteredSet.oldestTombstoneStart = tombstone.StartTime - } - - if tombstone.EndTime > filteredSet.newestTombstoneEnd { - filteredSet.newestTombstoneEnd = tombstone.EndTime - } - } - - return &filteredSet, nil -} - -func (tl *TombstonesLoader) loadPendingTombstones(userID string) error { - if tl.deleteStore == nil { - tl.tombstonesMtx.Lock() - defer tl.tombstonesMtx.Unlock() - - tl.tombstones[userID] = &TombstonesSet{oldestTombstoneStart: 0, newestTombstoneEnd: 0} - return nil - } - - pendingDeleteRequests, err := tl.deleteStore.GetPendingDeleteRequestsForUser(context.Background(), userID) - if err != nil { - tl.metrics.deleteRequestsLoadFailures.Inc() - return errors.Wrap(err, "error loading delete requests") - } - - tombstoneSet := TombstonesSet{tombstones: pendingDeleteRequests, oldestTombstoneStart: model.Now()} - for i := range tombstoneSet.tombstones { - tombstoneSet.tombstones[i].Matchers = make([][]*labels.Matcher, len(tombstoneSet.tombstones[i].Selectors)) - - for j, selector := range tombstoneSet.tombstones[i].Selectors { - tombstoneSet.tombstones[i].Matchers[j], err = parser.ParseMetricSelector(selector) - - if err != nil { - tl.metrics.deleteRequestsLoadFailures.Inc() - return errors.Wrapf(err, "error parsing metric selector") - } - } - - if tombstoneSet.tombstones[i].StartTime < tombstoneSet.oldestTombstoneStart { - tombstoneSet.oldestTombstoneStart = tombstoneSet.tombstones[i].StartTime - } - - if tombstoneSet.tombstones[i].EndTime > tombstoneSet.newestTombstoneEnd { - tombstoneSet.newestTombstoneEnd = tombstoneSet.tombstones[i].EndTime - } - } - - tl.tombstonesMtx.Lock() - defer tl.tombstonesMtx.Unlock() - tl.tombstones[userID] = &tombstoneSet - - return nil -} - -// GetStoreCacheGenNumber returns store cache gen number for a user -func (tl *TombstonesLoader) 
GetStoreCacheGenNumber(tenantIDs []string) string { - return tl.getCacheGenNumbersPerTenants(tenantIDs).store -} - -// GetResultsCacheGenNumber returns results cache gen number for a user -func (tl *TombstonesLoader) GetResultsCacheGenNumber(tenantIDs []string) string { - return tl.getCacheGenNumbersPerTenants(tenantIDs).results -} - -func (tl *TombstonesLoader) getCacheGenNumbersPerTenants(tenantIDs []string) *cacheGenNumbers { - var result cacheGenNumbers - - if len(tenantIDs) == 0 { - return &result - } - - // keep the maximum value that's currently in result - var maxResults, maxStore int - - for pos, tenantID := range tenantIDs { - numbers := tl.getCacheGenNumbers(tenantID) - - // handle first tenant in the list - if pos == 0 { - // short cut if there is only one tenant - if len(tenantIDs) == 1 { - return numbers - } - - // set first tenant string whatever happens next - result.results = numbers.results - result.store = numbers.store - } - - // set results number string if it's higher than the ones before - if numbers.results != "" { - results, err := strconv.Atoi(numbers.results) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err) - } else if maxResults < results { - maxResults = results - result.results = numbers.results - } - } - - // set store number string if it's higher than the ones before - if numbers.store != "" { - store, err := strconv.Atoi(numbers.store) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error parsing storeCacheGenNumber", "user", tenantID, "err", err) - } else if maxStore < store { - maxStore = store - result.store = numbers.store - } - } - } - - return &result -} - -func (tl *TombstonesLoader) getCacheGenNumbers(userID string) *cacheGenNumbers { - tl.cacheGenNumbersMtx.RLock() - if genNumbers, isOK := tl.cacheGenNumbers[userID]; isOK { - tl.cacheGenNumbersMtx.RUnlock() - return genNumbers - } - - tl.cacheGenNumbersMtx.RUnlock() - - if tl.deleteStore == nil { - tl.cacheGenNumbersMtx.Lock() - defer tl.cacheGenNumbersMtx.Unlock() - - tl.cacheGenNumbers[userID] = &cacheGenNumbers{} - return tl.cacheGenNumbers[userID] - } - - genNumbers, err := tl.deleteStore.getCacheGenerationNumbers(context.Background(), userID) - if err != nil { - level.Error(util_log.Logger).Log("msg", "error loading cache generation numbers", "err", err) - tl.metrics.cacheGenLoadFailures.Inc() - return &cacheGenNumbers{} - } - - tl.cacheGenNumbersMtx.Lock() - defer tl.cacheGenNumbersMtx.Unlock() - - tl.cacheGenNumbers[userID] = genNumbers - return genNumbers -} - -// GetDeletedIntervals returns non-overlapping, sorted deleted intervals. 
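Before GetDeletedIntervals below, an aside on getCacheGenNumbersPerTenants above: for multi-tenant queries it keeps, per field, the numerically largest generation string across tenants, skipping values that fail strconv.Atoi (the real code logs those, and it also falls back to the first tenant's raw value). A simplified, single-field sketch:

package main

import (
	"fmt"
	"strconv"
)

// maxGen returns the numerically largest of the per-tenant cache generation
// strings; empty or unparsable values are skipped rather than treated as zero.
func maxGen(gens []string) string {
	best, bestVal := "", -1
	for _, g := range gens {
		if g == "" {
			continue
		}
		v, err := strconv.Atoi(g)
		if err != nil {
			continue // the real code logs the parse error and keeps the previous max
		}
		if v > bestVal {
			best, bestVal = g, v
		}
	}
	return best
}

func main() {
	fmt.Println(maxGen([]string{"3", "", "12", "oops", "7"})) // "12"
}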
-func (ts TombstonesSet) GetDeletedIntervals(lbls labels.Labels, from, to model.Time) []model.Interval { - if len(ts.tombstones) == 0 || to < ts.oldestTombstoneStart || from > ts.newestTombstoneEnd { - return nil - } - - var deletedIntervals []model.Interval - requestedInterval := model.Interval{Start: from, End: to} - - for i := range ts.tombstones { - overlaps, overlappingInterval := getOverlappingInterval(requestedInterval, - model.Interval{Start: ts.tombstones[i].StartTime, End: ts.tombstones[i].EndTime}) - - if !overlaps { - continue - } - - matches := false - for _, matchers := range ts.tombstones[i].Matchers { - if labels.Selector(matchers).Matches(lbls) { - matches = true - break - } - } - - if !matches { - continue - } - - if overlappingInterval == requestedInterval { - // whole interval deleted - return []model.Interval{requestedInterval} - } - - deletedIntervals = append(deletedIntervals, overlappingInterval) - } - - if len(deletedIntervals) == 0 { - return nil - } - - return mergeIntervals(deletedIntervals) -} - -// Len returns number of tombstones that are there -func (ts TombstonesSet) Len() int { - return len(ts.tombstones) -} - -// HasTombstonesForInterval tells whether there are any tombstones which overlapping given interval -func (ts TombstonesSet) HasTombstonesForInterval(from, to model.Time) bool { - if len(ts.tombstones) == 0 || to < ts.oldestTombstoneStart || from > ts.newestTombstoneEnd { - return false - } - - return true -} - -// sorts and merges overlapping intervals -func mergeIntervals(intervals []model.Interval) []model.Interval { - if len(intervals) <= 1 { - return intervals - } - - mergedIntervals := make([]model.Interval, 0, len(intervals)) - sort.Slice(intervals, func(i, j int) bool { - return intervals[i].Start < intervals[j].Start - }) - - ongoingTrFrom, ongoingTrTo := intervals[0].Start, intervals[0].End - for i := 1; i < len(intervals); i++ { - // if there is no overlap add it to mergedIntervals - if intervals[i].Start > ongoingTrTo { - mergedIntervals = append(mergedIntervals, model.Interval{Start: ongoingTrFrom, End: ongoingTrTo}) - ongoingTrFrom = intervals[i].Start - ongoingTrTo = intervals[i].End - continue - } - - // there is an overlap but check whether existing time range is bigger than the current one - if intervals[i].End > ongoingTrTo { - ongoingTrTo = intervals[i].End - } - } - - // add the last time range - mergedIntervals = append(mergedIntervals, model.Interval{Start: ongoingTrFrom, End: ongoingTrTo}) - - return mergedIntervals -} - -func getOverlappingInterval(interval1, interval2 model.Interval) (bool, model.Interval) { - if interval2.Start > interval1.Start { - interval1.Start = interval2.Start - } - - if interval2.End < interval1.End { - interval1.End = interval2.End - } - - return interval1.Start < interval1.End, interval1 -} - -func intervalsOverlap(interval1, interval2 model.Interval) bool { - if interval1.Start > interval2.End || interval2.Start > interval1.End { - return false - } - - return true -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go deleted file mode 100644 index c4804995f..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go +++ /dev/null @@ -1,39 +0,0 @@ -package storage - -import ( - "bytes" -) - -// Bytes exists to stop proto copying the byte array -type Bytes []byte - -// Marshal just returns bs -func (bs *Bytes) Marshal() ([]byte, error) { - return []byte(*bs), nil -} - -// MarshalTo 
copies Bytes to data -func (bs *Bytes) MarshalTo(data []byte) (n int, err error) { - return copy(data, *bs), nil -} - -// Unmarshal updates Bytes to be data, without a copy -func (bs *Bytes) Unmarshal(data []byte) error { - *bs = data - return nil -} - -// Size returns the length of Bytes -func (bs *Bytes) Size() int { - return len(*bs) -} - -// Equal returns true if other equals Bytes -func (bs *Bytes) Equal(other Bytes) bool { - return bytes.Equal(*bs, other) -} - -// Compare Bytes to other -func (bs *Bytes) Compare(other Bytes) int { - return bytes.Compare(*bs, other) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go deleted file mode 100644 index 98f883013..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go +++ /dev/null @@ -1,48 +0,0 @@ -package storage - -import ( - "io" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/chunk/gcp" - "github.com/cortexproject/cortex/pkg/chunk/testutils" - "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -type fixture struct { - fixture testutils.Fixture -} - -func (f fixture) Name() string { return "caching-store" } -func (f fixture) Clients() (chunk.IndexClient, chunk.Client, chunk.TableClient, chunk.SchemaConfig, io.Closer, error) { - limits, err := defaultLimits() - if err != nil { - return nil, nil, nil, chunk.SchemaConfig{}, nil, err - } - indexClient, chunkClient, tableClient, schemaConfig, closer, err := f.fixture.Clients() - reg := prometheus.NewRegistry() - logger := log.NewNopLogger() - indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{ - MaxSizeItems: 500, - Validity: 5 * time.Minute, - }, reg, logger), 5*time.Minute, limits, logger) - return indexClient, chunkClient, tableClient, schemaConfig, closer, err -} - -// Fixtures for unit testing the caching storage. 
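An aside on the Bytes helper above (the caching-store fixtures list continues just below): Bytes exists to stop proto from copying, so its Unmarshal aliases the incoming slice rather than duplicating it. A tiny demonstration of that aliasing, and of why callers must not reuse the source buffer:

package main

import "fmt"

// Bytes aliases the input on Unmarshal instead of copying, as in storage.Bytes.
type Bytes []byte

func (bs *Bytes) Unmarshal(data []byte) error { *bs = data; return nil }

func main() {
	buf := []byte("abc")
	var b Bytes
	_ = b.Unmarshal(buf)
	buf[0] = 'X'           // mutating the source is visible through b: no copy was made
	fmt.Println(string(b)) // "Xbc"
}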
-var Fixtures = []testutils.Fixture{ - fixture{gcp.Fixtures[0]}, -} - -func defaultLimits() (*validation.Overrides, error) { - var defaults validation.Limits - flagext.DefaultValues(&defaults) - defaults.CardinalityLimit = 5 - return validation.NewOverrides(defaults, nil) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go deleted file mode 100644 index f5243b84c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go +++ /dev/null @@ -1,308 +0,0 @@ -package storage - -import ( - "context" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/cache" - chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util/spanlogger" -) - -var ( - cacheCorruptErrs = promauto.NewCounter(prometheus.CounterOpts{ - Name: "querier_index_cache_corruptions_total", - Help: "The number of cache corruptions for the index cache.", - }) - cacheHits = promauto.NewCounter(prometheus.CounterOpts{ - Name: "querier_index_cache_hits_total", - Help: "The number of cache hits for the index cache.", - }) - cacheGets = promauto.NewCounter(prometheus.CounterOpts{ - Name: "querier_index_cache_gets_total", - Help: "The number of gets for the index cache.", - }) - cachePuts = promauto.NewCounter(prometheus.CounterOpts{ - Name: "querier_index_cache_puts_total", - Help: "The number of puts for the index cache.", - }) - cacheEncodeErrs = promauto.NewCounter(prometheus.CounterOpts{ - Name: "querier_index_cache_encode_errors_total", - Help: "The number of errors for the index cache while encoding the body.", - }) -) - -type cachingIndexClient struct { - chunk.IndexClient - cache cache.Cache - validity time.Duration - limits StoreLimits - logger log.Logger -} - -func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits StoreLimits, logger log.Logger) chunk.IndexClient { - if c == nil || cache.IsEmptyTieredCache(c) { - return client - } - - return &cachingIndexClient{ - IndexClient: client, - cache: cache.NewSnappy(c, logger), - validity: validity, - limits: limits, - logger: logger, - } -} - -func (s *cachingIndexClient) Stop() { - s.cache.Stop() - s.IndexClient.Stop() -} - -func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) (shouldContinue bool)) error { - // We cache the entire row, so filter client side. - callback = chunk_util.QueryFilter(callback) - - userID, err := tenant.TenantID(ctx) - if err != nil { - return err - } - cardinalityLimit := int32(s.limits.CardinalityLimit(userID)) - - // Build list of keys to lookup in the cache. 
- keys := make([]string, 0, len(queries)) - queriesByKey := make(map[string][]chunk.IndexQuery, len(queries)) - for _, query := range queries { - key := queryKey(query) - keys = append(keys, key) - queriesByKey[key] = append(queriesByKey[key], query) - } - - batches, misses := s.cacheFetch(ctx, keys) - for _, batch := range batches { - if cardinalityLimit > 0 && batch.Cardinality > cardinalityLimit { - return chunk.CardinalityExceededError{ - Size: batch.Cardinality, - Limit: cardinalityLimit, - } - } - - queries := queriesByKey[batch.Key] - for _, query := range queries { - callback(query, batch) - } - } - - if len(misses) == 0 { - return nil - } - - // Build list of cachable queries for the queries that missed the cache. - var ( - resultsMtx sync.Mutex - results = make(map[string]ReadBatch, len(misses)) - cacheableMissed = make([]chunk.IndexQuery, 0, len(misses)) - expiryTime = time.Now().Add(s.validity) - ) - - for _, key := range misses { - // Only need to consider one of the queries as they have the same table & hash. - queries := queriesByKey[key] - cacheableMissed = append(cacheableMissed, chunk.IndexQuery{ - TableName: queries[0].TableName, - HashValue: queries[0].HashValue, - }) - - rb := ReadBatch{ - Key: key, - Expiry: expiryTime.UnixNano(), - } - - // If the query is cacheable forever, nil the expiry. - if queries[0].Immutable { - rb.Expiry = 0 - } - - results[key] = rb - } - - err = s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool { - resultsMtx.Lock() - defer resultsMtx.Unlock() - key := queryKey(cacheableQuery) - existing := results[key] - for iter := r.Iterator(); iter.Next(); { - existing.Entries = append(existing.Entries, Entry{Column: iter.RangeValue(), Value: iter.Value()}) - } - results[key] = existing - return true - }) - if err != nil { - return err - } - - { - resultsMtx.Lock() - defer resultsMtx.Unlock() - keys := make([]string, 0, len(results)) - batches := make([]ReadBatch, 0, len(results)) - var cardinalityErr error - for key, batch := range results { - cardinality := int32(len(batch.Entries)) - if cardinalityLimit > 0 && cardinality > cardinalityLimit { - batch.Cardinality = cardinality - batch.Entries = nil - cardinalityErr = chunk.CardinalityExceededError{ - Size: cardinality, - Limit: cardinalityLimit, - } - } - - keys = append(keys, key) - batches = append(batches, batch) - if cardinalityErr != nil { - continue - } - - queries := queriesByKey[key] - for _, query := range queries { - callback(query, batch) - } - } - s.cacheStore(ctx, keys, batches) - return cardinalityErr - } -} - -// Iterator implements chunk.ReadBatch. -func (b ReadBatch) Iterator() chunk.ReadBatchIterator { - return &readBatchIterator{ - index: -1, - readBatch: b, - } -} - -type readBatchIterator struct { - index int - readBatch ReadBatch -} - -// Len implements chunk.ReadBatchIterator. -func (b *readBatchIterator) Next() bool { - b.index++ - return b.index < len(b.readBatch.Entries) -} - -// RangeValue implements chunk.ReadBatchIterator. -func (b *readBatchIterator) RangeValue() []byte { - return b.readBatch.Entries[b.index].Column -} - -// Value implements chunk.ReadBatchIterator. 
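An aside on the cache keys used here (Value and queryKey continue just below): queryKey joins the table name and hash value with a "\xff" separator, and cacheStore/cacheFetch hash that key because memcached rejects unicode keys and keys over 250 bytes; the original key is kept inside the stored value so collisions can be detected on fetch. A standalone sketch of that collision-safe scheme, with sha256 as a stand-in for whatever hash cache.HashKey actually uses:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// hashKey produces a short ASCII-safe key, as cache.HashKey does for memcached.
func hashKey(k string) string {
	h := sha256.Sum256([]byte(k))
	return base64.RawURLEncoding.EncodeToString(h[:])
}

type entry struct {
	Key   string // original key, kept so a hash collision can be detected
	Value string
}

type kv map[string]entry

func (c kv) store(key, val string) { c[hashKey(key)] = entry{Key: key, Value: val} }

func (c kv) fetch(key string) (string, bool) {
	e, ok := c[hashKey(key)]
	if !ok || e.Key != key { // a differing embedded key means a collision: treat as a miss
		return "", false
	}
	return e.Value, true
}

func main() {
	c := kv{}
	c.store("table\xffhash", "row data") // "\xff" separator, as in queryKey
	fmt.Println(c.fetch("table\xffhash"))
}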
-func (b *readBatchIterator) Value() []byte { - return b.readBatch.Entries[b.index].Value -} - -func queryKey(q chunk.IndexQuery) string { - const sep = "\xff" - return q.TableName + sep + q.HashValue -} - -func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batches []ReadBatch) { - cachePuts.Add(float64(len(keys))) - - // We're doing the hashing to handle unicode and key len properly. - // Memcache fails for unicode keys and keys longer than 250 Bytes. - hashed := make([]string, 0, len(keys)) - bufs := make([][]byte, 0, len(batches)) - for i := range keys { - hashed = append(hashed, cache.HashKey(keys[i])) - out, err := proto.Marshal(&batches[i]) - if err != nil { - level.Warn(s.logger).Log("msg", "error marshalling ReadBatch", "err", err) - cacheEncodeErrs.Inc() - return - } - bufs = append(bufs, out) - } - - s.cache.Store(ctx, hashed, bufs) -} - -func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (batches []ReadBatch, missed []string) { - log, ctx := spanlogger.New(ctx, "cachingIndexClient.cacheFetch") - defer log.Finish() - - cacheGets.Add(float64(len(keys))) - - // Build a map from hash -> key; NB there can be collisions here; we'll fetch - // the last hash. - hashedKeys := make(map[string]string, len(keys)) - for _, key := range keys { - hashedKeys[cache.HashKey(key)] = key - } - - // Build a list of hashes; could be less than keys due to collisions. - hashes := make([]string, 0, len(keys)) - for hash := range hashedKeys { - hashes = append(hashes, hash) - } - - // Look up the hashes in a single batch. If we get an error, we just "miss" all - // of the keys. Eventually I want to push all the errors to the leafs of the cache - // tree, to the caches only return found & missed. - foundHashes, bufs, _ := s.cache.Fetch(ctx, hashes) - - // Reverse the hash, unmarshal the index entries, check we got what we expected - // and that its still valid. - batches = make([]ReadBatch, 0, len(foundHashes)) - for j, foundHash := range foundHashes { - key := hashedKeys[foundHash] - var readBatch ReadBatch - - if err := proto.Unmarshal(bufs[j], &readBatch); err != nil { - level.Warn(log).Log("msg", "error unmarshalling index entry from cache", "err", err) - cacheCorruptErrs.Inc() - continue - } - - // Make sure the hash(key) is not a collision in the cache by looking at the - // key in the value. - if key != readBatch.Key { - level.Debug(log).Log("msg", "dropping index cache entry due to key collision", "key", key, "readBatch.Key", readBatch.Key, "expiry") - continue - } - - if readBatch.Expiry != 0 && time.Now().After(time.Unix(0, readBatch.Expiry)) { - continue - } - - cacheHits.Inc() - batches = append(batches, readBatch) - } - - // Finally work out what we're missing. - misses := make(map[string]struct{}, len(keys)) - for _, key := range keys { - misses[key] = struct{}{} - } - for i := range batches { - delete(misses, batches[i].Key) - } - missed = make([]string, 0, len(misses)) - for miss := range misses { - missed = append(missed, miss) - } - - level.Debug(log).Log("hits", len(batches), "misses", len(misses)) - return batches, missed -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go deleted file mode 100644 index a0bfe0a53..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go +++ /dev/null @@ -1,843 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: caching_index_client.proto - -package storage - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Entry struct { - Column Bytes `protobuf:"bytes,1,opt,name=Column,proto3,customtype=Bytes" json:"Column"` - Value Bytes `protobuf:"bytes,2,opt,name=Value,proto3,customtype=Bytes" json:"Value"` -} - -func (m *Entry) Reset() { *m = Entry{} } -func (*Entry) ProtoMessage() {} -func (*Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_6a83955bbc783296, []int{0} -} -func (m *Entry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Entry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Entry.Merge(m, src) -} -func (m *Entry) XXX_Size() int { - return m.Size() -} -func (m *Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_Entry proto.InternalMessageInfo - -type ReadBatch struct { - Entries []Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - // The time at which the key expires. - Expiry int64 `protobuf:"varint,3,opt,name=expiry,proto3" json:"expiry,omitempty"` - // The number of entries; used for cardinality limiting. - // entries will be empty when this is set. 
- Cardinality int32 `protobuf:"varint,4,opt,name=cardinality,proto3" json:"cardinality,omitempty"` -} - -func (m *ReadBatch) Reset() { *m = ReadBatch{} } -func (*ReadBatch) ProtoMessage() {} -func (*ReadBatch) Descriptor() ([]byte, []int) { - return fileDescriptor_6a83955bbc783296, []int{1} -} -func (m *ReadBatch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadBatch.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadBatch) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadBatch.Merge(m, src) -} -func (m *ReadBatch) XXX_Size() int { - return m.Size() -} -func (m *ReadBatch) XXX_DiscardUnknown() { - xxx_messageInfo_ReadBatch.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadBatch proto.InternalMessageInfo - -func (m *ReadBatch) GetEntries() []Entry { - if m != nil { - return m.Entries - } - return nil -} - -func (m *ReadBatch) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *ReadBatch) GetExpiry() int64 { - if m != nil { - return m.Expiry - } - return 0 -} - -func (m *ReadBatch) GetCardinality() int32 { - if m != nil { - return m.Cardinality - } - return 0 -} - -func init() { - proto.RegisterType((*Entry)(nil), "storage.Entry") - proto.RegisterType((*ReadBatch)(nil), "storage.ReadBatch") -} - -func init() { proto.RegisterFile("caching_index_client.proto", fileDescriptor_6a83955bbc783296) } - -var fileDescriptor_6a83955bbc783296 = []byte{ - // 311 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x6e, 0xea, 0x30, - 0x14, 0x86, 0x7d, 0x6e, 0x08, 0x08, 0x73, 0xef, 0xd5, 0x95, 0x87, 0xab, 0x88, 0xe1, 0x10, 0x51, - 0x55, 0xca, 0xd2, 0x20, 0xb5, 0x7d, 0x82, 0x54, 0x7d, 0x81, 0x54, 0xea, 0x8a, 0x4c, 0x70, 0x83, - 0xd5, 0x60, 0xa3, 0xe0, 0x48, 0x64, 0xeb, 0xd6, 0xb5, 0x8f, 0xd1, 0x47, 0x61, 0x64, 0x44, 0x1d, - 0x50, 0x31, 0x4b, 0x47, 0x1e, 0xa1, 0xc2, 0x50, 0xa9, 0x43, 0xb7, 0xff, 0xf7, 0xf7, 0xfb, 0xfc, - 0x47, 0x87, 0x76, 0x33, 0x9e, 0x4d, 0xa4, 0xca, 0x87, 0x52, 0x8d, 0xc5, 0x62, 0x98, 0x15, 0x52, - 0x28, 0x13, 0xcf, 0x4a, 0x6d, 0x34, 0x6b, 0xcd, 0x8d, 0x2e, 0x79, 0x2e, 0xba, 0x17, 0xb9, 0x34, - 0x93, 0x6a, 0x14, 0x67, 0x7a, 0x3a, 0xc8, 0x75, 0xae, 0x07, 0x8e, 0x8f, 0xaa, 0x07, 0xe7, 0x9c, - 0x71, 0xea, 0xf8, 0xaf, 0x7f, 0x47, 0xfd, 0x5b, 0x65, 0xca, 0x9a, 0x9d, 0xd3, 0xe6, 0x8d, 0x2e, - 0xaa, 0xa9, 0x0a, 0x20, 0x84, 0xe8, 0x77, 0xf2, 0x67, 0xb9, 0xe9, 0x91, 0xb7, 0x4d, 0xcf, 0x4f, - 0x6a, 0x23, 0xe6, 0xe9, 0x09, 0xb2, 0x33, 0xea, 0xdf, 0xf3, 0xa2, 0x12, 0xc1, 0xaf, 0x9f, 0x52, - 0x47, 0xd6, 0x7f, 0x06, 0xda, 0x4e, 0x05, 0x1f, 0x27, 0xdc, 0x64, 0x13, 0x16, 0xd3, 0x96, 0x50, - 0xa6, 0x94, 0x62, 0x1e, 0x40, 0xe8, 0x45, 0x9d, 0xcb, 0xbf, 0xf1, 0x69, 0xd9, 0xd8, 0x55, 0x27, - 0x8d, 0xc3, 0x90, 0xf4, 0x2b, 0xc4, 0xfe, 0x51, 0xef, 0x51, 0xd4, 0xae, 0xa0, 0x9d, 0x1e, 0x24, - 0xfb, 0x4f, 0x9b, 0x62, 0x31, 0x93, 0x65, 0x1d, 0x78, 0x21, 0x44, 0x5e, 0x7a, 0x72, 0x2c, 0xa4, - 0x9d, 0x8c, 0x97, 0x63, 0xa9, 0x78, 0x21, 0x4d, 0x1d, 0x34, 0x42, 0x88, 0xfc, 0xf4, 0xfb, 0x53, - 0x72, 0xbd, 0xda, 0x22, 0x59, 0x6f, 0x91, 0xec, 0xb7, 0x08, 0x4f, 0x16, 0xe1, 0xd5, 0x22, 0x2c, - 0x2d, 0xc2, 0xca, 0x22, 0xbc, 0x5b, 0x84, 0x0f, 0x8b, 0x64, 0x6f, 0x11, 0x5e, 0x76, 0x48, 0x56, - 0x3b, 0x24, 0xeb, 0x1d, 0x92, 0x51, 0xd3, 0xdd, 0xe6, 0xea, 0x33, 0x00, 0x00, 
0xff, 0xff, 0xc2, - 0xe7, 0xfe, 0xff, 0x71, 0x01, 0x00, 0x00, -} - -func (this *Entry) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Entry) - if !ok { - that2, ok := that.(Entry) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Column.Equal(that1.Column) { - return false - } - if !this.Value.Equal(that1.Value) { - return false - } - return true -} -func (this *ReadBatch) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadBatch) - if !ok { - that2, ok := that.(ReadBatch) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Entries) != len(that1.Entries) { - return false - } - for i := range this.Entries { - if !this.Entries[i].Equal(&that1.Entries[i]) { - return false - } - } - if this.Key != that1.Key { - return false - } - if this.Expiry != that1.Expiry { - return false - } - if this.Cardinality != that1.Cardinality { - return false - } - return true -} -func (this *Entry) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&storage.Entry{") - s = append(s, "Column: "+fmt.Sprintf("%#v", this.Column)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ReadBatch) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&storage.ReadBatch{") - if this.Entries != nil { - vs := make([]*Entry, len(this.Entries)) - for i := range vs { - vs[i] = &this.Entries[i] - } - s = append(s, "Entries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") - s = append(s, "Expiry: "+fmt.Sprintf("%#v", this.Expiry)+",\n") - s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringCachingIndexClient(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Entry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Entry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintCachingIndexClient(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.Column.Size() - i -= size - if _, err := m.Column.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintCachingIndexClient(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ReadBatch) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadBatch) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() 
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ReadBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Cardinality != 0 {
-		i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Cardinality))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Expiry != 0 {
-		i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Expiry))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintCachingIndexClient(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Entries) > 0 {
-		for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintCachingIndexClient(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintCachingIndexClient(dAtA []byte, offset int, v uint64) int {
-	offset -= sovCachingIndexClient(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *Entry) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.Column.Size()
-	n += 1 + l + sovCachingIndexClient(uint64(l))
-	l = m.Value.Size()
-	n += 1 + l + sovCachingIndexClient(uint64(l))
-	return n
-}
-
-func (m *ReadBatch) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Entries) > 0 {
-		for _, e := range m.Entries {
-			l = e.Size()
-			n += 1 + l + sovCachingIndexClient(uint64(l))
-		}
-	}
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovCachingIndexClient(uint64(l))
-	}
-	if m.Expiry != 0 {
-		n += 1 + sovCachingIndexClient(uint64(m.Expiry))
-	}
-	if m.Cardinality != 0 {
-		n += 1 + sovCachingIndexClient(uint64(m.Cardinality))
-	}
-	return n
-}
-
-func sovCachingIndexClient(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozCachingIndexClient(x uint64) (n int) {
-	return sovCachingIndexClient(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *Entry) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Entry{`,
-		`Column:` + fmt.Sprintf("%v", this.Column) + `,`,
-		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ReadBatch) String() string {
-	if this == nil {
-		return "nil"
-	}
-	repeatedStringForEntries := "[]Entry{"
-	for _, f := range this.Entries {
-		repeatedStringForEntries += strings.Replace(strings.Replace(f.String(), "Entry", "Entry", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForEntries += "}"
-	s := strings.Join([]string{`&ReadBatch{`,
-		`Entries:` + repeatedStringForEntries + `,`,
-		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
-		`Expiry:` + fmt.Sprintf("%v", this.Expiry) + `,`,
-		`Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func valueToStringCachingIndexClient(v interface{}) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("*%v", pv)
-}
-func (m *Entry) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCachingIndexClient
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Column.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCachingIndexClient(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ReadBatch) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCachingIndexClient
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ReadBatch: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ReadBatch: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Entries = append(m.Entries, Entry{})
-			if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Expiry", wireType)
-			}
-			m.Expiry = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Expiry |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType)
-			}
-			m.Cardinality = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Cardinality |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCachingIndexClient(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCachingIndexClient
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipCachingIndexClient(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowCachingIndexClient
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowCachingIndexClient
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthCachingIndexClient
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthCachingIndexClient
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowCachingIndexClient
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipCachingIndexClient(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthCachingIndexClient
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthCachingIndexClient = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowCachingIndexClient   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
deleted file mode 100644
index 22a9d01ff..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
+++ /dev/null
@@ -1,25 +0,0 @@
-syntax = "proto3";
-
-package storage;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-message Entry {
-  bytes Column = 1 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false];
-  bytes Value = 2 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false];
-}
-
-message ReadBatch {
-  repeated Entry entries = 1 [(gogoproto.nullable) = false];
-  string key = 2;
-
-  // The time at which the key expires.
-  int64 expiry = 3;
-
-  // The number of entries; used for cardinality limiting.
-  // entries will be empty when this is set.
-  int32 cardinality = 4;
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
deleted file mode 100644
index 8076590d5..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
+++ /dev/null
@@ -1,373 +0,0 @@
-package storage
-
-import (
-	"context"
-	"flag"
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-
-	"github.com/cortexproject/cortex/pkg/chunk"
-	"github.com/cortexproject/cortex/pkg/chunk/aws"
-	"github.com/cortexproject/cortex/pkg/chunk/azure"
-	"github.com/cortexproject/cortex/pkg/chunk/cache"
-	"github.com/cortexproject/cortex/pkg/chunk/cassandra"
-	"github.com/cortexproject/cortex/pkg/chunk/gcp"
-	"github.com/cortexproject/cortex/pkg/chunk/grpc"
-	"github.com/cortexproject/cortex/pkg/chunk/local"
-	"github.com/cortexproject/cortex/pkg/chunk/objectclient"
-	"github.com/cortexproject/cortex/pkg/chunk/openstack"
-	"github.com/cortexproject/cortex/pkg/chunk/purger"
-	util_log "github.com/cortexproject/cortex/pkg/util/log"
-)
-
-// Supported storage engines
-const (
-	StorageEngineChunks = "chunks"
-	StorageEngineBlocks = "blocks"
-)
-
-// Supported storage clients
-const (
-	StorageTypeAWS            = "aws"
-	StorageTypeAWSDynamo      = "aws-dynamo"
-	StorageTypeAzure          = "azure"
-	StorageTypeBoltDB         = "boltdb"
-	StorageTypeCassandra      = "cassandra"
-	StorageTypeInMemory       = "inmemory"
-	StorageTypeBigTable       = "bigtable"
-	StorageTypeBigTableHashed = "bigtable-hashed"
-	StorageTypeFileSystem     = "filesystem"
-	StorageTypeGCP            = "gcp"
-	StorageTypeGCPColumnKey   = "gcp-columnkey"
-	StorageTypeGCS            = "gcs"
-	StorageTypeGrpc           = "grpc-store"
-	StorageTypeS3             = "s3"
-	StorageTypeSwift          = "swift"
-)
-
-type indexStoreFactories struct {
-	indexClientFactoryFunc IndexClientFactoryFunc
-	tableClientFactoryFunc TableClientFactoryFunc
-}
-
-// IndexClientFactoryFunc defines signature of function which creates chunk.IndexClient for managing index in index store
-type IndexClientFactoryFunc func() (chunk.IndexClient, error)
-
-// TableClientFactoryFunc defines signature of function which creates chunk.TableClient for managing tables in index store
-type TableClientFactoryFunc func() (chunk.TableClient, error)
-
-var customIndexStores = map[string]indexStoreFactories{}
-
-// RegisterIndexStore is used for registering a custom index type.
-// When an index type is registered here with same name as existing types, the registered one takes the precedence.
-func RegisterIndexStore(name string, indexClientFactory IndexClientFactoryFunc, tableClientFactory TableClientFactoryFunc) {
-	customIndexStores[name] = indexStoreFactories{indexClientFactory, tableClientFactory}
-}
-
-// StoreLimits helps get Limits specific to Queries for Stores
-type StoreLimits interface {
-	CardinalityLimit(userID string) int
-	MaxChunksPerQueryFromStore(userID string) int
-	MaxQueryLength(userID string) time.Duration
-}
-
-// Config chooses which storage client to use.
-type Config struct {
-	Engine                 string                  `yaml:"engine"`
-	AWSStorageConfig       aws.StorageConfig       `yaml:"aws"`
-	AzureStorageConfig     azure.BlobStorageConfig `yaml:"azure"`
-	GCPStorageConfig       gcp.Config              `yaml:"bigtable"`
-	GCSConfig              gcp.GCSConfig           `yaml:"gcs"`
-	CassandraStorageConfig cassandra.Config        `yaml:"cassandra"`
-	BoltDBConfig           local.BoltDBConfig      `yaml:"boltdb"`
-	FSConfig               local.FSConfig          `yaml:"filesystem"`
-	Swift                  openstack.SwiftConfig   `yaml:"swift"`
-
-	IndexCacheValidity time.Duration `yaml:"index_cache_validity"`
-
-	IndexQueriesCacheConfig cache.Config `yaml:"index_queries_cache_config"`
-
-	DeleteStoreConfig purger.DeleteStoreConfig `yaml:"delete_store"`
-
-	GrpcConfig grpc.Config `yaml:"grpc_store"`
-}
-
-// RegisterFlags adds the flags required to configure this flag set.
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	cfg.AWSStorageConfig.RegisterFlags(f)
-	cfg.AzureStorageConfig.RegisterFlags(f)
-	cfg.GCPStorageConfig.RegisterFlags(f)
-	cfg.GCSConfig.RegisterFlags(f)
-	cfg.CassandraStorageConfig.RegisterFlags(f)
-	cfg.BoltDBConfig.RegisterFlags(f)
-	cfg.FSConfig.RegisterFlags(f)
-	cfg.DeleteStoreConfig.RegisterFlags(f)
-	cfg.Swift.RegisterFlags(f)
-	cfg.GrpcConfig.RegisterFlags(f)
-
-	f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks (deprecated) or blocks.")
-	cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "Cache config for index entry reading. ", f)
-	f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.")
-}
-
-// Validate config and returns error on failure
-func (cfg *Config) Validate() error {
-	if cfg.Engine != StorageEngineChunks && cfg.Engine != StorageEngineBlocks {
-		return errors.New("unsupported storage engine")
-	}
-	if err := cfg.CassandraStorageConfig.Validate(); err != nil {
-		return errors.Wrap(err, "invalid Cassandra Storage config")
-	}
-	if err := cfg.GCPStorageConfig.Validate(util_log.Logger); err != nil {
-		return errors.Wrap(err, "invalid GCP Storage Storage config")
-	}
-	if err := cfg.Swift.Validate(); err != nil {
-		return errors.Wrap(err, "invalid Swift Storage config")
-	}
-	if err := cfg.IndexQueriesCacheConfig.Validate(); err != nil {
-		return errors.Wrap(err, "invalid Index Queries Cache config")
-	}
-	if err := cfg.AzureStorageConfig.Validate(); err != nil {
-		return errors.Wrap(err, "invalid Azure Storage config")
-	}
-	if err := cfg.AWSStorageConfig.Validate(); err != nil {
-		return errors.Wrap(err, "invalid AWS Storage config")
-	}
-	return nil
-}
-
-// NewStore makes the storage clients based on the configuration.
-func NewStore(
-	cfg Config,
-	storeCfg chunk.StoreConfig,
-	schemaCfg chunk.SchemaConfig,
-	limits StoreLimits,
-	reg prometheus.Registerer,
-	cacheGenNumLoader chunk.CacheGenNumLoader,
-	logger log.Logger,
-) (chunk.Store, error) {
-	chunkMetrics := newChunkClientMetrics(reg)
-
-	indexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig, reg, logger)
-	if err != nil {
-		return nil, err
-	}
-
-	writeDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig, reg, logger)
-	if err != nil {
-		return nil, err
-	}
-
-	chunkCacheCfg := storeCfg.ChunkCacheConfig
-	chunkCacheCfg.Prefix = "chunks"
-	chunksCache, err := cache.New(chunkCacheCfg, reg, logger)
-	if err != nil {
-		return nil, err
-	}
-
-	// Cache is shared by multiple stores, which means they will try and Stop
-	// it more than once. Wrap in a StopOnce to prevent this.
-	indexReadCache = cache.StopOnce(indexReadCache)
-	chunksCache = cache.StopOnce(chunksCache)
-	writeDedupeCache = cache.StopOnce(writeDedupeCache)
-
-	// Lets wrap all caches except chunksCache with CacheGenMiddleware to facilitate cache invalidation using cache generation numbers.
-	// chunksCache is not wrapped because chunks content can't be anyways modified without changing its ID so there is no use of
-	// invalidating chunks cache. Also chunks can be fetched only by their ID found in index and we are anyways removing the index and invalidating index cache here.
-	indexReadCache = cache.NewCacheGenNumMiddleware(indexReadCache)
-	writeDedupeCache = cache.NewCacheGenNumMiddleware(writeDedupeCache)
-
-	err = schemaCfg.Load()
-	if err != nil {
-		return nil, errors.Wrap(err, "error loading schema config")
-	}
-	stores := chunk.NewCompositeStore(cacheGenNumLoader)
-
-	for _, s := range schemaCfg.Configs {
-		indexClientReg := prometheus.WrapRegistererWith(
-			prometheus.Labels{"component": "index-store-" + s.From.String()}, reg)
-
-		index, err := NewIndexClient(s.IndexType, cfg, schemaCfg, indexClientReg)
-		if err != nil {
-			return nil, errors.Wrap(err, "error creating index client")
-		}
-		index = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits, logger)
-
-		objectStoreType := s.ObjectType
-		if objectStoreType == "" {
-			objectStoreType = s.IndexType
-		}
-
-		chunkClientReg := prometheus.WrapRegistererWith(
-			prometheus.Labels{"component": "chunk-store-" + s.From.String()}, reg)
-
-		chunks, err := NewChunkClient(objectStoreType, cfg, schemaCfg, chunkClientReg)
-		if err != nil {
-			return nil, errors.Wrap(err, "error creating object client")
-		}
-
-		chunks = newMetricsChunkClient(chunks, chunkMetrics)
-
-		err = stores.AddPeriod(storeCfg, s, index, chunks, limits, chunksCache, writeDedupeCache)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return stores, nil
-}
-
-// NewIndexClient makes a new index client of the desired type.
-func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (chunk.IndexClient, error) {
-	if indexClientFactory, ok := customIndexStores[name]; ok {
-		if indexClientFactory.indexClientFactoryFunc != nil {
-			return indexClientFactory.indexClientFactoryFunc()
-		}
-	}
-
-	switch name {
-	case StorageTypeInMemory:
-		store := chunk.NewMockStorage()
-		return store, nil
-	case StorageTypeAWS, StorageTypeAWSDynamo:
-		if cfg.AWSStorageConfig.DynamoDB.URL == nil {
-			return nil, fmt.Errorf("Must set -dynamodb.url in aws mode")
-		}
-		path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/")
-		if len(path) > 0 {
-			level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
-		}
-		return aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)
-	case StorageTypeGCP:
-		return gcp.NewStorageClientV1(context.Background(), cfg.GCPStorageConfig, schemaCfg)
-	case StorageTypeGCPColumnKey, StorageTypeBigTable:
-		return gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)
-	case StorageTypeBigTableHashed:
-		cfg.GCPStorageConfig.DistributeKeys = true
-		return gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)
-	case StorageTypeCassandra:
-		return cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg, registerer)
-	case StorageTypeBoltDB:
-		return local.NewBoltDBIndexClient(cfg.BoltDBConfig)
-	case StorageTypeGrpc:
-		return grpc.NewStorageClient(cfg.GrpcConfig, schemaCfg)
-	default:
-		return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v", name, StorageTypeAWS, StorageTypeCassandra, StorageTypeInMemory, StorageTypeGCP, StorageTypeBigTable, StorageTypeBigTableHashed)
-	}
-}
-
-// NewChunkClient makes a new chunk.Client of the desired types.
-func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (chunk.Client, error) {
-	switch name {
-	case StorageTypeInMemory:
-		return chunk.NewMockStorage(), nil
-	case StorageTypeAWS, StorageTypeS3:
-		return newChunkClientFromStore(aws.NewS3ObjectClient(cfg.AWSStorageConfig.S3Config))
-	case StorageTypeAWSDynamo:
-		if cfg.AWSStorageConfig.DynamoDB.URL == nil {
-			return nil, fmt.Errorf("Must set -dynamodb.url in aws mode")
-		}
-		path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/")
-		if len(path) > 0 {
-			level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
-		}
-		return aws.NewDynamoDBChunkClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)
-	case StorageTypeAzure:
-		return newChunkClientFromStore(azure.NewBlobStorage(&cfg.AzureStorageConfig))
-	case StorageTypeGCP:
-		return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg)
-	case StorageTypeGCPColumnKey, StorageTypeBigTable, StorageTypeBigTableHashed:
-		return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg)
-	case StorageTypeGCS:
-		return newChunkClientFromStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig))
-	case StorageTypeSwift:
-		return newChunkClientFromStore(openstack.NewSwiftObjectClient(cfg.Swift))
-	case StorageTypeCassandra:
-		return cassandra.NewObjectClient(cfg.CassandraStorageConfig, schemaCfg, registerer)
-	case StorageTypeFileSystem:
-		store, err := local.NewFSObjectClient(cfg.FSConfig)
-		if err != nil {
-			return nil, err
-		}
-		return objectclient.NewClient(store, objectclient.Base64Encoder), nil
-	case StorageTypeGrpc:
-		return grpc.NewStorageClient(cfg.GrpcConfig, schemaCfg)
-	default:
-		return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v, %v, %v", name, StorageTypeAWS, StorageTypeAzure, StorageTypeCassandra, StorageTypeInMemory, StorageTypeGCP, StorageTypeBigTable, StorageTypeBigTableHashed, StorageTypeGrpc)
-	}
-}
-
-func newChunkClientFromStore(store chunk.ObjectClient, err error) (chunk.Client, error) {
-	if err != nil {
-		return nil, err
-	}
-	return objectclient.NewClient(store, nil), nil
-}
-
-// NewTableClient makes a new table client based on the configuration.
-func NewTableClient(name string, cfg Config, registerer prometheus.Registerer) (chunk.TableClient, error) {
-	if indexClientFactory, ok := customIndexStores[name]; ok {
-		if indexClientFactory.tableClientFactoryFunc != nil {
-			return indexClientFactory.tableClientFactoryFunc()
-		}
-	}
-
-	switch name {
-	case StorageTypeInMemory:
-		return chunk.NewMockStorage(), nil
-	case StorageTypeAWS, StorageTypeAWSDynamo:
-		if cfg.AWSStorageConfig.DynamoDB.URL == nil {
-			return nil, fmt.Errorf("Must set -dynamodb.url in aws mode")
-		}
-		path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/")
-		if len(path) > 0 {
-			level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
-		}
-		return aws.NewDynamoDBTableClient(cfg.AWSStorageConfig.DynamoDBConfig, registerer)
-	case StorageTypeGCP, StorageTypeGCPColumnKey, StorageTypeBigTable, StorageTypeBigTableHashed:
-		return gcp.NewTableClient(context.Background(), cfg.GCPStorageConfig)
-	case StorageTypeCassandra:
-		return cassandra.NewTableClient(context.Background(), cfg.CassandraStorageConfig, registerer)
-	case StorageTypeBoltDB:
-		return local.NewTableClient(cfg.BoltDBConfig.Directory)
-	case StorageTypeGrpc:
-		return grpc.NewTableClient(cfg.GrpcConfig)
-	default:
-		return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v, %v", name, StorageTypeAWS, StorageTypeCassandra, StorageTypeInMemory, StorageTypeGCP, StorageTypeBigTable, StorageTypeBigTableHashed, StorageTypeGrpc)
-	}
-}
-
-// NewBucketClient makes a new bucket client based on the configuration.
-func NewBucketClient(storageConfig Config) (chunk.BucketClient, error) {
-	if storageConfig.FSConfig.Directory != "" {
-		return local.NewFSObjectClient(storageConfig.FSConfig)
-	}
-
-	return nil, nil
-}
-
-// NewObjectClient makes a new StorageClient of the desired types.
-func NewObjectClient(name string, cfg Config) (chunk.ObjectClient, error) {
-	switch name {
-	case StorageTypeAWS, StorageTypeS3:
-		return aws.NewS3ObjectClient(cfg.AWSStorageConfig.S3Config)
-	case StorageTypeGCS:
-		return gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig)
-	case StorageTypeAzure:
-		return azure.NewBlobStorage(&cfg.AzureStorageConfig)
-	case StorageTypeSwift:
-		return openstack.NewSwiftObjectClient(cfg.Swift)
-	case StorageTypeInMemory:
-		return chunk.NewMockStorage(), nil
-	case StorageTypeFileSystem:
-		return local.NewFSObjectClient(cfg.FSConfig)
-	default:
-		return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v", name, StorageTypeAWS, StorageTypeS3, StorageTypeGCS, StorageTypeAzure, StorageTypeFileSystem)
-	}
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/metrics.go
deleted file mode 100644
index 628c89245..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/metrics.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package storage
-
-import (
-	"context"
-
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-
-	"github.com/cortexproject/cortex/pkg/chunk"
-)
-
-// takes a chunk client and exposes metrics for its operations.
-type metricsChunkClient struct {
-	client chunk.Client
-
-	metrics chunkClientMetrics
-}
-
-func newMetricsChunkClient(client chunk.Client, metrics chunkClientMetrics) metricsChunkClient {
-	return metricsChunkClient{
-		client:  client,
-		metrics: metrics,
-	}
-}
-
-type chunkClientMetrics struct {
-	chunksPutPerUser         *prometheus.CounterVec
-	chunksSizePutPerUser     *prometheus.CounterVec
-	chunksFetchedPerUser     *prometheus.CounterVec
-	chunksSizeFetchedPerUser *prometheus.CounterVec
-}
-
-func newChunkClientMetrics(reg prometheus.Registerer) chunkClientMetrics {
-	return chunkClientMetrics{
-		chunksPutPerUser: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
-			Namespace: "cortex",
-			Name:      "chunk_store_stored_chunks_total",
-			Help:      "Total stored chunks per user.",
-		}, []string{"user"}),
-		chunksSizePutPerUser: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
-			Namespace: "cortex",
-			Name:      "chunk_store_stored_chunk_bytes_total",
-			Help:      "Total bytes stored in chunks per user.",
-		}, []string{"user"}),
-		chunksFetchedPerUser: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
-			Namespace: "cortex",
-			Name:      "chunk_store_fetched_chunks_total",
-			Help:      "Total fetched chunks per user.",
-		}, []string{"user"}),
-		chunksSizeFetchedPerUser: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
-			Namespace: "cortex",
-			Name:      "chunk_store_fetched_chunk_bytes_total",
-			Help:      "Total bytes fetched in chunks per user.",
-		}, []string{"user"}),
-	}
-}
-
-func (c metricsChunkClient) Stop() {
-	c.client.Stop()
-}
-
-func (c metricsChunkClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error {
-	if err := c.client.PutChunks(ctx, chunks); err != nil {
-		return err
-	}
-
-	// For PutChunks, we explicitly encode the userID in the chunk and don't use context.
-	userSizes := map[string]int{}
-	userCounts := map[string]int{}
-	for _, c := range chunks {
-		userSizes[c.UserID] += c.Data.Size()
-		userCounts[c.UserID]++
-	}
-	for user, size := range userSizes {
-		c.metrics.chunksSizePutPerUser.WithLabelValues(user).Add(float64(size))
-	}
-	for user, num := range userCounts {
-		c.metrics.chunksPutPerUser.WithLabelValues(user).Add(float64(num))
-	}
-
-	return nil
-}
-
-func (c metricsChunkClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) {
-	chks, err := c.client.GetChunks(ctx, chunks)
-	if err != nil {
-		return chks, err
-	}
-
-	// For GetChunks, userID is the chunk and we don't need to use context.
-	// For now, we just load one user chunks at once, but the interface lets us do it for multiple users.
-	userSizes := map[string]int{}
-	userCounts := map[string]int{}
-	for _, c := range chks {
-		userSizes[c.UserID] += c.Data.Size()
-		userCounts[c.UserID]++
-	}
-	for user, size := range userSizes {
-		c.metrics.chunksSizeFetchedPerUser.WithLabelValues(user).Add(float64(size))
-	}
-	for user, num := range userCounts {
-		c.metrics.chunksFetchedPerUser.WithLabelValues(user).Add(float64(num))
-	}
-
-	return chks, nil
-}
-
-func (c metricsChunkClient) DeleteChunk(ctx context.Context, userID, chunkID string) error {
-	return c.client.DeleteChunk(ctx, userID, chunkID)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go
deleted file mode 100644
index d178781fd..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go
+++ /dev/null
@@ -1,462 +0,0 @@
-package compactor
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/oklog/ulid"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-	"github.com/thanos-io/thanos/pkg/block"
-	"github.com/thanos-io/thanos/pkg/block/metadata"
-	"github.com/thanos-io/thanos/pkg/objstore"
-
-	"github.com/cortexproject/cortex/pkg/storage/bucket"
-	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
-	"github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/cortexproject/cortex/pkg/util/concurrency"
-	util_log "github.com/cortexproject/cortex/pkg/util/log"
-	"github.com/cortexproject/cortex/pkg/util/services"
-)
-
-type BlocksCleanerConfig struct {
-	DeletionDelay                      time.Duration
-	CleanupInterval                    time.Duration
-	CleanupConcurrency                 int
-	BlockDeletionMarksMigrationEnabled bool          // TODO Discuss whether we should remove it in Cortex 1.8.0 and document that upgrading to 1.7.0 before 1.8.0 is required.
-	TenantCleanupDelay                 time.Duration // Delay before removing tenant deletion mark and "debug".
-}
-
-type BlocksCleaner struct {
-	services.Service
-
-	cfg          BlocksCleanerConfig
-	cfgProvider  ConfigProvider
-	logger       log.Logger
-	bucketClient objstore.Bucket
-	usersScanner *cortex_tsdb.UsersScanner
-
-	// Keep track of the last owned users.
-	lastOwnedUsers []string
-
-	// Metrics.
-	runsStarted                       prometheus.Counter
-	runsCompleted                     prometheus.Counter
-	runsFailed                        prometheus.Counter
-	runsLastSuccess                   prometheus.Gauge
-	blocksCleanedTotal                prometheus.Counter
-	blocksFailedTotal                 prometheus.Counter
-	blocksMarkedForDeletion           prometheus.Counter
-	tenantBlocks                      *prometheus.GaugeVec
-	tenantBlocksMarkedForDelete       *prometheus.GaugeVec
-	tenantBlocksMarkedForNoCompaction *prometheus.GaugeVec
-	tenantPartialBlocks               *prometheus.GaugeVec
-	tenantBucketIndexLastUpdate       *prometheus.GaugeVec
-}
-
-func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, usersScanner *cortex_tsdb.UsersScanner, cfgProvider ConfigProvider, logger log.Logger, reg prometheus.Registerer) *BlocksCleaner {
-	c := &BlocksCleaner{
-		cfg:          cfg,
-		bucketClient: bucketClient,
-		usersScanner: usersScanner,
-		cfgProvider:  cfgProvider,
-		logger:       log.With(logger, "component", "cleaner"),
-		runsStarted: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_compactor_block_cleanup_started_total",
-			Help: "Total number of blocks cleanup runs started.",
-		}),
-		runsCompleted: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_compactor_block_cleanup_completed_total",
-			Help: "Total number of blocks cleanup runs successfully completed.",
-		}),
-		runsFailed: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_compactor_block_cleanup_failed_total",
-			Help: "Total number of blocks cleanup runs failed.",
-		}),
-		runsLastSuccess: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
-			Name: "cortex_compactor_block_cleanup_last_successful_run_timestamp_seconds",
-			Help: "Unix timestamp of the last successful blocks cleanup run.",
-		}),
-		blocksCleanedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_compactor_blocks_cleaned_total",
-			Help: "Total number of blocks deleted.",
-		}),
-		blocksFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_compactor_block_cleanup_failures_total",
-			Help: "Total number of blocks failed to be deleted.",
-		}),
-		blocksMarkedForDeletion: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name:        blocksMarkedForDeletionName,
-			Help:        blocksMarkedForDeletionHelp,
-			ConstLabels: prometheus.Labels{"reason": "retention"},
-		}),
-
-		// The following metrics don't have the "cortex_compactor" prefix because not strictly related to
-		// the compactor. They're just tracked by the compactor because it's the most logical place where these
-		// metrics can be tracked.
-		tenantBlocks: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
-			Name: "cortex_bucket_blocks_count",
-			Help: "Total number of blocks in the bucket. Includes blocks marked for deletion, but not partial blocks.",
-		}, []string{"user"}),
-		tenantBlocksMarkedForDelete: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
-			Name: "cortex_bucket_blocks_marked_for_deletion_count",
-			Help: "Total number of blocks marked for deletion in the bucket.",
-		}, []string{"user"}),
-		tenantBlocksMarkedForNoCompaction: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
-			Name: "cortex_bucket_blocks_marked_for_no_compaction_count",
-			Help: "Total number of blocks marked for no compaction in the bucket.",
-		}, []string{"user"}),
-		tenantPartialBlocks: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
-			Name: "cortex_bucket_blocks_partials_count",
-			Help: "Total number of partial blocks.",
-		}, []string{"user"}),
-		tenantBucketIndexLastUpdate: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
-			Name: "cortex_bucket_index_last_successful_update_timestamp_seconds",
-			Help: "Timestamp of the last successful update of a tenant's bucket index.",
-		}, []string{"user"}),
-	}
-
-	c.Service = services.NewTimerService(cfg.CleanupInterval, c.starting, c.ticker, nil)
-
-	return c
-}
-
-func (c *BlocksCleaner) starting(ctx context.Context) error {
-	// Run a cleanup so that any other service depending on this service
-	// is guaranteed to start once the initial cleanup has been done.
-	c.runCleanup(ctx, true)
-
-	return nil
-}
-
-func (c *BlocksCleaner) ticker(ctx context.Context) error {
-	c.runCleanup(ctx, false)
-
-	return nil
-}
-
-func (c *BlocksCleaner) runCleanup(ctx context.Context, firstRun bool) {
-	level.Info(c.logger).Log("msg", "started blocks cleanup and maintenance")
-	c.runsStarted.Inc()
-
-	if err := c.cleanUsers(ctx, firstRun); err == nil {
-		level.Info(c.logger).Log("msg", "successfully completed blocks cleanup and maintenance")
-		c.runsCompleted.Inc()
-		c.runsLastSuccess.SetToCurrentTime()
-	} else if errors.Is(err, context.Canceled) {
-		level.Info(c.logger).Log("msg", "canceled blocks cleanup and maintenance", "err", err)
-		return
-	} else {
-		level.Error(c.logger).Log("msg", "failed to run blocks cleanup and maintenance", "err", err.Error())
-		c.runsFailed.Inc()
-	}
-}
-
-func (c *BlocksCleaner) cleanUsers(ctx context.Context, firstRun bool) error {
-	users, deleted, err := c.usersScanner.ScanUsers(ctx)
-	if err != nil {
-		return errors.Wrap(err, "failed to discover users from bucket")
-	}
-
-	isActive := util.StringsMap(users)
-	isDeleted := util.StringsMap(deleted)
-	allUsers := append(users, deleted...)
-
-	// Delete per-tenant metrics for all tenants not belonging anymore to this shard.
-	// Such tenants have been moved to a different shard, so their updated metrics will
-	// be exported by the new shard.
-	for _, userID := range c.lastOwnedUsers {
-		if !isActive[userID] && !isDeleted[userID] {
-			c.tenantBlocks.DeleteLabelValues(userID)
-			c.tenantBlocksMarkedForDelete.DeleteLabelValues(userID)
-			c.tenantBlocksMarkedForNoCompaction.DeleteLabelValues(userID)
-			c.tenantPartialBlocks.DeleteLabelValues(userID)
-			c.tenantBucketIndexLastUpdate.DeleteLabelValues(userID)
-		}
-	}
-	c.lastOwnedUsers = allUsers
-
-	return concurrency.ForEachUser(ctx, allUsers, c.cfg.CleanupConcurrency, func(ctx context.Context, userID string) error {
-		if isDeleted[userID] {
-			return errors.Wrapf(c.deleteUserMarkedForDeletion(ctx, userID), "failed to delete user marked for deletion: %s", userID)
-		}
-		return errors.Wrapf(c.cleanUser(ctx, userID, firstRun), "failed to delete blocks for user: %s", userID)
-	})
-}
-
-// Remove blocks and remaining data for tenant marked for deletion.
-func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userID string) error {
-	userLogger := util_log.WithUserID(userID, c.logger)
-	userBucket := bucket.NewUserBucketClient(userID, c.bucketClient, c.cfgProvider)
-
-	level.Info(userLogger).Log("msg", "deleting blocks for tenant marked for deletion")
-
-	// We immediately delete the bucket index, to signal to its consumers that
-	// the tenant has "no blocks" in the storage.
-	if err := bucketindex.DeleteIndex(ctx, c.bucketClient, userID, c.cfgProvider); err != nil {
-		return err
-	}
-	c.tenantBucketIndexLastUpdate.DeleteLabelValues(userID)
-
-	var deletedBlocks, failed int
-	err := userBucket.Iter(ctx, "", func(name string) error {
-		if err := ctx.Err(); err != nil {
-			return err
-		}
-
-		id, ok := block.IsBlockDir(name)
-		if !ok {
-			return nil
-		}
-
-		err := block.Delete(ctx, userLogger, userBucket, id)
-		if err != nil {
-			failed++
-			c.blocksFailedTotal.Inc()
-			level.Warn(userLogger).Log("msg", "failed to delete block", "block", id, "err", err)
-			return nil // Continue with other blocks.
-		}
-
-		deletedBlocks++
-		c.blocksCleanedTotal.Inc()
-		level.Info(userLogger).Log("msg", "deleted block", "block", id)
-		return nil
-	})
-
-	if err != nil {
-		return err
-	}
-
-	if failed > 0 {
-		// The number of blocks left in the storage is equal to the number of blocks we failed
-		// to delete. We also consider them all marked for deletion given the next run will try
-		// to delete them again.
-		c.tenantBlocks.WithLabelValues(userID).Set(float64(failed))
-		c.tenantBlocksMarkedForDelete.WithLabelValues(userID).Set(float64(failed))
-		c.tenantPartialBlocks.WithLabelValues(userID).Set(0)
-
-		return errors.Errorf("failed to delete %d blocks", failed)
-	}
-
-	// Given all blocks have been deleted, we can also remove the metrics.
-	c.tenantBlocks.DeleteLabelValues(userID)
-	c.tenantBlocksMarkedForDelete.DeleteLabelValues(userID)
-	c.tenantBlocksMarkedForNoCompaction.DeleteLabelValues(userID)
-	c.tenantPartialBlocks.DeleteLabelValues(userID)
-
-	if deletedBlocks > 0 {
-		level.Info(userLogger).Log("msg", "deleted blocks for tenant marked for deletion", "deletedBlocks", deletedBlocks)
-	}
-
-	mark, err := cortex_tsdb.ReadTenantDeletionMark(ctx, c.bucketClient, userID)
-	if err != nil {
-		return errors.Wrap(err, "failed to read tenant deletion mark")
-	}
-	if mark == nil {
-		return errors.Wrap(err, "cannot find tenant deletion mark anymore")
-	}
-
-	// If we have just deleted some blocks, update "finished" time. Also update "finished" time if it wasn't set yet, but there are no blocks.
-	// Note: this UPDATES the tenant deletion mark. Components that use caching bucket will NOT SEE this update,
-	// but that is fine -- they only check whether tenant deletion marker exists or not.
-	if deletedBlocks > 0 || mark.FinishedTime == 0 {
-		level.Info(userLogger).Log("msg", "updating finished time in tenant deletion mark")
-		mark.FinishedTime = time.Now().Unix()
-		return errors.Wrap(cortex_tsdb.WriteTenantDeletionMark(ctx, c.bucketClient, userID, c.cfgProvider, mark), "failed to update tenant deletion mark")
-	}
-
-	if time.Since(time.Unix(mark.FinishedTime, 0)) < c.cfg.TenantCleanupDelay {
-		return nil
-	}
-
-	level.Info(userLogger).Log("msg", "cleaning up remaining blocks data for tenant marked for deletion")
-
-	// Let's do final cleanup of tenant.
-	if deleted, err := bucket.DeletePrefix(ctx, userBucket, block.DebugMetas, userLogger); err != nil {
-		return errors.Wrap(err, "failed to delete "+block.DebugMetas)
-	} else if deleted > 0 {
-		level.Info(userLogger).Log("msg", "deleted files under "+block.DebugMetas+" for tenant marked for deletion", "count", deleted)
-	}
-
-	// Tenant deletion mark file is inside Markers as well.
-	if deleted, err := bucket.DeletePrefix(ctx, userBucket, bucketindex.MarkersPathname, userLogger); err != nil {
-		return errors.Wrap(err, "failed to delete marker files")
-	} else if deleted > 0 {
-		level.Info(userLogger).Log("msg", "deleted marker files for tenant marked for deletion", "count", deleted)
-	}
-
-	return nil
-}
-
-func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string, firstRun bool) (returnErr error) {
-	userLogger := util_log.WithUserID(userID, c.logger)
-	userBucket := bucket.NewUserBucketClient(userID, c.bucketClient, c.cfgProvider)
-	startTime := time.Now()
-
-	level.Info(userLogger).Log("msg", "started blocks cleanup and maintenance")
-	defer func() {
-		if returnErr != nil {
-			level.Warn(userLogger).Log("msg", "failed blocks cleanup and maintenance", "err", returnErr)
-		} else {
-			level.Info(userLogger).Log("msg", "completed blocks cleanup and maintenance", "duration", time.Since(startTime))
-		}
-	}()
-
-	// Migrate block deletion marks to the global markers location. This operation is a best-effort.
-	if firstRun && c.cfg.BlockDeletionMarksMigrationEnabled {
-		if err := bucketindex.MigrateBlockDeletionMarksToGlobalLocation(ctx, c.bucketClient, userID, c.cfgProvider); err != nil {
-			level.Warn(userLogger).Log("msg", "failed to migrate block deletion marks to the global markers location", "err", err)
-		} else {
-			level.Info(userLogger).Log("msg", "migrated block deletion marks to the global markers location")
-		}
-	}
-
-	// Read the bucket index.
-	idx, err := bucketindex.ReadIndex(ctx, c.bucketClient, userID, c.cfgProvider, c.logger)
-	if errors.Is(err, bucketindex.ErrIndexCorrupted) {
-		level.Warn(userLogger).Log("msg", "found a corrupted bucket index, recreating it")
-	} else if err != nil && !errors.Is(err, bucketindex.ErrIndexNotFound) {
-		return err
-	}
-
-	// Mark blocks for future deletion based on the retention period for the user.
-	// Note doing this before UpdateIndex, so it reads in the deletion marks.
-	// The trade-off being that retention is not applied if the index has to be
-	// built, but this is rare.
-	if idx != nil {
-		// We do not want to stop the remaining work in the cleaner if an
-		// error occurs here. Errors are logged in the function.
-		retention := c.cfgProvider.CompactorBlocksRetentionPeriod(userID)
-		c.applyUserRetentionPeriod(ctx, idx, retention, userBucket, userLogger)
-	}
-
-	// Generate an updated in-memory version of the bucket index.
-	w := bucketindex.NewUpdater(c.bucketClient, userID, c.cfgProvider, c.logger)
-	idx, partials, totalBlocksBlocksMarkedForNoCompaction, err := w.UpdateIndex(ctx, idx)
-	if err != nil {
-		return err
-	}
-
-	// Delete blocks marked for deletion. We iterate over a copy of deletion marks because
-	// we'll need to manipulate the index (removing blocks which get deleted).
-	for _, mark := range idx.BlockDeletionMarks.Clone() {
-		if time.Since(mark.GetDeletionTime()).Seconds() <= c.cfg.DeletionDelay.Seconds() {
-			continue
-		}
-
-		if err := block.Delete(ctx, userLogger, userBucket, mark.ID); err != nil {
-			c.blocksFailedTotal.Inc()
-			level.Warn(userLogger).Log("msg", "failed to delete block marked for deletion", "block", mark.ID, "err", err)
-			continue
-		}
-
-		// Remove the block from the bucket index too.
-		idx.RemoveBlock(mark.ID)
-
-		c.blocksCleanedTotal.Inc()
-		level.Info(userLogger).Log("msg", "deleted block marked for deletion", "block", mark.ID)
-	}
-
-	// Partial blocks with a deletion mark can be cleaned up. This is a best effort, so we don't return
-	// error if the cleanup of partial blocks fail.
-	if len(partials) > 0 {
-		c.cleanUserPartialBlocks(ctx, partials, idx, userBucket, userLogger)
-	}
-
-	// Upload the updated index to the storage.
-	if err := bucketindex.WriteIndex(ctx, c.bucketClient, userID, c.cfgProvider, idx); err != nil {
-		return err
-	}
-
-	c.tenantBlocks.WithLabelValues(userID).Set(float64(len(idx.Blocks)))
-	c.tenantBlocksMarkedForDelete.WithLabelValues(userID).Set(float64(len(idx.BlockDeletionMarks)))
-	c.tenantBlocksMarkedForNoCompaction.WithLabelValues(userID).Set(float64(totalBlocksBlocksMarkedForNoCompaction))
-	c.tenantBucketIndexLastUpdate.WithLabelValues(userID).SetToCurrentTime()
-	c.tenantPartialBlocks.WithLabelValues(userID).Set(float64(len(partials)))
-
-	return nil
-}
-
-// cleanUserPartialBlocks delete partial blocks which are safe to be deleted. The provided partials map
-// is updated accordingly.
-func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map[ulid.ULID]error, idx *bucketindex.Index, userBucket objstore.InstrumentedBucket, userLogger log.Logger) {
-	for blockID, blockErr := range partials {
-		// We can safely delete only blocks which are partial because the meta.json is missing.
-		if !errors.Is(blockErr, bucketindex.ErrBlockMetaNotFound) {
-			continue
-		}
-
-		// We can safely delete only partial blocks with a deletion mark.
-		err := metadata.ReadMarker(ctx, userLogger, userBucket, blockID.String(), &metadata.DeletionMark{})
-		if errors.Is(err, metadata.ErrorMarkerNotFound) {
-			continue
-		}
-		if err != nil {
-			level.Warn(userLogger).Log("msg", "error reading partial block deletion mark", "block", blockID, "err", err)
-			continue
-		}
-
-		// Hard-delete partial blocks having a deletion mark, even if the deletion threshold has not
-		// been reached yet.
-		if err := block.Delete(ctx, userLogger, userBucket, blockID); err != nil {
-			c.blocksFailedTotal.Inc()
-			level.Warn(userLogger).Log("msg", "error deleting partial block marked for deletion", "block", blockID, "err", err)
-			continue
-		}
-
-		// Remove the block from the bucket index too.
-		idx.RemoveBlock(blockID)
-		delete(partials, blockID)
-
-		c.blocksCleanedTotal.Inc()
-		level.Info(userLogger).Log("msg", "deleted partial block marked for deletion", "block", blockID)
-	}
-}
-
-// applyUserRetentionPeriod marks blocks for deletion which have aged past the retention period.
-func (c *BlocksCleaner) applyUserRetentionPeriod(ctx context.Context, idx *bucketindex.Index, retention time.Duration, userBucket objstore.Bucket, userLogger log.Logger) {
-	// The retention period of zero is a special value indicating to never delete.
-	if retention <= 0 {
-		return
-	}
-
-	level.Debug(userLogger).Log("msg", "applying retention", "retention", retention.String())
-	blocks := listBlocksOutsideRetentionPeriod(idx, time.Now().Add(-retention))
-
-	// Attempt to mark all blocks. It is not critical if a marking fails, as
-	// the cleaner will retry applying the retention in its next cycle.
-	for _, b := range blocks {
-		level.Info(userLogger).Log("msg", "applied retention: marking block for deletion", "block", b.ID, "maxTime", b.MaxTime)
-		if err := block.MarkForDeletion(ctx, userLogger, userBucket, b.ID, fmt.Sprintf("block exceeding retention of %v", retention), c.blocksMarkedForDeletion); err != nil {
-			level.Warn(userLogger).Log("msg", "failed to mark block for deletion", "block", b.ID, "err", err)
-		}
-	}
-}
-
-// listBlocksOutsideRetentionPeriod determines the blocks which have aged past
-// the specified retention period, and are not already marked for deletion.
-func listBlocksOutsideRetentionPeriod(idx *bucketindex.Index, threshold time.Time) (result bucketindex.Blocks) {
-	// Whilst re-marking a block is not harmful, it is wasteful and generates
-	// a warning log message. Use the block deletion marks already in-memory
-	// to prevent marking blocks already marked for deletion.
-	marked := make(map[ulid.ULID]struct{}, len(idx.BlockDeletionMarks))
-	for _, d := range idx.BlockDeletionMarks {
-		marked[d.ID] = struct{}{}
-	}
-
-	for _, b := range idx.Blocks {
-		maxTime := time.Unix(b.MaxTime/1000, 0)
-		if maxTime.Before(threshold) {
-			if _, isMarked := marked[b.ID]; !isMarked {
-				result = append(result, b)
-			}
-		}
-	}
-
-	return
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
deleted file mode 100644
index 352dcee72..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
+++ /dev/null
@@ -1,894 +0,0 @@
-package compactor
-
-import (
-	"context"
-	"flag"
-	"fmt"
-	"hash/fnv"
-	"io/ioutil"
-	"math/rand"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-	"github.com/prometheus/prometheus/tsdb"
-	"github.com/thanos-io/thanos/pkg/block"
-	"github.com/thanos-io/thanos/pkg/block/metadata"
-	"github.com/thanos-io/thanos/pkg/compact"
-	"github.com/thanos-io/thanos/pkg/compact/downsample"
-	"github.com/thanos-io/thanos/pkg/objstore"
-
-	"github.com/cortexproject/cortex/pkg/ring"
-	"github.com/cortexproject/cortex/pkg/storage/bucket"
-	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
-	"github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/cortexproject/cortex/pkg/util/backoff"
-	"github.com/cortexproject/cortex/pkg/util/flagext"
-	util_log "github.com/cortexproject/cortex/pkg/util/log"
-	"github.com/cortexproject/cortex/pkg/util/services"
-	"github.com/cortexproject/cortex/pkg/util/validation"
-)
-
-const (
-	// ringKey is the key under which we store the compactors ring in the KVStore.
-	ringKey = "compactor"
-
-	blocksMarkedForDeletionName = "cortex_compactor_blocks_marked_for_deletion_total"
-	blocksMarkedForDeletionHelp = "Total number of blocks marked for deletion in compactor."
-)
-
-var (
-	errInvalidBlockRanges = "compactor block range periods should be divisible by the previous one, but %s is not divisible by %s"
-	RingOp                = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, nil)
-
-	supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle}
-	errInvalidShardingStrategy  = errors.New("invalid sharding strategy")
-	errInvalidTenantShardSize   = errors.New("invalid tenant shard size, the value must be greater than 0")
-
-	DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, _ prometheus.Gauge, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string) compact.Grouper {
-		return compact.NewDefaultGrouper(
-			logger,
-			bkt,
-			false, // Do not accept malformed indexes
-			true,  // Enable vertical compaction
-			reg,
-			blocksMarkedForDeletion,
-			garbageCollectedBlocks,
-			blocksMarkedForNoCompaction,
-			metadata.NoneFunc)
-	}
-
-	ShuffleShardingGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, remainingPlannedCompactions prometheus.Gauge, ring *ring.Ring, ringLifecycle *ring.Lifecycler, limits Limits, userID string) compact.Grouper {
-		return NewShuffleShardingGrouper(
-			logger,
-			bkt,
-			false, // Do not accept malformed indexes
-			true,  // Enable vertical compaction
-			reg,
-			blocksMarkedForDeletion,
-			blocksMarkedForNoCompaction,
-			garbageCollectedBlocks,
-			remainingPlannedCompactions,
-			metadata.NoneFunc,
-			cfg,
-			ring,
-			ringLifecycle.Addr,
-			limits,
-			userID)
-	}
-
-	DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, PlannerFactory, error) {
-		compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool(), nil)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		plannerFactory := func(logger log.Logger, cfg Config, noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter) compact.Planner {
-			return compact.NewPlanner(logger, cfg.BlockRanges.ToMilliseconds(), noCompactionMarkFilter)
-		}
-
-		return compactor, plannerFactory, nil
-	}
-
-	ShuffleShardingBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, PlannerFactory, error) {
-		compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool(), nil)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		plannerFactory := func(logger log.Logger, cfg Config, noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter) compact.Planner {
-
-			return NewShuffleShardingPlanner(logger, cfg.BlockRanges.ToMilliseconds(), noCompactionMarkFilter.NoCompactMarkedBlocks)
-		}
-		return compactor, plannerFactory, nil
-	}
-)
-
-// BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks.
-type BlocksGrouperFactory func(
-	ctx context.Context,
-	cfg Config,
-	bkt objstore.Bucket,
-	logger log.Logger,
-	reg prometheus.Registerer,
-	blocksMarkedForDeletion prometheus.Counter,
-	blocksMarkedForNoCompact prometheus.Counter,
-	garbageCollectedBlocks prometheus.Counter,
-	remainingPlannedCompactions prometheus.Gauge,
-	ring *ring.Ring,
-	ringLifecycler *ring.Lifecycler,
-	limit Limits,
-	userID string,
-) compact.Grouper
-
-// BlocksCompactorFactory builds and returns the compactor and planner to use to compact a tenant's blocks.
-type BlocksCompactorFactory func(
-	ctx context.Context,
-	cfg Config,
-	logger log.Logger,
-	reg prometheus.Registerer,
-) (compact.Compactor, PlannerFactory, error)
-
-type PlannerFactory func(
-	logger log.Logger,
-	cfg Config,
-	noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter,
-) compact.Planner
-
-// Limits defines limits used by the Compactor.
-type Limits interface {
-	CompactorTenantShardSize(userID string) int
-}
-
-// Config holds the Compactor config.
-type Config struct {
-	BlockRanges                           cortex_tsdb.DurationList `yaml:"block_ranges"`
-	BlockSyncConcurrency                  int                      `yaml:"block_sync_concurrency"`
-	MetaSyncConcurrency                   int                      `yaml:"meta_sync_concurrency"`
-	ConsistencyDelay                      time.Duration            `yaml:"consistency_delay"`
-	DataDir                               string                   `yaml:"data_dir"`
-	CompactionInterval                    time.Duration            `yaml:"compaction_interval"`
-	CompactionRetries                     int                      `yaml:"compaction_retries"`
-	CompactionConcurrency                 int                      `yaml:"compaction_concurrency"`
-	CleanupInterval                       time.Duration            `yaml:"cleanup_interval"`
-	CleanupConcurrency                    int                      `yaml:"cleanup_concurrency"`
-	DeletionDelay                         time.Duration            `yaml:"deletion_delay"`
-	TenantCleanupDelay                    time.Duration            `yaml:"tenant_cleanup_delay"`
-	SkipBlocksWithOutOfOrderChunksEnabled bool                     `yaml:"skip_blocks_with_out_of_order_chunks_enabled"`
-
-	// Whether the migration of block deletion marks to the global markers location is enabled.
-	BlockDeletionMarksMigrationEnabled bool `yaml:"block_deletion_marks_migration_enabled"`
-
-	EnabledTenants  flagext.StringSliceCSV `yaml:"enabled_tenants"`
-	DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"`
-
-	// Compactors sharding.
-	ShardingEnabled  bool       `yaml:"sharding_enabled"`
-	ShardingStrategy string     `yaml:"sharding_strategy"`
-	ShardingRing     RingConfig `yaml:"sharding_ring"`
-
-	// No need to add options to customize the retry backoff,
-	// given the defaults should be fine, but allow to override
-	// it in tests.
-	retryMinBackoff time.Duration `yaml:"-"`
-	retryMaxBackoff time.Duration `yaml:"-"`
-
-	// Allow downstream projects to customise the blocks compactor.
-	BlocksGrouperFactory   BlocksGrouperFactory   `yaml:"-"`
-	BlocksCompactorFactory BlocksCompactorFactory `yaml:"-"`
-}
-
-// RegisterFlags registers the Compactor flags.
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	cfg.ShardingRing.RegisterFlags(f)
-
-	cfg.BlockRanges = cortex_tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}
-	cfg.retryMinBackoff = 10 * time.Second
-	cfg.retryMaxBackoff = time.Minute
-
-	f.Var(&cfg.BlockRanges, "compactor.block-ranges", "List of compaction time ranges.")
-	f.DurationVar(&cfg.ConsistencyDelay, "compactor.consistency-delay", 0, fmt.Sprintf("Minimum age of fresh (non-compacted) blocks before they are being processed. Malformed blocks older than the maximum of consistency-delay and %s will be removed.", compact.PartialUploadThresholdAge))
-	f.IntVar(&cfg.BlockSyncConcurrency, "compactor.block-sync-concurrency", 20, "Number of Go routines to use when syncing block index and chunks files from the long term storage.")
-	f.IntVar(&cfg.MetaSyncConcurrency, "compactor.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from the long term storage.")
-	f.StringVar(&cfg.DataDir, "compactor.data-dir", "./data", "Data directory in which to cache blocks and process compactions")
-	f.DurationVar(&cfg.CompactionInterval, "compactor.compaction-interval", time.Hour, "The frequency at which the compaction runs")
-	f.IntVar(&cfg.CompactionRetries, "compactor.compaction-retries", 3, "How many times to retry a failed compaction within a single compaction run.")
-	f.IntVar(&cfg.CompactionConcurrency, "compactor.compaction-concurrency", 1, "Max number of concurrent compactions running.")
-	f.DurationVar(&cfg.CleanupInterval, "compactor.cleanup-interval", 15*time.Minute, "How frequently compactor should run blocks cleanup and maintenance, as well as update the bucket index.")
-	f.IntVar(&cfg.CleanupConcurrency, "compactor.cleanup-concurrency", 20, "Max number of tenants for which blocks cleanup and maintenance should run concurrently.")
-	f.BoolVar(&cfg.ShardingEnabled, "compactor.sharding-enabled", false, "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.")
-	f.StringVar(&cfg.ShardingStrategy, "compactor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", ")))
-	f.DurationVar(&cfg.DeletionDelay, "compactor.deletion-delay", 12*time.Hour, "Time before a block marked for deletion is deleted from bucket. "+
-		"If not 0, blocks will be marked for deletion and compactor component will permanently delete blocks marked for deletion from the bucket. "+
-		"If 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures.")
-	f.DurationVar(&cfg.TenantCleanupDelay, "compactor.tenant-cleanup-delay", 6*time.Hour, "For tenants marked for deletion, this is time between deleting of last block, and doing final cleanup (marker files, debug files) of the tenant.")
-	f.BoolVar(&cfg.BlockDeletionMarksMigrationEnabled, "compactor.block-deletion-marks-migration-enabled", false, "When enabled, at compactor startup the bucket will be scanned and all found deletion marks inside the block location will be copied to the markers global location too. This option can (and should) be safely disabled as soon as the compactor has successfully run at least once.")
-	f.BoolVar(&cfg.SkipBlocksWithOutOfOrderChunksEnabled, "compactor.skip-blocks-with-out-of-order-chunks-enabled", false, "When enabled, mark blocks containing index with out-of-order chunks for no compact instead of halting the compaction.")
-
-	f.Var(&cfg.EnabledTenants, "compactor.enabled-tenants", "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. Subject to sharding.")
-	f.Var(&cfg.DisabledTenants, "compactor.disabled-tenants", "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.")
-}
-
-func (cfg *Config) Validate(limits validation.Limits) error {
-	// Each block range period should be divisible by the previous one.
-	for i := 1; i < len(cfg.BlockRanges); i++ {
-		if cfg.BlockRanges[i]%cfg.BlockRanges[i-1] != 0 {
-			return errors.Errorf(errInvalidBlockRanges, cfg.BlockRanges[i].String(), cfg.BlockRanges[i-1].String())
-		}
-	}
-
-	// Make sure a valid sharding strategy is being used
-	if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) {
-		return errInvalidShardingStrategy
-	}
-
-	if cfg.ShardingEnabled && cfg.ShardingStrategy == util.ShardingStrategyShuffle {
-		if limits.CompactorTenantShardSize <= 0 {
-			return errInvalidTenantShardSize
-		}
-	}
-
-	return nil
-}
-
-// ConfigProvider defines the per-tenant config provider for the Compactor.
-type ConfigProvider interface {
-	bucket.TenantConfigProvider
-	CompactorBlocksRetentionPeriod(user string) time.Duration
-}
-
-// Compactor is a multi-tenant TSDB blocks compactor based on Thanos.
-type Compactor struct {
-	services.Service
-
-	compactorCfg   Config
-	storageCfg     cortex_tsdb.BlocksStorageConfig
-	cfgProvider    ConfigProvider
-	logger         log.Logger
-	parentLogger   log.Logger
-	registerer     prometheus.Registerer
-	allowedTenants *util.AllowedTenants
-	limits         Limits
-
-	// Functions that creates bucket client, grouper, planner and compactor using the context.
-	// Useful for injecting mock objects from tests.
-	bucketClientFactory    func(ctx context.Context) (objstore.Bucket, error)
-	blocksGrouperFactory   BlocksGrouperFactory
-	blocksCompactorFactory BlocksCompactorFactory
-
-	// Users scanner, used to discover users from the bucket.
-	usersScanner *cortex_tsdb.UsersScanner
-
-	// Blocks cleaner is responsible to hard delete blocks marked for deletion.
-	blocksCleaner *BlocksCleaner
-
-	// Underlying compactor used to compact TSDB blocks.
-	blocksCompactor compact.Compactor
-
-	blocksPlannerFactory PlannerFactory
-
-	// Client used to run operations on the bucket storing blocks.
-	bucketClient objstore.Bucket
-
-	// Ring used for sharding compactions.
-	ringLifecycler         *ring.Lifecycler
-	ring                   *ring.Ring
-	ringSubservices        *services.Manager
-	ringSubservicesWatcher *services.FailureWatcher
-
-	// Metrics.
-	compactionRunsStarted          prometheus.Counter
-	compactionRunsCompleted        prometheus.Counter
-	compactionRunsFailed           prometheus.Counter
-	compactionRunsLastSuccess      prometheus.Gauge
-	compactionRunDiscoveredTenants prometheus.Gauge
-	compactionRunSkippedTenants    prometheus.Gauge
-	compactionRunSucceededTenants  prometheus.Gauge
-	compactionRunFailedTenants     prometheus.Gauge
-	compactionRunInterval          prometheus.Gauge
-	blocksMarkedForDeletion        prometheus.Counter
-	blocksMarkedForNoCompaction    prometheus.Counter
-	garbageCollectedBlocks         prometheus.Counter
-	remainingPlannedCompactions    prometheus.Gauge
-
-	// TSDB syncer metrics
-	syncerMetrics *syncerMetrics
-}
-
-// NewCompactor makes a new Compactor.
-func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, cfgProvider ConfigProvider, logger log.Logger, registerer prometheus.Registerer, limits Limits) (*Compactor, error) { - bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) { - return bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) - } - - blocksGrouperFactory := compactorCfg.BlocksGrouperFactory - if blocksGrouperFactory == nil { - if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { - blocksGrouperFactory = ShuffleShardingGrouperFactory - } else { - blocksGrouperFactory = DefaultBlocksGrouperFactory - } - } - - blocksCompactorFactory := compactorCfg.BlocksCompactorFactory - if blocksCompactorFactory == nil { - if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { - blocksCompactorFactory = ShuffleShardingBlocksCompactorFactory - } else { - blocksCompactorFactory = DefaultBlocksCompactorFactory - } - } - - cortexCompactor, err := newCompactor(compactorCfg, storageCfg, cfgProvider, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory, limits) - if err != nil { - return nil, errors.Wrap(err, "failed to create Cortex blocks compactor") - } - - return cortexCompactor, nil -} - -func newCompactor( - compactorCfg Config, - storageCfg cortex_tsdb.BlocksStorageConfig, - cfgProvider ConfigProvider, - logger log.Logger, - registerer prometheus.Registerer, - bucketClientFactory func(ctx context.Context) (objstore.Bucket, error), - blocksGrouperFactory BlocksGrouperFactory, - blocksCompactorFactory BlocksCompactorFactory, - limits Limits, -) (*Compactor, error) { - var remainingPlannedCompactions prometheus.Gauge - if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { - remainingPlannedCompactions = promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_remaining_planned_compactions", - Help: "Total number of plans that remain to be compacted. 
Only available with shuffle-sharding strategy", - }) - } - c := &Compactor{ - compactorCfg: compactorCfg, - storageCfg: storageCfg, - cfgProvider: cfgProvider, - parentLogger: logger, - logger: log.With(logger, "component", "compactor"), - registerer: registerer, - syncerMetrics: newSyncerMetrics(registerer), - bucketClientFactory: bucketClientFactory, - blocksGrouperFactory: blocksGrouperFactory, - blocksCompactorFactory: blocksCompactorFactory, - allowedTenants: util.NewAllowedTenants(compactorCfg.EnabledTenants, compactorCfg.DisabledTenants), - - compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_runs_started_total", - Help: "Total number of compaction runs started.", - }), - compactionRunsCompleted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_runs_completed_total", - Help: "Total number of compaction runs successfully completed.", - }), - compactionRunsFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_runs_failed_total", - Help: "Total number of compaction runs failed.", - }), - compactionRunsLastSuccess: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_last_successful_run_timestamp_seconds", - Help: "Unix timestamp of the last successful compaction run.", - }), - compactionRunDiscoveredTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_tenants_discovered", - Help: "Number of tenants discovered during the current compaction run. Reset to 0 when compactor is idle.", - }), - compactionRunSkippedTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_tenants_skipped", - Help: "Number of tenants skipped during the current compaction run. Reset to 0 when compactor is idle.", - }), - compactionRunSucceededTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_tenants_processing_succeeded", - Help: "Number of tenants successfully processed during the current compaction run. Reset to 0 when compactor is idle.", - }), - compactionRunFailedTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_tenants_processing_failed", - Help: "Number of tenants failed processing during the current compaction run. Reset to 0 when compactor is idle.", - }), - compactionRunInterval: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_compaction_interval_seconds", - Help: "The configured interval on which compaction is run in seconds. 
Useful when compared to the last successful run metric to accurately detect multiple failed compaction runs.", - }), - blocksMarkedForDeletion: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: blocksMarkedForDeletionName, - Help: blocksMarkedForDeletionHelp, - ConstLabels: prometheus.Labels{"reason": "compaction"}, - }), - blocksMarkedForNoCompaction: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_blocks_marked_for_no_compaction_total", - Help: "Total number of blocks marked for no compact during a compaction run.", - }), - garbageCollectedBlocks: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_garbage_collected_blocks_total", - Help: "Total number of blocks marked for deletion by compactor.", - }), - remainingPlannedCompactions: remainingPlannedCompactions, - limits: limits, - } - - if len(compactorCfg.EnabledTenants) > 0 { - level.Info(c.logger).Log("msg", "compactor using enabled users", "enabled", strings.Join(compactorCfg.EnabledTenants, ", ")) - } - if len(compactorCfg.DisabledTenants) > 0 { - level.Info(c.logger).Log("msg", "compactor using disabled users", "disabled", strings.Join(compactorCfg.DisabledTenants, ", ")) - } - - c.Service = services.NewBasicService(c.starting, c.running, c.stopping) - - // The last successful compaction run metric is exposed as seconds since epoch, so we need to use seconds for this metric. - c.compactionRunInterval.Set(c.compactorCfg.CompactionInterval.Seconds()) - - return c, nil -} - -// Start the compactor. -func (c *Compactor) starting(ctx context.Context) error { - var err error - - // Create bucket client. - c.bucketClient, err = c.bucketClientFactory(ctx) - if err != nil { - return errors.Wrap(err, "failed to create bucket client") - } - - // Create blocks compactor dependencies. - c.blocksCompactor, c.blocksPlannerFactory, err = c.blocksCompactorFactory(ctx, c.compactorCfg, c.logger, c.registerer) - if err != nil { - return errors.Wrap(err, "failed to initialize compactor dependencies") - } - - // Wrap the bucket client to write block deletion marks in the global location too. - c.bucketClient = bucketindex.BucketWithGlobalMarkers(c.bucketClient) - - // Create the users scanner. - c.usersScanner = cortex_tsdb.NewUsersScanner(c.bucketClient, c.ownUserForCleanUp, c.parentLogger) - - // Create the blocks cleaner (service). - c.blocksCleaner = NewBlocksCleaner(BlocksCleanerConfig{ - DeletionDelay: c.compactorCfg.DeletionDelay, - CleanupInterval: util.DurationWithJitter(c.compactorCfg.CleanupInterval, 0.1), - CleanupConcurrency: c.compactorCfg.CleanupConcurrency, - BlockDeletionMarksMigrationEnabled: c.compactorCfg.BlockDeletionMarksMigrationEnabled, - TenantCleanupDelay: c.compactorCfg.TenantCleanupDelay, - }, c.bucketClient, c.usersScanner, c.cfgProvider, c.parentLogger, c.registerer) - - // Initialize the compactors ring if sharding is enabled. 
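The blocks cleaner above is scheduled with util.DurationWithJitter(CleanupInterval, 0.1) so that replicas do not all run cleanup at the same instant. A rough sketch of what such a helper does, assuming (not verified against the util package) that the result is the input duration plus or minus the given variance:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // durationWithJitter returns d +/- a random offset of at most variance*d.
    func durationWithJitter(d time.Duration, variance float64) time.Duration {
        maxJitter := int64(float64(d) * variance)
        jitter := rand.Int63n(2*maxJitter+1) - maxJitter // in [-maxJitter, +maxJitter]
        return d + time.Duration(jitter)
    }

    func main() {
        // A 15m cleanup interval with 10% variance: something in [13m30s, 16m30s].
        fmt.Println(durationWithJitter(15*time.Minute, 0.1))
    }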
- if c.compactorCfg.ShardingEnabled { - lifecyclerCfg := c.compactorCfg.ShardingRing.ToLifecyclerConfig() - c.ringLifecycler, err = ring.NewLifecycler(lifecyclerCfg, ring.NewNoopFlushTransferer(), "compactor", ringKey, false, c.logger, prometheus.WrapRegistererWithPrefix("cortex_", c.registerer)) - if err != nil { - return errors.Wrap(err, "unable to initialize compactor ring lifecycler") - } - - c.ring, err = ring.New(lifecyclerCfg.RingConfig, "compactor", ringKey, c.logger, prometheus.WrapRegistererWithPrefix("cortex_", c.registerer)) - if err != nil { - return errors.Wrap(err, "unable to initialize compactor ring") - } - - c.ringSubservices, err = services.NewManager(c.ringLifecycler, c.ring) - if err == nil { - c.ringSubservicesWatcher = services.NewFailureWatcher() - c.ringSubservicesWatcher.WatchManager(c.ringSubservices) - - err = services.StartManagerAndAwaitHealthy(ctx, c.ringSubservices) - } - - if err != nil { - return errors.Wrap(err, "unable to start compactor ring dependencies") - } - - // If sharding is enabled we should wait until this instance is - // ACTIVE within the ring. This MUST be done before starting any - // other component depending on the users scanner, because the - // users scanner depends on the ring (to check whether a user belongs - // to this shard or not). - level.Info(c.logger).Log("msg", "waiting until compactor is ACTIVE in the ring") - - ctxWithTimeout, cancel := context.WithTimeout(ctx, c.compactorCfg.ShardingRing.WaitActiveInstanceTimeout) - defer cancel() - if err := ring.WaitInstanceState(ctxWithTimeout, c.ring, c.ringLifecycler.ID, ring.ACTIVE); err != nil { - level.Error(c.logger).Log("msg", "compactor failed to become ACTIVE in the ring", "err", err) - return err - } - level.Info(c.logger).Log("msg", "compactor is ACTIVE in the ring") - - // In the event of a cluster cold start or scale up of 2+ compactor instances at the same - // time, we may end up in a situation where each new compactor instance starts at a slightly - // different time and thus each one starts with a different state of the ring. It's better - // to just wait for ring stability for a short time. - if c.compactorCfg.ShardingRing.WaitStabilityMinDuration > 0 { - minWaiting := c.compactorCfg.ShardingRing.WaitStabilityMinDuration - maxWaiting := c.compactorCfg.ShardingRing.WaitStabilityMaxDuration - - level.Info(c.logger).Log("msg", "waiting until compactor ring topology is stable", "min_waiting", minWaiting.String(), "max_waiting", maxWaiting.String()) - if err := ring.WaitRingStability(ctx, c.ring, RingOp, minWaiting, maxWaiting); err != nil { - level.Warn(c.logger).Log("msg", "compactor ring topology is not stable after the max waiting time, proceeding anyway") - } else { - level.Info(c.logger).Log("msg", "compactor ring topology is stable") - } - } - } - - // Ensure an initial cleanup occurred before starting the compactor. - if err := services.StartAndAwaitRunning(ctx, c.blocksCleaner); err != nil { - c.ringSubservices.StopAsync() - return errors.Wrap(err, "failed to start the blocks cleaner") - } - - return nil -} - -func (c *Compactor) stopping(_ error) error { - ctx := context.Background() - - services.StopAndAwaitTerminated(ctx, c.blocksCleaner) //nolint:errcheck - if c.ringSubservices != nil { - return services.StopManagerAndAwaitStopped(ctx, c.ringSubservices) - } - return nil -} - -func (c *Compactor) running(ctx context.Context) error { - // Run an initial compaction before starting the interval. 
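ring.WaitRingStability above blocks until the ring topology has stopped changing for at least min_waiting, giving up after max_waiting. Reduced to a generic condition poller (our simplification, not the ring package's actual implementation):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // waitStable returns once check() has held true for minStable,
    // or an error after max has elapsed.
    func waitStable(ctx context.Context, check func() bool, minStable, max time.Duration) error {
        deadline := time.Now().Add(max)
        var stableSince time.Time
        for time.Now().Before(deadline) {
            switch {
            case !check():
                stableSince = time.Time{} // state changed, restart the clock
            case stableSince.IsZero():
                stableSince = time.Now()
            case time.Since(stableSince) >= minStable:
                return nil
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(50 * time.Millisecond):
            }
        }
        return errors.New("not stable within the max waiting time")
    }

    func main() {
        err := waitStable(context.Background(), func() bool { return true }, 200*time.Millisecond, time.Second)
        fmt.Println(err) // <nil>
    }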
- c.compactUsers(ctx) - - ticker := time.NewTicker(util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.05)) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - c.compactUsers(ctx) - case <-ctx.Done(): - return nil - case err := <-c.ringSubservicesWatcher.Chan(): - return errors.Wrap(err, "compactor subservice failed") - } - } -} - -func (c *Compactor) compactUsers(ctx context.Context) { - succeeded := false - compactionErrorCount := 0 - - c.compactionRunsStarted.Inc() - - defer func() { - if succeeded && compactionErrorCount == 0 { - c.compactionRunsCompleted.Inc() - c.compactionRunsLastSuccess.SetToCurrentTime() - } else { - c.compactionRunsFailed.Inc() - } - - // Reset progress metrics once done. - c.compactionRunDiscoveredTenants.Set(0) - c.compactionRunSkippedTenants.Set(0) - c.compactionRunSucceededTenants.Set(0) - c.compactionRunFailedTenants.Set(0) - }() - - level.Info(c.logger).Log("msg", "discovering users from bucket") - users, err := c.discoverUsersWithRetries(ctx) - if err != nil { - level.Error(c.logger).Log("msg", "failed to discover users from bucket", "err", err) - return - } - - level.Info(c.logger).Log("msg", "discovered users from bucket", "users", len(users)) - c.compactionRunDiscoveredTenants.Set(float64(len(users))) - - // When starting multiple compactor replicas nearly at the same time, running in a cluster with - // a large number of tenants, we may end up in a situation where the 1st user is compacted by - // multiple replicas at the same time. Shuffling users helps reduce the likelihood this will happen. - rand.Shuffle(len(users), func(i, j int) { - users[i], users[j] = users[j], users[i] - }) - - // Keep track of users owned by this shard, so that we can delete the local files for all other users. - ownedUsers := map[string]struct{}{} - for _, userID := range users { - // Ensure the context has not been canceled (i.e. compactor shutdown has been triggered). - if ctx.Err() != nil { - level.Info(c.logger).Log("msg", "interrupting compaction of user blocks", "err", ctx.Err()) - return - } - - // Ensure the user ID belongs to our shard. - if owned, err := c.ownUserForCompaction(userID); err != nil { - c.compactionRunSkippedTenants.Inc() - level.Warn(c.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err) - continue - } else if !owned { - c.compactionRunSkippedTenants.Inc() - level.Debug(c.logger).Log("msg", "skipping user because it is not owned by this shard", "user", userID) - continue - } - - ownedUsers[userID] = struct{}{} - - if markedForDeletion, err := cortex_tsdb.TenantDeletionMarkExists(ctx, c.bucketClient, userID); err != nil { - c.compactionRunSkippedTenants.Inc() - level.Warn(c.logger).Log("msg", "unable to check if user is marked for deletion", "user", userID, "err", err) - continue - } else if markedForDeletion { - c.compactionRunSkippedTenants.Inc() - level.Debug(c.logger).Log("msg", "skipping user because it is marked for deletion", "user", userID) - continue - } - - level.Info(c.logger).Log("msg", "starting compaction of user blocks", "user", userID) - - if err = c.compactUserWithRetries(ctx, userID); err != nil { - c.compactionRunFailedTenants.Inc() - compactionErrorCount++ - level.Error(c.logger).Log("msg", "failed to compact user blocks", "user", userID, "err", err) - continue - } - - c.compactionRunSucceededTenants.Inc() - level.Info(c.logger).Log("msg", "successfully compacted user blocks", "user", userID) - } - - // Delete local files for unowned tenants, if there are any. 
This cleans up - // leftover local files for tenants that belong to different compactors now, - // or have been deleted completely. - for userID := range c.listTenantsWithMetaSyncDirectories() { - if _, owned := ownedUsers[userID]; owned { - continue - } - - dir := c.metaSyncDirForUser(userID) - s, err := os.Stat(dir) - if err != nil { - if !os.IsNotExist(err) { - level.Warn(c.logger).Log("msg", "failed to stat local directory with user data", "dir", dir, "err", err) - } - continue - } - - if s.IsDir() { - err := os.RemoveAll(dir) - if err == nil { - level.Info(c.logger).Log("msg", "deleted directory for user not owned by this shard", "dir", dir) - } else { - level.Warn(c.logger).Log("msg", "failed to delete directory for user not owned by this shard", "dir", dir, "err", err) - } - } - } - - succeeded = true -} - -func (c *Compactor) compactUserWithRetries(ctx context.Context, userID string) error { - var lastErr error - - retries := backoff.New(ctx, backoff.Config{ - MinBackoff: c.compactorCfg.retryMinBackoff, - MaxBackoff: c.compactorCfg.retryMaxBackoff, - MaxRetries: c.compactorCfg.CompactionRetries, - }) - - for retries.Ongoing() { - lastErr = c.compactUser(ctx, userID) - if lastErr == nil { - return nil - } - - retries.Wait() - } - - return lastErr -} - -func (c *Compactor) compactUser(ctx context.Context, userID string) error { - bucket := bucket.NewUserBucketClient(userID, c.bucketClient, c.cfgProvider) - reg := prometheus.NewRegistry() - defer c.syncerMetrics.gatherThanosSyncerMetrics(reg) - - ulogger := util_log.WithUserID(userID, c.logger) - - // Filters out duplicate blocks that can be formed from two or more overlapping - // blocks that fully match the source blocks of the older blocks. - deduplicateBlocksFilter := block.NewDeduplicateFilter() - - // While fetching blocks, we filter out blocks that were marked for deletion by using IgnoreDeletionMarkFilter. - // No delay is used -- all blocks with a deletion marker are ignored, and not considered for compaction. - ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter( - ulogger, - bucket, - 0, - c.compactorCfg.MetaSyncConcurrency) - - // Filters out blocks with a no-compact marker; blocks can be marked as no-compact for reasons like - // out-of-order chunks or an index file that is too big. - noCompactMarkerFilter := compact.NewGatherNoCompactionMarkFilter(ulogger, bucket, c.compactorCfg.MetaSyncConcurrency) - - fetcher, err := block.NewMetaFetcher( - ulogger, - c.compactorCfg.MetaSyncConcurrency, - bucket, - c.metaSyncDirForUser(userID), - reg, - // List of filters to apply (order matters). - []block.MetadataFilter{ - // Remove the ingester ID because we don't shard blocks anymore, while still - // honoring the shard ID if sharding was done in the past. 
- NewLabelRemoverFilter([]string{cortex_tsdb.IngesterIDExternalLabel}), - block.NewConsistencyDelayMetaFilter(ulogger, c.compactorCfg.ConsistencyDelay, reg), - ignoreDeletionMarkFilter, - deduplicateBlocksFilter, - noCompactMarkerFilter, - }, - nil, - ) - if err != nil { - return err - } - - syncer, err := compact.NewMetaSyncer( - ulogger, - reg, - bucket, - fetcher, - deduplicateBlocksFilter, - ignoreDeletionMarkFilter, - c.blocksMarkedForDeletion, - c.garbageCollectedBlocks, - c.compactorCfg.BlockSyncConcurrency, - ) - if err != nil { - return errors.Wrap(err, "failed to create syncer") - } - - compactor, err := compact.NewBucketCompactor( - ulogger, - syncer, - c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.blocksMarkedForNoCompaction, c.garbageCollectedBlocks, c.remainingPlannedCompactions, c.ring, c.ringLifecycler, c.limits, userID), - c.blocksPlannerFactory(ulogger, c.compactorCfg, noCompactMarkerFilter), - c.blocksCompactor, - path.Join(c.compactorCfg.DataDir, "compact"), - bucket, - c.compactorCfg.CompactionConcurrency, - c.compactorCfg.SkipBlocksWithOutOfOrderChunksEnabled, - ) - if err != nil { - return errors.Wrap(err, "failed to create bucket compactor") - } - - if err := compactor.Compact(ctx); err != nil { - return errors.Wrap(err, "compaction") - } - - return nil -} - -func (c *Compactor) discoverUsersWithRetries(ctx context.Context) ([]string, error) { - var lastErr error - - retries := backoff.New(ctx, backoff.Config{ - MinBackoff: c.compactorCfg.retryMinBackoff, - MaxBackoff: c.compactorCfg.retryMaxBackoff, - MaxRetries: c.compactorCfg.CompactionRetries, - }) - - for retries.Ongoing() { - var users []string - - users, lastErr = c.discoverUsers(ctx) - if lastErr == nil { - return users, nil - } - - retries.Wait() - } - - return nil, lastErr -} - -func (c *Compactor) discoverUsers(ctx context.Context) ([]string, error) { - var users []string - - err := c.bucketClient.Iter(ctx, "", func(entry string) error { - users = append(users, strings.TrimSuffix(entry, "/")) - return nil - }) - - return users, err -} - -func (c *Compactor) ownUserForCompaction(userID string) (bool, error) { - return c.ownUser(userID, false) -} - -func (c *Compactor) ownUserForCleanUp(userID string) (bool, error) { - return c.ownUser(userID, true) -} - -func (c *Compactor) ownUser(userID string, isCleanUp bool) (bool, error) { - if !c.allowedTenants.IsAllowed(userID) { - return false, nil - } - - // Always owned if sharding is disabled - if !c.compactorCfg.ShardingEnabled { - return true, nil - } - - // If we aren't cleaning up user blocks, and we are using shuffle-sharding, ownership is determined by a subring - // Cleanup should only be owned by a single compactor, as there could be race conditions during block deletion - if !isCleanUp && c.compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { - subRing := c.ring.ShuffleShard(userID, c.limits.CompactorTenantShardSize(userID)) - - rs, err := subRing.GetAllHealthy(RingOp) - if err != nil { - return false, err - } - - return rs.Includes(c.ringLifecycler.Addr), nil - } - - // Hash the user ID. - hasher := fnv.New32a() - _, _ = hasher.Write([]byte(userID)) - userHash := hasher.Sum32() - - // Check whether this compactor instance owns the user. 
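compactUserWithRetries and discoverUsersWithRetries above share one pattern: run the operation inside a backoff loop (backoff.New / Ongoing / Wait) and report the last error. The same pattern in isolation, with a hand-rolled exponential delay instead of the backoff package:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retry runs fn up to maxRetries times, doubling the sleep between
    // attempts from min up to max, and returns the last error seen.
    func retry(maxRetries int, min, max time.Duration, fn func() error) error {
        var lastErr error
        delay := min
        for i := 0; i < maxRetries; i++ {
            if lastErr = fn(); lastErr == nil {
                return nil
            }
            time.Sleep(delay)
            if delay *= 2; delay > max {
                delay = max
            }
        }
        return lastErr
    }

    func main() {
        attempts := 0
        err := retry(3, 10*time.Millisecond, 80*time.Millisecond, func() error {
            attempts++
            if attempts < 3 {
                return errors.New("transient")
            }
            return nil
        })
        fmt.Println(attempts, err) // 3 <nil>
    }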
- rs, err := c.ring.Get(userHash, RingOp, nil, nil, nil) - if err != nil { - return false, err - } - - if len(rs.Instances) != 1 { - return false, fmt.Errorf("unexpected number of compactors in the shard (expected 1, got %d)", len(rs.Instances)) - } - - return rs.Instances[0].Addr == c.ringLifecycler.Addr, nil -} - -const compactorMetaPrefix = "compactor-meta-" - -// metaSyncDirForUser returns the directory used to store cached meta files. -// The fetcher stores cached metas in the "meta-syncer/" subdirectory, -// but we prefix it with "compactor-meta-" in order to guarantee no clashing with -// the directory used by the Thanos Syncer, whatever the user ID is. -func (c *Compactor) metaSyncDirForUser(userID string) string { - return filepath.Join(c.compactorCfg.DataDir, compactorMetaPrefix+userID) -} - -// This function returns tenants with meta sync directories found on local disk. On error, it returns a nil map. -func (c *Compactor) listTenantsWithMetaSyncDirectories() map[string]struct{} { - result := map[string]struct{}{} - - files, err := ioutil.ReadDir(c.compactorCfg.DataDir) - if err != nil { - return nil - } - - for _, f := range files { - if !f.IsDir() { - continue - } - - if !strings.HasPrefix(f.Name(), compactorMetaPrefix) { - continue - } - - result[f.Name()[len(compactorMetaPrefix):]] = struct{}{} - } - - return result -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go deleted file mode 100644 index 38ad9eb96..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go +++ /dev/null @@ -1,53 +0,0 @@ -package compactor - -import ( - "html/template" - "net/http" - - "github.com/go-kit/log/level" - - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/services" -) - -var ( - compactorStatusPageTemplate = template.Must(template.New("main").Parse(`
-	<!DOCTYPE html>
-	<html>
-		<head>
-			<meta charset="UTF-8">
-			<title>Cortex Compactor Ring</title>
-		</head>
-		<body>
-			<h1>Cortex Compactor Ring</h1>
-			<p>{{ .Message }}</p>
-		</body>
-	</html>`))
-)
-
-func writeMessage(w http.ResponseWriter, message string) { - w.WriteHeader(http.StatusOK) - err := compactorStatusPageTemplate.Execute(w, struct { - Message string - }{Message: message}) - - if err != nil { - level.Error(util_log.Logger).Log("msg", "unable to serve compactor ring page", "err", err) - } -} - -func (c *Compactor) RingHandler(w http.ResponseWriter, req *http.Request) { - if !c.compactorCfg.ShardingEnabled { - writeMessage(w, "Compactor has no ring because sharding is disabled.") - return - } - - if c.State() != services.Running { - // we cannot read the ring before Compactor is in Running state, - // because that would lead to a race condition. - writeMessage(w, "Compactor is not running yet.") - return - } - - c.ring.ServeHTTP(w, req) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go deleted file mode 100644 index c6ba61b29..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go +++ /dev/null @@ -1,107 +0,0 @@ -package compactor - -import ( - "flag" - "os" - "time" - - "github.com/go-kit/log/level" - - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util/flagext" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -// RingConfig masks the ring lifecycler config, which contains -// many options not really required by the compactors ring. This config -// is used to strip down the config to the minimum, and avoid confusing -// the user. -type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - - // Wait ring stability. - WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` - WaitStabilityMaxDuration time.Duration `yaml:"wait_stability_max_duration"` - - // Instance details - InstanceID string `yaml:"instance_id" doc:"hidden"` - InstanceInterfaceNames []string `yaml:"instance_interface_names"` - InstancePort int `yaml:"instance_port" doc:"hidden"` - InstanceAddr string `yaml:"instance_addr" doc:"hidden"` - - // Injected internally - ListenPort int `yaml:"-"` - - WaitActiveInstanceTimeout time.Duration `yaml:"wait_active_instance_timeout"` - - ObservePeriod time.Duration `yaml:"-"` -} - -// RegisterFlags adds the flags required to configure this to the given FlagSet. -func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { - hostname, err := os.Hostname() - if err != nil { - level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) - os.Exit(1) - } - - // Ring flags - cfg.KVStore.RegisterFlagsWithPrefix("compactor.ring.", "collectors/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") - f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).") - - // Wait stability flags. - f.DurationVar(&cfg.WaitStabilityMinDuration, "compactor.ring.wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 0 to disable.") - f.DurationVar(&cfg.WaitStabilityMaxDuration, "compactor.ring.wait-stability-max-duration", 5*time.Minute, "Maximum time to wait for ring stability at startup. 
If the compactor ring keeps changing after this period of time, the compactor will start anyway.") - - // Instance flags - cfg.InstanceInterfaceNames = []string{"eth0", "en0"} - f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), "compactor.ring.instance-interface-names", "Name of network interface to read address from.") - f.StringVar(&cfg.InstanceAddr, "compactor.ring.instance-addr", "", "IP address to advertise in the ring.") - f.IntVar(&cfg.InstancePort, "compactor.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") - f.StringVar(&cfg.InstanceID, "compactor.ring.instance-id", hostname, "Instance ID to register in the ring.") - - // Timeout durations - f.DurationVar(&cfg.WaitActiveInstanceTimeout, "compactor.ring.wait-active-instance-timeout", 10*time.Minute, "Timeout for waiting on compactor to become ACTIVE in the ring.") -} - -// ToLifecyclerConfig returns a LifecyclerConfig based on the compactor -// ring config. -func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { - // We have to make sure that the ring.LifecyclerConfig and ring.Config - // defaults are preserved - lc := ring.LifecyclerConfig{} - rc := ring.Config{} - - flagext.DefaultValues(&lc) - flagext.DefaultValues(&rc) - - // Configure ring - rc.KVStore = cfg.KVStore - rc.HeartbeatTimeout = cfg.HeartbeatTimeout - rc.ReplicationFactor = 1 - - // Configure lifecycler - lc.RingConfig = rc - lc.RingConfig.SubringCacheDisabled = true - lc.ListenPort = cfg.ListenPort - lc.Addr = cfg.InstanceAddr - lc.Port = cfg.InstancePort - lc.ID = cfg.InstanceID - lc.InfNames = cfg.InstanceInterfaceNames - lc.UnregisterOnShutdown = true - lc.HeartbeatPeriod = cfg.HeartbeatPeriod - lc.ObservePeriod = cfg.ObservePeriod - lc.JoinAfter = 0 - lc.MinReadyDuration = 0 - lc.FinalSleep = 0 - - // We use a safe default instead of exposing a config option to the user - // in order to simplify the config. - lc.NumTokens = 512 - - return lc -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/label_remover_filter.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/label_remover_filter.go deleted file mode 100644 index 221201419..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/label_remover_filter.go +++ /dev/null @@ -1,29 +0,0 @@ -package compactor - -import ( - "context" - - "github.com/oklog/ulid" - "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/extprom" -) - -type LabelRemoverFilter struct { - labels []string -} - -// NewLabelRemoverFilter creates a LabelRemoverFilter. -func NewLabelRemoverFilter(labels []string) *LabelRemoverFilter { - return &LabelRemoverFilter{labels: labels} -} - -// Filter modifies external labels of existing blocks, removing the given labels from the metadata of blocks that have them. 
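The Filter implementation that follows boils down to deleting the configured keys from each block's external-label map. In isolation (the ingester label name below is illustrative, not necessarily the exact value of cortex_tsdb.IngesterIDExternalLabel):

    package main

    import "fmt"

    func main() {
        // External labels of one block, as found in its meta.json.
        blockLabels := map[string]string{
            "__ingester_id__": "ingester-5", // dropped so blocks from different ingesters can compact together
            "cluster":         "prod",
        }

        // What LabelRemoverFilter.Filter does for each block and configured label.
        for _, l := range []string{"__ingester_id__"} {
            delete(blockLabels, l)
        }

        fmt.Println(blockLabels) // map[cluster:prod]
    }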
-func (f *LabelRemoverFilter) Filter(_ context.Context, metas map[ulid.ULID]*metadata.Meta, _ *extprom.TxGaugeVec) error { - for _, meta := range metas { - for _, l := range f.labels { - delete(meta.Thanos.Labels, l) - } - } - - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_grouper.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_grouper.go deleted file mode 100644 index b6d25ee80..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_grouper.go +++ /dev/null @@ -1,458 +0,0 @@ -package compactor - -import ( - "fmt" - "hash/fnv" - "sort" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/model/labels" - "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/compact" - "github.com/thanos-io/thanos/pkg/objstore" - - "github.com/cortexproject/cortex/pkg/ring" -) - -type ShuffleShardingGrouper struct { - logger log.Logger - bkt objstore.Bucket - acceptMalformedIndex bool - enableVerticalCompaction bool - reg prometheus.Registerer - blocksMarkedForDeletion prometheus.Counter - blocksMarkedForNoCompact prometheus.Counter - garbageCollectedBlocks prometheus.Counter - remainingPlannedCompactions prometheus.Gauge - hashFunc metadata.HashFunc - compactions *prometheus.CounterVec - compactionRunsStarted *prometheus.CounterVec - compactionRunsCompleted *prometheus.CounterVec - compactionFailures *prometheus.CounterVec - verticalCompactions *prometheus.CounterVec - compactorCfg Config - limits Limits - userID string - - ring ring.ReadRing - ringLifecyclerAddr string -} - -func NewShuffleShardingGrouper( - logger log.Logger, - bkt objstore.Bucket, - acceptMalformedIndex bool, - enableVerticalCompaction bool, - reg prometheus.Registerer, - blocksMarkedForDeletion prometheus.Counter, - blocksMarkedForNoCompact prometheus.Counter, - garbageCollectedBlocks prometheus.Counter, - remainingPlannedCompactions prometheus.Gauge, - hashFunc metadata.HashFunc, - compactorCfg Config, - ring ring.ReadRing, - ringLifecyclerAddr string, - limits Limits, - userID string, -) *ShuffleShardingGrouper { - if logger == nil { - logger = log.NewNopLogger() - } - - return &ShuffleShardingGrouper{ - logger: logger, - bkt: bkt, - acceptMalformedIndex: acceptMalformedIndex, - enableVerticalCompaction: enableVerticalCompaction, - reg: reg, - blocksMarkedForDeletion: blocksMarkedForDeletion, - blocksMarkedForNoCompact: blocksMarkedForNoCompact, - garbageCollectedBlocks: garbageCollectedBlocks, - remainingPlannedCompactions: remainingPlannedCompactions, - hashFunc: hashFunc, - // Metrics are copied from Thanos DefaultGrouper constructor - compactions: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_compact_group_compactions_total", - Help: "Total number of group compaction attempts that resulted in a new block.", - }, []string{"group"}), - compactionRunsStarted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_compact_group_compaction_runs_started_total", - Help: "Total number of group compaction attempts.", - }, []string{"group"}), - compactionRunsCompleted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_compact_group_compaction_runs_completed_total", - Help: "Total number of group completed 
compaction runs. This also includes compactor group runs that resulted with no compaction.", - }, []string{"group"}), - compactionFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_compact_group_compactions_failures_total", - Help: "Total number of failed group compactions.", - }, []string{"group"}), - verticalCompactions: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_compact_group_vertical_compactions_total", - Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.", - }, []string{"group"}), - compactorCfg: compactorCfg, - ring: ring, - ringLifecyclerAddr: ringLifecyclerAddr, - limits: limits, - userID: userID, - } -} - -// Groups function modified from https://github.com/cortexproject/cortex/pull/2616 -func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*compact.Group, err error) { - // First of all we have to group blocks using the Thanos default - // grouping (based on downsample resolution + external labels). - mainGroups := map[string][]*metadata.Meta{} - for _, b := range blocks { - key := compact.DefaultGroupKey(b.Thanos) - mainGroups[key] = append(mainGroups[key], b) - } - - // For each group, we have to further split it into sets of blocks - // which we can compact in parallel. - var outGroups []*compact.Group - - // Check if this compactor is on the subring. - // If the compactor is not on the subring when using the userID as an identifier, - // no plans generated below will be owned by the compactor, so we can just return an empty array - // as there will be no planned groups. - onSubring, err := g.checkSubringForCompactor() - if err != nil { - return nil, errors.Wrap(err, "unable to check sub-ring for compactor ownership") - } - if !onSubring { - level.Debug(g.logger).Log("msg", "compactor is not on the current sub-ring skipping user", "user", g.userID) - return outGroups, nil - } - // Metrics for the remaining planned compactions - var remainingCompactions = 0. - defer func() { g.remainingPlannedCompactions.Set(remainingCompactions) }() - - for _, mainBlocks := range mainGroups { - for _, group := range groupBlocksByCompactableRanges(mainBlocks, g.compactorCfg.BlockRanges.ToMilliseconds()) { - // Nothing to do if we don't have at least 2 blocks. - if len(group.blocks) < 2 { - continue - } - - groupHash := hashGroup(g.userID, group.rangeStart, group.rangeEnd) - - if owned, err := g.ownGroup(groupHash); err != nil { - level.Warn(g.logger).Log("msg", "unable to check if user is owned by this shard", "group_hash", groupHash, "err", err, "group", group.String()) - continue - } else if !owned { - level.Info(g.logger).Log("msg", "skipping group because it is not owned by this shard", "group_hash", groupHash) - continue - } - - remainingCompactions++ - groupKey := fmt.Sprintf("%v%s", groupHash, compact.DefaultGroupKey(group.blocks[0].Thanos)) - - level.Info(g.logger).Log("msg", "found compactable group for user", "group_hash", groupHash, "group", group.String()) - - // All the blocks within the same group have the same downsample - // resolution and external labels. 
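Each candidate group is then mapped onto the ring via hashGroup (defined further below in this file): the tenant ID and the group's start and end timestamps are hashed with FNV-1a into a uint32 ring token. Standalone, with made-up values:

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    // Same logic as hashGroup below: hash "<userID><rangeStart><rangeEnd>".
    func hashGroup(userID string, rangeStart, rangeEnd int64) uint32 {
        h := fnv.New32a()
        _, _ = h.Write([]byte(fmt.Sprintf("%v%v%v", userID, rangeStart, rangeEnd)))
        return h.Sum32()
    }

    func main() {
        // Hypothetical tenant with a 2h group range expressed in milliseconds.
        fmt.Println(hashGroup("tenant-1", 0, 2*3600*1000))
    }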
- resolution := group.blocks[0].Thanos.Downsample.Resolution - externalLabels := labels.FromMap(group.blocks[0].Thanos.Labels) - - thanosGroup, err := compact.NewGroup( - log.With(g.logger, "groupKey", groupKey, "rangeStart", group.rangeStartTime().String(), "rangeEnd", group.rangeEndTime().String(), "externalLabels", externalLabels, "downsampleResolution", resolution), - g.bkt, - groupKey, - externalLabels, - resolution, - false, // No malformed index. - true, // Enable vertical compaction. - g.compactions.WithLabelValues(groupKey), - g.compactionRunsStarted.WithLabelValues(groupKey), - g.compactionRunsCompleted.WithLabelValues(groupKey), - g.compactionFailures.WithLabelValues(groupKey), - g.verticalCompactions.WithLabelValues(groupKey), - g.garbageCollectedBlocks, - g.blocksMarkedForDeletion, - g.blocksMarkedForNoCompact, - g.hashFunc, - ) - if err != nil { - return nil, errors.Wrap(err, "create compaction group") - } - - for _, m := range group.blocks { - if err := thanosGroup.AppendMeta(m); err != nil { - return nil, errors.Wrap(err, "add block to compaction group") - } - } - - outGroups = append(outGroups, thanosGroup) - } - } - - // Ensure groups are sorted by smallest range, oldest min time first. The rationale - // is that we wanna favor smaller ranges first (ie. to deduplicate samples sooner - // than later) and older ones are more likely to be "complete" (no missing block still - // to be uploaded). - sort.SliceStable(outGroups, func(i, j int) bool { - iLength := outGroups[i].MaxTime() - outGroups[i].MinTime() - jLength := outGroups[j].MaxTime() - outGroups[j].MinTime() - - if iLength != jLength { - return iLength < jLength - } - if outGroups[i].MinTime() != outGroups[j].MinTime() { - return outGroups[i].MinTime() < outGroups[j].MinTime() - } - - // Guarantee stable sort for tests. - return outGroups[i].Key() < outGroups[j].Key() - }) - - return outGroups, nil -} - -// Check whether this compactor instance owns the group. -func (g *ShuffleShardingGrouper) ownGroup(groupHash uint32) (bool, error) { - subRing := g.ring.ShuffleShard(g.userID, g.limits.CompactorTenantShardSize(g.userID)) - - rs, err := subRing.Get(groupHash, RingOp, nil, nil, nil) - if err != nil { - return false, err - } - - if len(rs.Instances) != 1 { - return false, fmt.Errorf("unexpected number of compactors in the shard (expected 1, got %d)", len(rs.Instances)) - } - - return rs.Instances[0].Addr == g.ringLifecyclerAddr, nil -} - -// Check whether this compactor exists on the subring based on user ID -func (g *ShuffleShardingGrouper) checkSubringForCompactor() (bool, error) { - subRing := g.ring.ShuffleShard(g.userID, g.limits.CompactorTenantShardSize(g.userID)) - - rs, err := subRing.GetAllHealthy(RingOp) - if err != nil { - return false, err - } - - return rs.Includes(g.ringLifecyclerAddr), nil -} - -// Get the hash of a group based on the UserID, and the starting and ending time of the group's range. -func hashGroup(userID string, rangeStart int64, rangeEnd int64) uint32 { - groupString := fmt.Sprintf("%v%v%v", userID, rangeStart, rangeEnd) - groupHasher := fnv.New32a() - // Hasher never returns err. - _, _ = groupHasher.Write([]byte(groupString)) - groupHash := groupHasher.Sum32() - - return groupHash -} - -// blocksGroup struct and functions copied and adjusted from https://github.com/cortexproject/cortex/pull/2616 -type blocksGroup struct { - rangeStart int64 // Included. - rangeEnd int64 // Excluded. 
- blocks []*metadata.Meta - key string -} - -// overlaps returns whether the group range overlaps with the input group. -func (g blocksGroup) overlaps(other blocksGroup) bool { - if g.rangeStart >= other.rangeEnd || other.rangeStart >= g.rangeEnd { - return false - } - - return true -} - -func (g blocksGroup) rangeStartTime() time.Time { - return time.Unix(0, g.rangeStart*int64(time.Millisecond)).UTC() -} - -func (g blocksGroup) rangeEndTime() time.Time { - return time.Unix(0, g.rangeEnd*int64(time.Millisecond)).UTC() -} - -func (g blocksGroup) String() string { - out := strings.Builder{} - out.WriteString(fmt.Sprintf("Group range start: %d, range end: %d, key %v, blocks: ", g.rangeStart, g.rangeEnd, g.key)) - - for i, b := range g.blocks { - if i > 0 { - out.WriteString(", ") - } - - minT := time.Unix(0, b.MinTime*int64(time.Millisecond)).UTC() - maxT := time.Unix(0, b.MaxTime*int64(time.Millisecond)).UTC() - out.WriteString(fmt.Sprintf("%s (min time: %s, max time: %s)", b.ULID.String(), minT.String(), maxT.String())) - } - - return out.String() -} - -func (g blocksGroup) rangeLength() int64 { - return g.rangeEnd - g.rangeStart -} - -// minTime returns the MinTime across all blocks in the group. -func (g blocksGroup) minTime() int64 { - // Blocks are expected to be sorted by MinTime. - return g.blocks[0].MinTime -} - -// maxTime returns the MaxTime across all blocks in the group. -func (g blocksGroup) maxTime() int64 { - max := g.blocks[0].MaxTime - - for _, b := range g.blocks[1:] { - if b.MaxTime > max { - max = b.MaxTime - } - } - - return max -} - -// groupBlocksByCompactableRanges groups input blocks by compactable ranges, giving preference -// to smaller ranges. If a smaller range contains more than 1 block (and thus it should -// be compacted), the larger range block group is not generated until each of its -// smaller ranges have 1 block each at most. -func groupBlocksByCompactableRanges(blocks []*metadata.Meta, ranges []int64) []blocksGroup { - if len(blocks) == 0 { - return nil - } - - // Sort blocks by min time. - sortMetasByMinTime(blocks) - - var groups []blocksGroup - - for _, tr := range ranges { - nextGroup: - for _, group := range groupBlocksByRange(blocks, tr) { - - // Exclude groups with a single block, because no compaction is required. - if len(group.blocks) < 2 { - continue - } - - // Ensure this group's range does not overlap with any group already scheduled - // for compaction by a smaller range, because we need to guarantee that smaller ranges - // are compacted first. - for _, c := range groups { - if group.overlaps(c) { - continue nextGroup - } - } - - groups = append(groups, group) - } - } - - // Ensure we don't compact the most recent blocks prematurely when another one of - // the same size still fits in the range. To do it, we consider valid a group only - // if it's before the most recent block or if it fully covers the range. - highestMinTime := blocks[len(blocks)-1].MinTime - for idx := 0; idx < len(groups); { - group := groups[idx] - - // If the group covers a range before the most recent block, it's fine. - if group.rangeEnd <= highestMinTime { - idx++ - continue - } - - // If the group covers the full range, it's fine. - if group.maxTime()-group.minTime() == group.rangeLength() { - idx++ - continue - } - - // If the group's maxTime is after 1 block range, we can compact assuming that - // all the required blocks have already been uploaded. 
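The range arithmetic used throughout this grouping code is getRangeStart (defined a little further below): a block's MinTime is aligned down to a multiple of the target range tr, with a correction so that negative timestamps round toward minus infinity instead of toward zero. A worked example:

    package main

    import "fmt"

    // Same arithmetic as getRangeStart below.
    func getRangeStart(minTime, tr int64) int64 {
        if minTime >= 0 {
            return tr * (minTime / tr)
        }
        return tr * ((minTime - tr + 1) / tr)
    }

    func main() {
        fmt.Println(getRangeStart(35, 30)) // 30: a block starting at 35 falls in [30, 60)
        fmt.Println(getRangeStart(-5, 30)) // -30: naive integer division would wrongly give 0
    }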
- if int64(ulid.Now()) > group.maxTime()+group.rangeLength() { - idx++ - continue - } - - // We hit into a group which would compact recent blocks prematurely, - // so we need to filter it out. - - groups = append(groups[:idx], groups[idx+1:]...) - } - - return groups -} - -// groupBlocksByRange splits the blocks by the time range. The range sequence starts at 0. -// Input blocks are expected to be sorted by MinTime. -// -// For example, if we have blocks [0-10, 10-20, 50-60, 90-100] and the split range tr is 30 -// it returns [0-10, 10-20], [50-60], [90-100]. -func groupBlocksByRange(blocks []*metadata.Meta, tr int64) []blocksGroup { - var ret []blocksGroup - - for i := 0; i < len(blocks); { - var ( - group blocksGroup - m = blocks[i] - ) - - group.rangeStart = getRangeStart(m, tr) - group.rangeEnd = group.rangeStart + tr - - // Skip blocks that don't fall into the range. This can happen via mis-alignment or - // by being the multiple of the intended range. - if m.MaxTime > group.rangeEnd { - i++ - continue - } - - // Add all blocks to the current group that are within [t0, t0+tr]. - for ; i < len(blocks); i++ { - // If the block does not start within this group, then we should break the iteration - // and move it to the next group. - if blocks[i].MinTime >= group.rangeEnd { - break - } - - // If the block doesn't fall into this group, but it started within this group then it - // means it spans across multiple ranges and we should skip it. - if blocks[i].MaxTime > group.rangeEnd { - continue - } - - group.blocks = append(group.blocks, blocks[i]) - } - - if len(group.blocks) > 0 { - ret = append(ret, group) - } - } - - return ret -} - -func getRangeStart(m *metadata.Meta, tr int64) int64 { - // Compute start of aligned time range of size tr closest to the current block's start. - // This code has been copied from TSDB. - if m.MinTime >= 0 { - return tr * (m.MinTime / tr) - } - - return tr * ((m.MinTime - tr + 1) / tr) -} - -func sortMetasByMinTime(metas []*metadata.Meta) { - sort.Slice(metas, func(i, j int) bool { - return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime - }) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_planner.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_planner.go deleted file mode 100644 index 4c38ff598..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/shuffle_sharding_planner.go +++ /dev/null @@ -1,54 +0,0 @@ -package compactor - -import ( - "context" - "fmt" - - "github.com/go-kit/log" - "github.com/oklog/ulid" - "github.com/thanos-io/thanos/pkg/block/metadata" -) - -type ShuffleShardingPlanner struct { - logger log.Logger - ranges []int64 - noCompBlocksFunc func() map[ulid.ULID]*metadata.NoCompactMark -} - -func NewShuffleShardingPlanner(logger log.Logger, ranges []int64, noCompBlocksFunc func() map[ulid.ULID]*metadata.NoCompactMark) *ShuffleShardingPlanner { - return &ShuffleShardingPlanner{ - logger: logger, - ranges: ranges, - noCompBlocksFunc: noCompBlocksFunc, - } -} - -func (p *ShuffleShardingPlanner) Plan(_ context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { - // Ensure all blocks fits within the largest range. This is a double check - // to ensure there's no bug in the previous blocks grouping, given this Plan() - // is just a pass-through. 
- // Modified from https://github.com/cortexproject/cortex/pull/2616/files#diff-e3051fc530c48bb276ba958dd8fadc684e546bd7964e6bc75cef9a86ef8df344R28-R63 - largestRange := p.ranges[len(p.ranges)-1] - rangeStart := getRangeStart(metasByMinTime[0], largestRange) - rangeEnd := rangeStart + largestRange - noCompactMarked := p.noCompBlocksFunc() - resultMetas := make([]*metadata.Meta, 0, len(metasByMinTime)) - - for _, b := range metasByMinTime { - if _, excluded := noCompactMarked[b.ULID]; excluded { - continue - } - - if b.MinTime < rangeStart || b.MaxTime > rangeEnd { - return nil, fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", b.ULID.String(), b.MinTime, b.MaxTime, rangeStart, rangeEnd) - } - - resultMetas = append(resultMetas, b) - } - - if len(resultMetas) < 2 { - return nil, nil - } - - return resultMetas, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go deleted file mode 100644 index c17177927..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go +++ /dev/null @@ -1,124 +0,0 @@ -package compactor - -import ( - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/cortexproject/cortex/pkg/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -// Copied from Thanos, pkg/compact/compact.go. -// Here we aggregate metrics from all finished syncers. -type syncerMetrics struct { - metaSync prometheus.Counter - metaSyncFailures prometheus.Counter - metaSyncDuration *util.HistogramDataCollector // was prometheus.Histogram before - metaSyncConsistencyDelay prometheus.Gauge - garbageCollections prometheus.Counter - garbageCollectionFailures prometheus.Counter - garbageCollectionDuration *util.HistogramDataCollector // was prometheus.Histogram before - compactions prometheus.Counter - compactionRunsStarted prometheus.Counter - compactionRunsCompleted prometheus.Counter - compactionFailures prometheus.Counter - verticalCompactions prometheus.Counter -} - -// Copied (and modified with Cortex prefix) from Thanos, pkg/compact/compact.go -// We also ignore "group" label, since we only use a single group. 
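The syncer metrics below rely on a registry trick: each per-tenant compaction runs against a private prometheus.Registry, which gatherThanosSyncerMetrics then Gather()s, folding the per-group series into single global counters. The trick in miniature (the summation that Cortex's SumCounters helper performs is done by hand here):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        reg := prometheus.NewRegistry()
        c := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "thanos_compact_group_compactions_total",
            Help: "Per-group compactions.",
        }, []string{"group"})
        reg.MustRegister(c)
        c.WithLabelValues("g1").Add(2)
        c.WithLabelValues("g2").Add(3)

        // Gather the private registry and sum across the "group" label.
        mfs, err := reg.Gather()
        if err != nil {
            panic(err)
        }
        var sum float64
        for _, mf := range mfs {
            if mf.GetName() == "thanos_compact_group_compactions_total" {
                for _, m := range mf.GetMetric() {
                    sum += m.GetCounter().GetValue()
                }
            }
        }
        fmt.Println(sum) // 5
    }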
-func newSyncerMetrics(reg prometheus.Registerer) *syncerMetrics { - var m syncerMetrics - - m.metaSync = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_meta_syncs_total", - Help: "Total blocks metadata synchronization attempts.", - }) - m.metaSyncFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_meta_sync_failures_total", - Help: "Total blocks metadata synchronization failures.", - }) - m.metaSyncDuration = util.NewHistogramDataCollector(prometheus.NewDesc( - "cortex_compactor_meta_sync_duration_seconds", - "Duration of the blocks metadata synchronization in seconds.", - nil, nil)) - m.metaSyncConsistencyDelay = promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_compactor_meta_sync_consistency_delay_seconds", - Help: "Configured consistency delay in seconds.", - }) - - m.garbageCollections = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_garbage_collection_total", - Help: "Total number of garbage collection operations.", - }) - m.garbageCollectionFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_garbage_collection_failures_total", - Help: "Total number of failed garbage collection operations.", - }) - m.garbageCollectionDuration = util.NewHistogramDataCollector(prometheus.NewDesc( - "cortex_compactor_garbage_collection_duration_seconds", - "Time it took to perform garbage collection iteration.", - nil, nil)) - - m.compactions = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_group_compactions_total", - Help: "Total number of group compaction attempts that resulted in a new block.", - }) - m.compactionRunsStarted = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_group_compaction_runs_started_total", - Help: "Total number of group compaction attempts.", - }) - m.compactionRunsCompleted = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_group_compaction_runs_completed_total", - Help: "Total number of group completed compaction runs. 
This also includes compactor group runs that resulted with no compaction.", - }) - m.compactionFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_group_compactions_failures_total", - Help: "Total number of failed group compactions.", - }) - m.verticalCompactions = promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_group_vertical_compactions_total", - Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.", - }) - - if reg != nil { - reg.MustRegister(m.metaSyncDuration, m.garbageCollectionDuration) - } - - return &m -} - -func (m *syncerMetrics) gatherThanosSyncerMetrics(reg *prometheus.Registry) { - if m == nil { - return - } - - mf, err := reg.Gather() - if err != nil { - level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) - return - } - - mfm, err := util.NewMetricFamilyMap(mf) - if err != nil { - level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) - return - } - - m.metaSync.Add(mfm.SumCounters("blocks_meta_syncs_total")) - m.metaSyncFailures.Add(mfm.SumCounters("blocks_meta_sync_failures_total")) - m.metaSyncDuration.Add(mfm.SumHistograms("blocks_meta_sync_duration_seconds")) - m.metaSyncConsistencyDelay.Set(mfm.MaxGauges("consistency_delay_seconds")) - - m.garbageCollections.Add(mfm.SumCounters("thanos_compact_garbage_collection_total")) - m.garbageCollectionFailures.Add(mfm.SumCounters("thanos_compact_garbage_collection_failures_total")) - m.garbageCollectionDuration.Add(mfm.SumHistograms("thanos_compact_garbage_collection_duration_seconds")) - - // These metrics have "group" label, but we sum them all together. 
- m.compactions.Add(mfm.SumCounters("thanos_compact_group_compactions_total")) - m.compactionRunsStarted.Add(mfm.SumCounters("thanos_compact_group_compaction_runs_started_total")) - m.compactionRunsCompleted.Add(mfm.SumCounters("thanos_compact_group_compaction_runs_completed_total")) - m.compactionFailures.Add(mfm.SumCounters("thanos_compact_group_compactions_failures_total")) - m.verticalCompactions.Add(mfm.SumCounters("thanos_compact_group_vertical_compactions_total")) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go deleted file mode 100644 index 2cb8d147d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go +++ /dev/null @@ -1,367 +0,0 @@ -package api - -import ( - "database/sql" - "encoding/json" - "errors" - "flag" - "fmt" - "html/template" - "io/ioutil" - "mime" - "net/http" - "strconv" - "strings" - - "gopkg.in/yaml.v2" - - "github.com/go-kit/log/level" - "github.com/gorilla/mux" - amconfig "github.com/prometheus/alertmanager/config" - amtemplate "github.com/prometheus/alertmanager/template" - - "github.com/cortexproject/cortex/pkg/configs/db" - "github.com/cortexproject/cortex/pkg/configs/userconfig" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -var ( - ErrEmailNotificationsAreDisabled = errors.New("email notifications are disabled") - ErrWebhookNotificationsAreDisabled = errors.New("webhook notifications are disabled") -) - -// Config configures Configs API -type Config struct { - Notifications NotificationsConfig `yaml:"notifications"` -} - -// NotificationsConfig configures Alertmanager notifications method. -type NotificationsConfig struct { - DisableEmail bool `yaml:"disable_email"` - DisableWebHook bool `yaml:"disable_webhook"` -} - -// RegisterFlags adds the flags required to configure this to the given FlagSet. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.BoolVar(&cfg.Notifications.DisableEmail, "configs.notifications.disable-email", false, "Disable Email notifications for Alertmanager.") - f.BoolVar(&cfg.Notifications.DisableWebHook, "configs.notifications.disable-webhook", false, "Disable WebHook notifications for Alertmanager.") -} - -// API implements the configs api. -type API struct { - http.Handler - db db.DB - cfg Config -} - -// New creates a new API -func New(database db.DB, cfg Config) *API { - a := &API{ - db: database, - cfg: cfg, - } - r := mux.NewRouter() - a.RegisterRoutes(r) - a.Handler = r - return a -} - -func (a *API) admin(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "text/html") - fmt.Fprintf(w, ` - - - configs :: configuration service - -

-		<h1>configs :: configuration service</h1>

- - -`) -} - -// RegisterRoutes registers the configs API HTTP routes with the provided Router. -func (a *API) RegisterRoutes(r *mux.Router) { - for _, route := range []struct { - name, method, path string - handler http.HandlerFunc - }{ - {"root", "GET", "/", a.admin}, - // Dedicated APIs for updating rules config. In the future, these *must* - // be used. - {"get_rules", "GET", "/api/prom/configs/rules", a.getConfig}, - {"set_rules", "POST", "/api/prom/configs/rules", a.setConfig}, - {"get_templates", "GET", "/api/prom/configs/templates", a.getConfig}, - {"set_templates", "POST", "/api/prom/configs/templates", a.setConfig}, - {"get_alertmanager_config", "GET", "/api/prom/configs/alertmanager", a.getConfig}, - {"set_alertmanager_config", "POST", "/api/prom/configs/alertmanager", a.setConfig}, - {"validate_alertmanager_config", "POST", "/api/prom/configs/alertmanager/validate", a.validateAlertmanagerConfig}, - {"deactivate_config", "DELETE", "/api/prom/configs/deactivate", a.deactivateConfig}, - {"restore_config", "POST", "/api/prom/configs/restore", a.restoreConfig}, - // Internal APIs. - {"private_get_rules", "GET", "/private/api/prom/configs/rules", a.getConfigs}, - {"private_get_alertmanager_config", "GET", "/private/api/prom/configs/alertmanager", a.getConfigs}, - } { - r.Handle(route.path, route.handler).Methods(route.method).Name(route.name) - } -} - -// getConfig returns the request configuration. -func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) - if err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - logger := util_log.WithContext(r.Context(), util_log.Logger) - - cfg, err := a.db.GetConfig(r.Context(), userID) - if err == sql.ErrNoRows { - http.Error(w, "No configuration", http.StatusNotFound) - return - } else if err != nil { - // XXX: Untested - level.Error(logger).Log("msg", "error getting config", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - switch parseConfigFormat(r.Header.Get("Accept"), FormatJSON) { - case FormatJSON: - w.Header().Set("Content-Type", "application/json") - err = json.NewEncoder(w).Encode(cfg) - case FormatYAML: - w.Header().Set("Content-Type", "application/yaml") - err = yaml.NewEncoder(w).Encode(cfg) - default: - // should never reach this point - level.Error(logger).Log("msg", "unexpected error detecting the config format") - http.Error(w, err.Error(), http.StatusInternalServerError) - } - if err != nil { - // XXX: Untested - level.Error(logger).Log("msg", "error encoding config", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - -func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) - if err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - logger := util_log.WithContext(r.Context(), util_log.Logger) - - var cfg userconfig.Config - switch parseConfigFormat(r.Header.Get("Content-Type"), FormatJSON) { - case FormatJSON: - if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { - // XXX: Untested - level.Error(logger).Log("msg", "error decoding json body", "err", err) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - case FormatYAML: - if err := yaml.NewDecoder(r.Body).Decode(&cfg); err != nil { - // XXX: Untested - level.Error(logger).Log("msg", "error decoding yaml body", "err", err) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - 
-	default:
-		// should never reach this point; err is nil here, so report a static
-		// message instead of dereferencing it.
-		level.Error(logger).Log("msg", "unexpected error detecting the config format")
-		http.Error(w, "unexpected config format", http.StatusInternalServerError)
-		return
-	}
-
-	if err := validateAlertmanagerConfig(cfg.AlertmanagerConfig, a.cfg.Notifications); err != nil && cfg.AlertmanagerConfig != "" {
-		level.Error(logger).Log("msg", "invalid Alertmanager config", "err", err)
-		http.Error(w, fmt.Sprintf("Invalid Alertmanager config: %v", err), http.StatusBadRequest)
-		return
-	}
-	if err := validateRulesFiles(cfg); err != nil {
-		level.Error(logger).Log("msg", "invalid rules", "err", err)
-		http.Error(w, fmt.Sprintf("Invalid rules: %v", err), http.StatusBadRequest)
-		return
-	}
-	if err := validateTemplateFiles(cfg); err != nil {
-		level.Error(logger).Log("msg", "invalid templates", "err", err)
-		http.Error(w, fmt.Sprintf("Invalid templates: %v", err), http.StatusBadRequest)
-		return
-	}
-	if err := a.db.SetConfig(r.Context(), userID, cfg); err != nil {
-		// XXX: Untested
-		level.Error(logger).Log("msg", "error storing config", "err", err)
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	w.WriteHeader(http.StatusNoContent)
-}
-
-func (a *API) validateAlertmanagerConfig(w http.ResponseWriter, r *http.Request) {
-	logger := util_log.WithContext(r.Context(), util_log.Logger)
-	cfg, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		level.Error(logger).Log("msg", "error reading request body", "err", err)
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	if err = validateAlertmanagerConfig(string(cfg), a.cfg.Notifications); err != nil {
-		w.WriteHeader(http.StatusBadRequest)
-		util.WriteJSONResponse(w, map[string]string{
-			"status": "error",
-			"error":  err.Error(),
-		})
-		return
-	}
-
-	util.WriteJSONResponse(w, map[string]string{
-		"status": "success",
-	})
-}
-
-func validateAlertmanagerConfig(cfg string, noCfg NotificationsConfig) error {
-	amCfg, err := amconfig.Load(cfg)
-	if err != nil {
-		return err
-	}
-
-	for _, recv := range amCfg.Receivers {
-		if noCfg.DisableEmail && len(recv.EmailConfigs) > 0 {
-			return ErrEmailNotificationsAreDisabled
-		}
-		if noCfg.DisableWebHook && len(recv.WebhookConfigs) > 0 {
-			return ErrWebhookNotificationsAreDisabled
-		}
-	}
-
-	return nil
-}
-
-func validateRulesFiles(c userconfig.Config) error {
-	_, err := c.RulesConfig.Parse()
-	return err
-}
-
-func validateTemplateFiles(c userconfig.Config) error {
-	for fn, content := range c.TemplateFiles {
-		if _, err := template.New(fn).Funcs(template.FuncMap(amtemplate.DefaultFuncs)).Parse(content); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
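The validate endpoint above answers with a small JSON status object, which makes pre-flight checks easy to script. A minimal sketch of a caller (a hypothetical helper, not code from this repository; baseURL is an assumption):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// validateAMConfig posts a raw Alertmanager config to the validate endpoint
// and surfaces the server's verdict.
func validateAMConfig(baseURL, amConfig string) error {
	resp, err := http.Post(baseURL+"/api/prom/configs/alertmanager/validate",
		"application/yaml", bytes.NewBufferString(amConfig))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// The handler writes {"status":"success"} or {"status":"error","error":...}.
	var body struct {
		Status string `json:"status"`
		Error  string `json:"error"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return err
	}
	if body.Status != "success" {
		return fmt.Errorf("invalid Alertmanager config: %s", body.Error)
	}
	return nil
}

-// ConfigsView renders multiple configurations, mapping userID to userconfig.View.
-// Exposed only for tests.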
-type ConfigsView struct { - Configs map[string]userconfig.View `json:"configs"` -} - -func (a *API) getConfigs(w http.ResponseWriter, r *http.Request) { - var cfgs map[string]userconfig.View - var cfgErr error - logger := util_log.WithContext(r.Context(), util_log.Logger) - rawSince := r.FormValue("since") - if rawSince == "" { - cfgs, cfgErr = a.db.GetAllConfigs(r.Context()) - } else { - since, err := strconv.ParseUint(rawSince, 10, 0) - if err != nil { - level.Info(logger).Log("msg", "invalid config ID", "err", err) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - cfgs, cfgErr = a.db.GetConfigs(r.Context(), userconfig.ID(since)) - } - - if cfgErr != nil { - // XXX: Untested - level.Error(logger).Log("msg", "error getting configs", "err", cfgErr) - http.Error(w, cfgErr.Error(), http.StatusInternalServerError) - return - } - - view := ConfigsView{Configs: cfgs} - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(view); err != nil { - // XXX: Untested - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) - if err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - logger := util_log.WithContext(r.Context(), util_log.Logger) - - if err := a.db.DeactivateConfig(r.Context(), userID); err != nil { - if err == sql.ErrNoRows { - level.Info(logger).Log("msg", "deactivate config - no configuration", "userID", userID) - http.Error(w, "No configuration", http.StatusNotFound) - return - } - level.Error(logger).Log("msg", "error deactivating config", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - level.Info(logger).Log("msg", "config deactivated", "userID", userID) - w.WriteHeader(http.StatusOK) -} - -func (a *API) restoreConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) - if err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - logger := util_log.WithContext(r.Context(), util_log.Logger) - - if err := a.db.RestoreConfig(r.Context(), userID); err != nil { - if err == sql.ErrNoRows { - level.Info(logger).Log("msg", "restore config - no configuration", "userID", userID) - http.Error(w, "No configuration", http.StatusNotFound) - return - } - level.Error(logger).Log("msg", "error restoring config", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - level.Info(logger).Log("msg", "config restored", "userID", userID) - w.WriteHeader(http.StatusOK) -} - -const ( - FormatInvalid = "invalid" - FormatJSON = "json" - FormatYAML = "yaml" -) - -func parseConfigFormat(v string, defaultFormat string) string { - if v == "" { - return defaultFormat - } - parts := strings.Split(v, ",") - for _, part := range parts { - mimeType, _, err := mime.ParseMediaType(part) - if err != nil { - continue - } - switch mimeType { - case "application/json": - return FormatJSON - case "text/yaml", "text/x-yaml", "application/yaml", "application/x-yaml": - return FormatYAML - } - } - return defaultFormat -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go deleted file mode 100644 index 1903098d6..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go +++ /dev/null @@ -1,184 +0,0 @@ -package client - -import 
(
-	"context"
-	"crypto/tls"
-	"encoding/json"
-	"errors"
-	"flag"
-	"fmt"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/go-kit/log/level"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-	"github.com/prometheus/common/version"
-	"github.com/weaveworks/common/instrument"
-
-	"github.com/cortexproject/cortex/pkg/configs/userconfig"
-	"github.com/cortexproject/cortex/pkg/util/flagext"
-	util_log "github.com/cortexproject/cortex/pkg/util/log"
-	tls_cfg "github.com/cortexproject/cortex/pkg/util/tls"
-)
-
-var (
-	errBadURL = errors.New("configs_api_url is not set or valid")
-)
-
-// Config says where we can find the ruler configs.
-type Config struct {
-	ConfigsAPIURL flagext.URLValue     `yaml:"configs_api_url"`
-	ClientTimeout time.Duration        `yaml:"client_timeout"` // HTTP timeout duration for requests made to the Weave Cloud configs service.
-	TLS           tls_cfg.ClientConfig `yaml:",inline"`
-}
-
-// RegisterFlagsWithPrefix adds the flags required to configure this to the given FlagSet
-func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
-	f.Var(&cfg.ConfigsAPIURL, prefix+"configs.url", "URL of configs API server.")
-	f.DurationVar(&cfg.ClientTimeout, prefix+"configs.client-timeout", 5*time.Second, "Timeout for requests to Weave Cloud configs service.")
-	cfg.TLS.RegisterFlagsWithPrefix(prefix+"configs", f)
-}
-
-var configsRequestDuration = instrument.NewHistogramCollector(promauto.NewHistogramVec(prometheus.HistogramOpts{
-	Namespace: "cortex",
-	Name:      "configs_request_duration_seconds",
-	Help:      "Time spent requesting configs.",
-	Buckets:   prometheus.DefBuckets,
-}, []string{"operation", "status_code"}))
-
-// Client is what the ruler and alertmanager need from a config store to process rules.
-type Client interface {
-	// GetRules returns all Cortex configurations from a configs API server
-	// that have been updated after the given userconfig.ID was last updated.
-	GetRules(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error)
-
-	// GetAlerts fetches all the Alertmanager configs that have changed since
-	// the given userconfig.ID.
-	GetAlerts(ctx context.Context, since userconfig.ID) (*ConfigsResponse, error)
-}
-
-// New creates a new ConfigDBClient.
-func New(cfg Config) (*ConfigDBClient, error) {
-
-	if cfg.ConfigsAPIURL.URL == nil {
-		return nil, errBadURL
-	}
-
-	client := &ConfigDBClient{
-		URL:     cfg.ConfigsAPIURL.URL,
-		Timeout: cfg.ClientTimeout,
-	}
-
-	tlsConfig, err := cfg.TLS.GetTLSConfig()
-	if err != nil {
-		return nil, err
-	}
-
-	if tlsConfig != nil {
-		client.TLSConfig = tlsConfig
-	}
-	return client, nil
-}
-
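As a usage sketch of the client removed here (illustrative only; the "ruler." flag prefix is an arbitrary choice for the example):

package main

import (
	"context"
	"flag"
	"log"

	"github.com/cortexproject/cortex/pkg/configs/client"
)

func main() {
	var cfg client.Config
	// Registers -ruler.configs.url, -ruler.configs.client-timeout, and the
	// TLS flags under the same prefix.
	cfg.RegisterFlagsWithPrefix("ruler.", flag.CommandLine)
	flag.Parse()

	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// since == 0 fetches every user's rule config.
	rules, err := c.GetRules(context.Background(), 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched rule configs for %d users", len(rules))
}

-// ConfigDBClient allows retrieving recording and alerting rules from the configs server.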
-type ConfigDBClient struct {
-	URL       *url.URL
-	Timeout   time.Duration
-	TLSConfig *tls.Config
-}
-
-// GetRules implements Client
-func (c ConfigDBClient) GetRules(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
-	suffix := ""
-	if since != 0 {
-		suffix = fmt.Sprintf("?since=%d", since)
-	}
-	endpoint := fmt.Sprintf("%s/private/api/prom/configs/rules%s", c.URL.String(), suffix)
-	var response *ConfigsResponse
-	err := instrument.CollectedRequest(ctx, "GetRules", configsRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
-		var err error
-		response, err = doRequest(endpoint, c.Timeout, c.TLSConfig, since)
-		return err
-	})
-	if err != nil {
-		return nil, err
-	}
-	configs := map[string]userconfig.VersionedRulesConfig{}
-	for id, view := range response.Configs {
-		cfg := view.GetVersionedRulesConfig()
-		if cfg != nil {
-			configs[id] = *cfg
-		}
-	}
-	return configs, nil
-}
-
-// GetAlerts implements Client.
-func (c ConfigDBClient) GetAlerts(ctx context.Context, since userconfig.ID) (*ConfigsResponse, error) {
-	suffix := ""
-	if since != 0 {
-		suffix = fmt.Sprintf("?since=%d", since)
-	}
-	endpoint := fmt.Sprintf("%s/private/api/prom/configs/alertmanager%s", c.URL.String(), suffix)
-	var response *ConfigsResponse
-	err := instrument.CollectedRequest(ctx, "GetAlerts", configsRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
-		var err error
-		response, err = doRequest(endpoint, c.Timeout, c.TLSConfig, since)
-		return err
-	})
-	return response, err
-}
-
-func doRequest(endpoint string, timeout time.Duration, tlsConfig *tls.Config, since userconfig.ID) (*ConfigsResponse, error) {
-	req, err := http.NewRequest("GET", endpoint, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	client := &http.Client{Timeout: timeout}
-	if tlsConfig != nil {
-		client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
-	}
-
-	req.Header.Set("User-Agent", fmt.Sprintf("Cortex/%s", version.Version))
-
-	resp, err := client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK {
-		return nil, fmt.Errorf("Invalid response from configs server: %v", resp.StatusCode)
-	}
-
-	var config ConfigsResponse
-	if err := json.NewDecoder(resp.Body).Decode(&config); err != nil {
-		level.Error(util_log.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err)
-		return nil, err
-	}
-
-	config.since = since
-	return &config, nil
-}
-
-// ConfigsResponse is a response from the configs server for GetConfigs.
-type ConfigsResponse struct {
-	// The version since which these configs were changed
-	since userconfig.ID
-
-	// Configs maps user ID to their latest userconfig.View.
-	Configs map[string]userconfig.View `json:"configs"`
-}
-
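The unexported since field plus GetLatestConfigID is what makes polling incremental: a caller remembers the highest ID it has seen and asks only for newer configs. A sketch of such a poll loop (pollAlerts is a hypothetical name; error handling is reduced to retrying on the next tick):

package configpoll

import (
	"context"
	"time"

	"github.com/cortexproject/cortex/pkg/configs/client"
	"github.com/cortexproject/cortex/pkg/configs/userconfig"
)

// pollAlerts fetches only the Alertmanager configs that changed since the
// previous poll, advancing the cursor with GetLatestConfigID.
func pollAlerts(ctx context.Context, c client.Client, interval time.Duration) {
	var since userconfig.ID
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			resp, err := c.GetAlerts(ctx, since)
			if err != nil {
				continue // transient failure: retry on the next tick
			}
			// ... apply resp.Configs to local state ...
			since = resp.GetLatestConfigID()
		}
	}
}

-// GetLatestConfigID returns the last config ID from a set of configs.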
-func (c ConfigsResponse) GetLatestConfigID() userconfig.ID { - latest := c.since - for _, config := range c.Configs { - if config.ID > latest { - latest = config.ID - } - } - return latest -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/config.go deleted file mode 100644 index 9ec1d68b6..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package configs - -import ( - "flag" - - "github.com/cortexproject/cortex/pkg/configs/api" - "github.com/cortexproject/cortex/pkg/configs/db" -) - -type Config struct { - DB db.Config `yaml:"database"` - API api.Config `yaml:"api"` -} - -// RegisterFlags adds the flags required to configure this to the given FlagSet. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.DB.RegisterFlags(f) - cfg.API.RegisterFlags(f) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/db.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/db.go deleted file mode 100644 index 0ec290494..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/db.go +++ /dev/null @@ -1,96 +0,0 @@ -package db - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - "net/url" - - "github.com/cortexproject/cortex/pkg/configs/db/memory" - "github.com/cortexproject/cortex/pkg/configs/db/postgres" - "github.com/cortexproject/cortex/pkg/configs/userconfig" -) - -// Config configures the database. -type Config struct { - URI string `yaml:"uri"` - MigrationsDir string `yaml:"migrations_dir"` - PasswordFile string `yaml:"password_file"` - - // Allow injection of mock DBs for unit testing. - Mock DB `yaml:"-"` -} - -// RegisterFlags adds the flags required to configure this to the given FlagSet. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.URI, "configs.database.uri", "postgres://postgres@configs-db.weave.local/configs?sslmode=disable", "URI where the database can be found (for dev you can use memory://)") - f.StringVar(&cfg.MigrationsDir, "configs.database.migrations-dir", "", "Path where the database migration files can be found") - f.StringVar(&cfg.PasswordFile, "configs.database.password-file", "", "File containing password (username goes in URI)") -} - -// DB is the interface for the database. -type DB interface { - // GetRulesConfig gets the user's ruler config - GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) - - // SetRulesConfig does a compare-and-swap (CAS) on the user's rules config. - // `oldConfig` must precisely match the current config in order to change the config to `newConfig`. - // Will return `true` if the config was updated, `false` otherwise. - SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) - - // GetAllRulesConfigs gets all of the ruler configs - GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) - - // GetRulesConfigs gets all of the configs that have been added or have - // changed since the provided config. 
- GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) - - GetConfig(ctx context.Context, userID string) (userconfig.View, error) - SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error - - GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) - GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) - - DeactivateConfig(ctx context.Context, userID string) error - RestoreConfig(ctx context.Context, userID string) error - - Close() error -} - -// New creates a new database. -func New(cfg Config) (DB, error) { - if cfg.Mock != nil { - return cfg.Mock, nil - } - - u, err := url.Parse(cfg.URI) - if err != nil { - return nil, err - } - - if len(cfg.PasswordFile) != 0 { - if u.User == nil { - return nil, fmt.Errorf("--database.password-file requires username in --database.uri") - } - passwordBytes, err := ioutil.ReadFile(cfg.PasswordFile) - if err != nil { - return nil, fmt.Errorf("Could not read database password file: %v", err) - } - u.User = url.UserPassword(u.User.Username(), string(passwordBytes)) - } - - var d DB - switch u.Scheme { - case "memory": - d, err = memory.New(u.String(), cfg.MigrationsDir) - case "postgres": - d, err = postgres.New(u.String(), cfg.MigrationsDir) - default: - return nil, fmt.Errorf("Unknown database type: %s", u.Scheme) - } - if err != nil { - return nil, err - } - return traced{timed{d}}, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/memory/memory.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/memory/memory.go deleted file mode 100644 index a759c425b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/memory/memory.go +++ /dev/null @@ -1,145 +0,0 @@ -package memory - -import ( - "context" - "database/sql" - "fmt" - "time" - - "github.com/cortexproject/cortex/pkg/configs/userconfig" -) - -// DB is an in-memory database for testing, and local development -type DB struct { - cfgs map[string]userconfig.View - id uint -} - -// New creates a new in-memory database -func New(_, _ string) (*DB, error) { - return &DB{ - cfgs: map[string]userconfig.View{}, - id: 0, - }, nil -} - -// GetConfig gets the user's configuration. -func (d *DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) { - c, ok := d.cfgs[userID] - if !ok { - return userconfig.View{}, sql.ErrNoRows - } - return c, nil -} - -// SetConfig sets configuration for a user. -func (d *DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error { - if !cfg.RulesConfig.FormatVersion.IsValid() { - return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion) - } - d.cfgs[userID] = userconfig.View{Config: cfg, ID: userconfig.ID(d.id)} - d.id++ - return nil -} - -// GetAllConfigs gets all of the userconfig. -func (d *DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) { - return d.cfgs, nil -} - -// GetConfigs gets all of the configs that have changed recently. 
-func (d *DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) { - cfgs := map[string]userconfig.View{} - for user, c := range d.cfgs { - if c.ID > since { - cfgs[user] = c - } - } - return cfgs, nil -} - -// SetDeletedAtConfig sets a deletedAt for configuration -// by adding a single new row with deleted_at set -// the same as SetConfig is actually insert -func (d *DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt time.Time) error { - cv, err := d.GetConfig(ctx, userID) - if err != nil { - return err - } - cv.DeletedAt = deletedAt - cv.ID = userconfig.ID(d.id) - d.cfgs[userID] = cv - d.id++ - return nil -} - -// DeactivateConfig deactivates configuration for a user by creating new configuration with DeletedAt set to now -func (d *DB) DeactivateConfig(ctx context.Context, userID string) error { - return d.SetDeletedAtConfig(ctx, userID, time.Now()) -} - -// RestoreConfig restores deactivated configuration for a user by creating new configuration with empty DeletedAt -func (d *DB) RestoreConfig(ctx context.Context, userID string) error { - return d.SetDeletedAtConfig(ctx, userID, time.Time{}) -} - -// Close finishes using the db. Noop. -func (d *DB) Close() error { - return nil -} - -// GetRulesConfig gets the rules config for a user. -func (d *DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) { - c, ok := d.cfgs[userID] - if !ok { - return userconfig.VersionedRulesConfig{}, sql.ErrNoRows - } - cfg := c.GetVersionedRulesConfig() - if cfg == nil { - return userconfig.VersionedRulesConfig{}, sql.ErrNoRows - } - return *cfg, nil -} - -// SetRulesConfig sets the rules config for a user. -func (d *DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) { - c, ok := d.cfgs[userID] - if !ok { - return true, d.SetConfig(ctx, userID, userconfig.Config{RulesConfig: newConfig}) - } - if !oldConfig.Equal(c.Config.RulesConfig) { - return false, nil - } - return true, d.SetConfig(ctx, userID, userconfig.Config{ - AlertmanagerConfig: c.Config.AlertmanagerConfig, - RulesConfig: newConfig, - }) -} - -// GetAllRulesConfigs gets the rules configs for all users that have them. -func (d *DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) { - cfgs := map[string]userconfig.VersionedRulesConfig{} - for user, c := range d.cfgs { - cfg := c.GetVersionedRulesConfig() - if cfg != nil { - cfgs[user] = *cfg - } - } - return cfgs, nil -} - -// GetRulesConfigs gets the rules configs that have changed -// since the given config version. 
-func (d *DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
-	cfgs := map[string]userconfig.VersionedRulesConfig{}
-	for user, c := range d.cfgs {
-		if c.ID <= since {
-			continue
-		}
-		cfg := c.GetVersionedRulesConfig()
-		if cfg != nil {
-			cfgs[user] = *cfg
-		}
-	}
-	return cfgs, nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go
deleted file mode 100644
index c94fd9311..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go
+++ /dev/null
@@ -1,371 +0,0 @@
-package postgres
-
-import (
-	"context"
-	"database/sql"
-	"encoding/json"
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/cortexproject/cortex/pkg/configs/userconfig"
-	util_log "github.com/cortexproject/cortex/pkg/util/log"
-
-	"github.com/Masterminds/squirrel"
-	"github.com/go-kit/log/level"
-	"github.com/golang-migrate/migrate/v4"
-	_ "github.com/golang-migrate/migrate/v4/database/postgres" // Import the postgres migrations driver
-	_ "github.com/golang-migrate/migrate/v4/source/file"       // Import the file source for migrations
-	"github.com/lib/pq"
-	_ "github.com/lib/pq" // Import the postgres sql driver
-	"github.com/pkg/errors"
-)
-
-const (
-	// TODO: These are a legacy from when configs was more general. Update the
-	// schema so this isn't needed.
-	entityType = "org"
-	subsystem  = "cortex"
-	// timeout waiting for database connection to be established
-	dbTimeout = 5 * time.Minute
-)
-
-var (
-	allConfigs = squirrel.Eq{
-		"owner_type": entityType,
-		"subsystem":  subsystem,
-	}
-)
-
-// DB is a postgres db, for dev and production
-type DB struct {
-	dbProxy
-	squirrel.StatementBuilderType
-}
-
-type dbProxy interface {
-	Exec(query string, args ...interface{}) (sql.Result, error)
-	Query(query string, args ...interface{}) (*sql.Rows, error)
-	QueryRow(query string, args ...interface{}) *sql.Row
-	Prepare(query string) (*sql.Stmt, error)
-}
-
-// dbWait waits for database connection to be established
-func dbWait(db *sql.DB) error {
-	deadline := time.Now().Add(dbTimeout)
-	var err error
-	for tries := 0; time.Now().Before(deadline); tries++ {
-		err = db.Ping()
-		if err == nil {
-			return nil
-		}
-		level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
-		time.Sleep(time.Second << uint(tries))
-	}
-	return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
-}
-
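dbWait's retry loop doubles its sleep on every attempt (1s, 2s, 4s, ...). The same shape, extracted into a standalone helper (a sketch under the same design; note that, as in dbWait, the uncapped shift means the final sleep can overshoot the deadline considerably):

package main

import (
	"fmt"
	"time"
)

// waitReady polls probe with exponential backoff until it succeeds or the
// deadline passes, mirroring the dbWait loop above.
func waitReady(probe func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	var err error
	for tries := 0; time.Now().Before(deadline); tries++ {
		if err = probe(); err == nil {
			return nil
		}
		time.Sleep(time.Second << uint(tries)) // 1s, 2s, 4s, ...
	}
	return fmt.Errorf("not ready after %s: %v", timeout, err)
}

func main() {
	attempts := 0
	err := waitReady(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("not yet")
		}
		return nil
	}, 30*time.Second)
	fmt.Println(err) // <nil> after the third attempt
}
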
-// New creates a new postgres DB
-func New(uri, migrationsDir string) (DB, error) {
-	db, err := sql.Open("postgres", uri)
-	if err != nil {
-		return DB{}, errors.Wrap(err, "cannot open postgres db")
-	}
-
-	if err := dbWait(db); err != nil {
-		return DB{}, errors.Wrap(err, "cannot establish db connection")
-	}
-
-	if migrationsDir != "" {
-		// Add file scheme if no scheme is present
-		if !strings.HasPrefix(migrationsDir, "file:") {
-			migrationsDir = "file:" + migrationsDir
-		}
-
-		m, err := migrate.New(migrationsDir, uri)
-		if err != nil {
-			return DB{}, errors.Wrap(err, "database migrations initialization failed")
-		}
-
-		level.Info(util_log.Logger).Log("msg", "running database migrations...")
-
-		if err := m.Up(); err != nil {
-			if err != migrate.ErrNoChange {
-				return DB{}, errors.Wrap(err, "database migrations failed")
-			}
-			level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
-		}
-	}
-
-	return DB{
-		dbProxy:              db,
-		StatementBuilderType: statementBuilder(db),
-	}, err
-}
-
-var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
-
-func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
-	rows, err := d.Select("id", "owner_id", "config", "deleted_at").
-		Options("DISTINCT ON (owner_id)").
-		From("configs").
-		Where(filter).
-		OrderBy("owner_id, id DESC").
-		Query()
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-	cfgs := map[string]userconfig.View{}
-	for rows.Next() {
-		var cfg userconfig.View
-		var cfgBytes []byte
-		var userID string
-		var deletedAt pq.NullTime
-		err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
-		if err != nil {
-			return nil, err
-		}
-		err = json.Unmarshal(cfgBytes, &cfg.Config)
-		if err != nil {
-			return nil, err
-		}
-		cfg.DeletedAt = deletedAt.Time
-		cfgs[userID] = cfg
-	}
-
-	// Check for any errors encountered.
-	err = rows.Err()
-	if err != nil {
-		return nil, err
-	}
-
-	return cfgs, nil
-}
-
-// GetConfig gets a configuration.
-func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
-	var cfgView userconfig.View
-	var cfgBytes []byte
-	var deletedAt pq.NullTime
-	err := d.Select("id", "config", "deleted_at").
-		From("configs").
-		Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
-		OrderBy("id DESC").
-		Limit(1).
-		QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
-	if err != nil {
-		return cfgView, err
-	}
-	cfgView.DeletedAt = deletedAt.Time
-	err = json.Unmarshal(cfgBytes, &cfgView.Config)
-	return cfgView, err
-}
-
-// SetConfig sets a configuration.
-func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error {
-	if !cfg.RulesConfig.FormatVersion.IsValid() {
-		return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
-	}
-	cfgBytes, err := json.Marshal(cfg)
-	if err != nil {
-		return err
-	}
-
-	_, err = d.Insert("configs").
-		Columns("owner_id", "owner_type", "subsystem", "config").
-		Values(userID, entityType, subsystem, cfgBytes).
-		Exec()
-	return err
-}
-
-// GetAllConfigs gets all of the configs.
-func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
-	return d.findConfigs(allConfigs)
-}
-
-// GetConfigs gets all of the configs that have changed recently.
-func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
-	return d.findConfigs(squirrel.And{
-		allConfigs,
-		squirrel.Gt{"id": since},
-	})
-}
-
-// GetRulesConfig gets the latest rules config for a user.
-func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
-	current, err := d.GetConfig(ctx, userID)
-	if err != nil {
-		return userconfig.VersionedRulesConfig{}, err
-	}
-	cfg := current.GetVersionedRulesConfig()
-	if cfg == nil {
-		return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
-	}
-	return *cfg, nil
-}
-
-// SetRulesConfig sets the current rules config for a user.
-func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
-	updated := false
-	err := d.Transaction(func(tx DB) error {
-		current, err := d.GetConfig(ctx, userID)
-		if err != nil && err != sql.ErrNoRows {
-			return err
-		}
-		// The supplied oldConfig must match the current config. If no config
-		// exists, then oldConfig must be nil. Otherwise, it must exactly
-		// equal the existing config.
-		if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
-			return nil
-		}
-		new := userconfig.Config{
-			AlertmanagerConfig: current.Config.AlertmanagerConfig,
-			RulesConfig:        newConfig,
-		}
-		updated = true
-		return d.SetConfig(ctx, userID, new)
-	})
-	return updated, err
-}
-
-// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
-// set of all active rules configurations across all our users.
-func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
-	rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
-		Options("DISTINCT ON (owner_id)").
-		From("configs").
-		Where(filter).
-		// `->>` gets a JSON object field as text. When a config row exists
-		// and alertmanager config is provided but ruler config has not yet
-		// been, the 'rules_files' key will have an empty JSON object as its
-		// value. This is (probably) the most efficient way to test for a
-		// non-empty `rules_files` key.
-		//
-		// This whole situation is way too complicated. See
-		// https://github.com/cortexproject/cortex/issues/619 for the whole
-		// story, and our plans to improve it.
-		Where("config ->> 'rules_files' <> '{}'").
-		OrderBy("owner_id, id DESC").
-		Query()
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-	cfgs := map[string]userconfig.VersionedRulesConfig{}
-	for rows.Next() {
-		var cfg userconfig.VersionedRulesConfig
-		var userID string
-		var cfgBytes []byte
-		var rfvBytes []byte
-		var deletedAt pq.NullTime
-		err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
-		if err != nil {
-			return nil, err
-		}
-		err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
-		if err != nil {
-			return nil, err
-		}
-		// Legacy configs don't have a rule format version, in which case this will
-		// be a zero-length (but non-nil) slice.
-		if len(rfvBytes) > 0 {
-			err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
-			if err != nil {
-				return nil, err
-			}
-		}
-		cfg.DeletedAt = deletedAt.Time
-		cfgs[userID] = cfg
-	}
-
-	// Check for any errors encountered.
-	err = rows.Err()
-	if err != nil {
-		return nil, err
-	}
-
-	return cfgs, nil
-}
-
-// GetAllRulesConfigs gets all rules configs for all users.
-func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) {
-	return d.findRulesConfigs(allConfigs)
-}
-
-// GetRulesConfigs gets all the rules configs that have changed since a given config.
-func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
-	return d.findRulesConfigs(squirrel.And{
-		allConfigs,
-		squirrel.Gt{"id": since},
-	})
-}
-
-// SetDeletedAtConfig sets deleted_at for a user's configuration by adding a
-// single new row with deleted_at set; like SetConfig, it actually inserts a
-// new row rather than updating an existing one.
-func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error {
-	cfgBytes, err := json.Marshal(cfg)
-	if err != nil {
-		return err
-	}
-	_, err = d.Insert("configs").
-		Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config").
-		Values(userID, entityType, subsystem, deletedAt, cfgBytes).
-		Exec()
-	return err
-}
-
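SetRulesConfig is a compare-and-swap: the caller passes the config it believes is current, and the write only happens when that guess matches. A typical read-modify-write retry loop on top of the DB interface shown earlier (a sketch; updateRules and mutate are hypothetical names):

import (
	"context"
	"database/sql"

	"github.com/cortexproject/cortex/pkg/configs/userconfig"
)

// updateRules retries the compare-and-swap until it applies cleanly.
// DB is the interface from the configs db package above.
func updateRules(ctx context.Context, db DB, userID string,
	mutate func(userconfig.RulesConfig) userconfig.RulesConfig) error {
	for {
		current, err := db.GetRulesConfig(ctx, userID)
		if err != nil && err != sql.ErrNoRows {
			return err
		}
		// On sql.ErrNoRows, current is the zero value, whose nil Files field
		// is exactly the "no existing config" oldConfig the CAS expects.
		updated, err := db.SetRulesConfig(ctx, userID, current.Config, mutate(current.Config))
		if err != nil {
			return err
		}
		if updated {
			return nil
		}
		// Lost the race to a concurrent writer: reload and try again.
	}
}

-// DeactivateConfig deactivates a configuration.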
-func (d DB) DeactivateConfig(ctx context.Context, userID string) error { - cfg, err := d.GetConfig(ctx, userID) - if err != nil { - return err - } - return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config) -} - -// RestoreConfig restores configuration. -func (d DB) RestoreConfig(ctx context.Context, userID string) error { - cfg, err := d.GetConfig(ctx, userID) - if err != nil { - return err - } - return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{}, cfg.Config) -} - -// Transaction runs the given function in a postgres transaction. If fn returns -// an error the txn will be rolled back. -func (d DB) Transaction(f func(DB) error) error { - if _, ok := d.dbProxy.(*sql.Tx); ok { - // Already in a nested transaction - return f(d) - } - - tx, err := d.dbProxy.(*sql.DB).Begin() - if err != nil { - return err - } - err = f(DB{ - dbProxy: tx, - StatementBuilderType: statementBuilder(tx), - }) - if err != nil { - // Rollback error is ignored as we already have one in progress - if err2 := tx.Rollback(); err2 != nil { - level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2) - } - return err - } - return tx.Commit() -} - -// Close finishes using the db -func (d DB) Close() error { - if db, ok := d.dbProxy.(interface { - Close() error - }); ok { - return db.Close() - } - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/timed.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/timed.go deleted file mode 100644 index 58cbfdc9e..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/timed.go +++ /dev/null @@ -1,128 +0,0 @@ -package db - -import ( - "context" - - "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/instrument" - - "github.com/cortexproject/cortex/pkg/configs/userconfig" -) - -var ( - databaseRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "database_request_duration_seconds", - Help: "Time spent (in seconds) doing database requests.", - Buckets: prometheus.DefBuckets, - }, []string{"method", "status_code"})) -) - -func init() { - databaseRequestDuration.Register() -} - -// timed adds prometheus timings to another database implementation -type timed struct { - d DB -} - -func (t timed) GetConfig(ctx context.Context, userID string) (userconfig.View, error) { - var cfg userconfig.View - err := instrument.CollectedRequest(ctx, "DB.GetConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - cfg, err = t.d.GetConfig(ctx, userID) // Warning: this will produce an incorrect result if the configID ever overflows - return err - }) - return cfg, err -} - -func (t timed) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error { - return instrument.CollectedRequest(ctx, "DB.SetConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - return t.d.SetConfig(ctx, userID, cfg) // Warning: this will produce an incorrect result if the configID ever overflows - }) -} - -func (t timed) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) { - var cfgs map[string]userconfig.View - err := instrument.CollectedRequest(ctx, "DB.GetAllConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - cfgs, err = t.d.GetAllConfigs(ctx) - return err - }) - - return cfgs, err -} - -func (t timed) GetConfigs(ctx 
context.Context, since userconfig.ID) (map[string]userconfig.View, error) { - var cfgs map[string]userconfig.View - err := instrument.CollectedRequest(ctx, "DB.GetConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - cfgs, err = t.d.GetConfigs(ctx, since) - return err - }) - - return cfgs, err -} - -func (t timed) DeactivateConfig(ctx context.Context, userID string) error { - return instrument.CollectedRequest(ctx, "DB.DeactivateConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - return t.d.DeactivateConfig(ctx, userID) - }) -} - -func (t timed) RestoreConfig(ctx context.Context, userID string) (err error) { - return instrument.CollectedRequest(ctx, "DB.RestoreConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - return t.d.RestoreConfig(ctx, userID) - }) -} - -func (t timed) Close() error { - return instrument.CollectedRequest(context.Background(), "DB.Close", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - return t.d.Close() - }) -} - -func (t timed) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) { - var cfg userconfig.VersionedRulesConfig - err := instrument.CollectedRequest(ctx, "DB.GetRulesConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - cfg, err = t.d.GetRulesConfig(ctx, userID) - return err - }) - - return cfg, err -} - -func (t timed) SetRulesConfig(ctx context.Context, userID string, oldCfg, newCfg userconfig.RulesConfig) (bool, error) { - var updated bool - err := instrument.CollectedRequest(ctx, "DB.SetRulesConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - updated, err = t.d.SetRulesConfig(ctx, userID, oldCfg, newCfg) - return err - }) - - return updated, err -} - -func (t timed) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) { - var cfgs map[string]userconfig.VersionedRulesConfig - err := instrument.CollectedRequest(ctx, "DB.GetAllRulesConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - cfgs, err = t.d.GetAllRulesConfigs(ctx) - return err - }) - - return cfgs, err -} - -func (t timed) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) { - var cfgs map[string]userconfig.VersionedRulesConfig - err := instrument.CollectedRequest(ctx, "DB.GetRulesConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - var err error - cfgs, err = t.d.GetRulesConfigs(ctx, since) - return err - }) - - return cfgs, err -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go deleted file mode 100644 index 6f7bf7e01..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go +++ /dev/null @@ -1,75 +0,0 @@ -package db - -import ( - "context" - "fmt" - - "github.com/cortexproject/cortex/pkg/configs/userconfig" - util_log "github.com/cortexproject/cortex/pkg/util/log" - - "github.com/go-kit/log/level" -) - -// traced adds log trace lines on each db call -type traced struct { - d DB -} - -func (t traced) trace(name string, args ...interface{}) { - level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) -} - -func (t traced) GetConfig(ctx context.Context, userID 
string) (cfg userconfig.View, err error) {
-	defer func() { t.trace("GetConfig", userID, cfg, err) }()
-	return t.d.GetConfig(ctx, userID)
-}
-
-func (t traced) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) (err error) {
-	defer func() { t.trace("SetConfig", userID, cfg, err) }()
-	return t.d.SetConfig(ctx, userID, cfg)
-}
-
-func (t traced) GetAllConfigs(ctx context.Context) (cfgs map[string]userconfig.View, err error) {
-	defer func() { t.trace("GetAllConfigs", cfgs, err) }()
-	return t.d.GetAllConfigs(ctx)
-}
-
-func (t traced) GetConfigs(ctx context.Context, since userconfig.ID) (cfgs map[string]userconfig.View, err error) {
-	defer func() { t.trace("GetConfigs", since, cfgs, err) }()
-	return t.d.GetConfigs(ctx, since)
-}
-
-func (t traced) DeactivateConfig(ctx context.Context, userID string) (err error) {
-	defer func() { t.trace("DeactivateConfig", userID, err) }()
-	return t.d.DeactivateConfig(ctx, userID)
-}
-
-func (t traced) RestoreConfig(ctx context.Context, userID string) (err error) {
-	defer func() { t.trace("RestoreConfig", userID, err) }()
-	return t.d.RestoreConfig(ctx, userID)
-}
-
-func (t traced) Close() (err error) {
-	defer func() { t.trace("Close", err) }()
-	return t.d.Close()
-}
-
-func (t traced) GetRulesConfig(ctx context.Context, userID string) (cfg userconfig.VersionedRulesConfig, err error) {
-	defer func() { t.trace("GetRulesConfig", userID, cfg, err) }()
-	return t.d.GetRulesConfig(ctx, userID)
-}
-
-func (t traced) SetRulesConfig(ctx context.Context, userID string, oldCfg, newCfg userconfig.RulesConfig) (updated bool, err error) {
-	defer func() { t.trace("SetRulesConfig", userID, oldCfg, newCfg, updated, err) }()
-	return t.d.SetRulesConfig(ctx, userID, oldCfg, newCfg)
-}
-
-func (t traced) GetAllRulesConfigs(ctx context.Context) (cfgs map[string]userconfig.VersionedRulesConfig, err error) {
-	defer func() { t.trace("GetAllRulesConfigs", cfgs, err) }()
-	return t.d.GetAllRulesConfigs(ctx)
-}
-
-func (t traced) GetRulesConfigs(ctx context.Context, since userconfig.ID) (cfgs map[string]userconfig.VersionedRulesConfig, err error) {
-	defer func() { t.trace("GetRulesConfigs", since, cfgs, err) }()
-	return t.d.GetRulesConfigs(ctx, since)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/ast.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/ast.go
deleted file mode 100644
index ca1165753..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/ast.go
+++ /dev/null
@@ -1,341 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//nolint //Since this was copied from Prometheus leave it as is
-package promql
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/storage"
-)
-
-// Node is a generic interface for all nodes in an AST.
-//
-// Whenever numerous nodes are listed such as in a switch-case statement
-// or a chain of function definitions (e.g.
String(), expr(), etc.) convention is -// to list them as follows: -// -// - Statements -// - statement types (alphabetical) -// - ... -// - Expressions -// - expression types (alphabetical) -// - ... -// -type Node interface { - // String representation of the node that returns the given node when parsed - // as part of a valid query. - String() string -} - -// Statement is a generic interface for all statements. -type Statement interface { - Node - - // stmt ensures that no other type accidentally implements the interface - stmt() -} - -// Statements is a list of statement nodes that implements Node. -type Statements []Statement - -// AlertStmt represents an added alert rule. -type AlertStmt struct { - Name string - Expr Expr - Duration time.Duration - Labels labels.Labels - Annotations labels.Labels -} - -// EvalStmt holds an expression and information on the range it should -// be evaluated on. -type EvalStmt struct { - Expr Expr // Expression to be evaluated. - - // The time boundaries for the evaluation. If Start equals End an instant - // is evaluated. - Start, End time.Time - // Time between two evaluated instants for the range [Start:End]. - Interval time.Duration -} - -// RecordStmt represents an added recording rule. -type RecordStmt struct { - Name string - Expr Expr - Labels labels.Labels -} - -func (*AlertStmt) stmt() {} -func (*EvalStmt) stmt() {} -func (*RecordStmt) stmt() {} - -// Expr is a generic interface for all expression types. -type Expr interface { - Node - - // Type returns the type the expression evaluates to. It does not perform - // in-depth checks as this is done at parsing-time. - Type() ValueType - // expr ensures that no other types accidentally implement the interface. - expr() -} - -// Expressions is a list of expression nodes that implements Node. -type Expressions []Expr - -// AggregateExpr represents an aggregation operation on a Vector. -type AggregateExpr struct { - Op ItemType // The used aggregation operation. - Expr Expr // The Vector expression over which is aggregated. - Param Expr // Parameter used by some aggregators. - Grouping []string // The labels by which to group the Vector. - Without bool // Whether to drop the given labels rather than keep them. -} - -// BinaryExpr represents a binary expression between two child expressions. -type BinaryExpr struct { - Op ItemType // The operation of the expression. - LHS, RHS Expr // The operands on the respective sides of the operator. - - // The matching behavior for the operation if both operands are Vectors. - // If they are not this field is nil. - VectorMatching *VectorMatching - - // If a comparison operator, return 0/1 rather than filtering. - ReturnBool bool -} - -// Call represents a function call. -type Call struct { - Func *Function // The function that was called. - Args Expressions // Arguments used in the call. -} - -// MatrixSelector represents a Matrix selection. -type MatrixSelector struct { - Name string - Range time.Duration - Offset time.Duration - LabelMatchers []*labels.Matcher - - // The series are populated at query preparation time. - series []storage.Series -} - -// NumberLiteral represents a number. -type NumberLiteral struct { - Val float64 -} - -// ParenExpr wraps an expression so it cannot be disassembled as a consequence -// of operator precedence. -type ParenExpr struct { - Expr Expr -} - -// StringLiteral represents a string. -type StringLiteral struct { - Val string -} - -// UnaryExpr represents a unary operation on another expression. 
-// Currently unary operations are only supported for Scalars. -type UnaryExpr struct { - Op ItemType - Expr Expr -} - -// VectorSelector represents a Vector selection. -type VectorSelector struct { - Name string - Offset time.Duration - LabelMatchers []*labels.Matcher - - // The series are populated at query preparation time. - series []storage.Series -} - -func (e *AggregateExpr) Type() ValueType { return ValueTypeVector } -func (e *Call) Type() ValueType { return e.Func.ReturnType } -func (e *MatrixSelector) Type() ValueType { return ValueTypeMatrix } -func (e *NumberLiteral) Type() ValueType { return ValueTypeScalar } -func (e *ParenExpr) Type() ValueType { return e.Expr.Type() } -func (e *StringLiteral) Type() ValueType { return ValueTypeString } -func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() } -func (e *VectorSelector) Type() ValueType { return ValueTypeVector } -func (e *BinaryExpr) Type() ValueType { - if e.LHS.Type() == ValueTypeScalar && e.RHS.Type() == ValueTypeScalar { - return ValueTypeScalar - } - return ValueTypeVector -} - -func (*AggregateExpr) expr() {} -func (*BinaryExpr) expr() {} -func (*Call) expr() {} -func (*MatrixSelector) expr() {} -func (*NumberLiteral) expr() {} -func (*ParenExpr) expr() {} -func (*StringLiteral) expr() {} -func (*UnaryExpr) expr() {} -func (*VectorSelector) expr() {} - -// VectorMatchCardinality describes the cardinality relationship -// of two Vectors in a binary operation. -type VectorMatchCardinality int - -const ( - CardOneToOne VectorMatchCardinality = iota - CardManyToOne - CardOneToMany - CardManyToMany -) - -func (vmc VectorMatchCardinality) String() string { - switch vmc { - case CardOneToOne: - return "one-to-one" - case CardManyToOne: - return "many-to-one" - case CardOneToMany: - return "one-to-many" - case CardManyToMany: - return "many-to-many" - } - panic("promql.VectorMatchCardinality.String: unknown match cardinality") -} - -// VectorMatching describes how elements from two Vectors in a binary -// operation are supposed to be matched. -type VectorMatching struct { - // The cardinality of the two Vectors. - Card VectorMatchCardinality - // MatchingLabels contains the labels which define equality of a pair of - // elements from the Vectors. - MatchingLabels []string - // On includes the given label names from matching, - // rather than excluding them. - On bool - // Include contains additional labels that should be included in - // the result from the side with the lower cardinality. - Include []string -} - -// Visitor allows visiting a Node and its child nodes. The Visit method is -// invoked for each node with the path leading to the node provided additionally. -// If the result visitor w is not nil and no error, Walk visits each of the children -// of node with the visitor w, followed by a call of w.Visit(nil, nil). -type Visitor interface { - Visit(node Node, path []Node) (w Visitor, err error) -} - -// Walk traverses an AST in depth-first order: It starts by calling -// v.Visit(node, path); node must not be nil. If the visitor w returned by -// v.Visit(node, path) is not nil and the visitor returns no error, Walk is -// invoked recursively with visitor w for each of the non-nil children of node, -// followed by a call of w.Visit(nil), returning an error -// As the tree is descended the path of previous nodes is provided. 
-func Walk(v Visitor, node Node, path []Node) error { - var err error - if v, err = v.Visit(node, path); v == nil || err != nil { - return err - } - path = append(path, node) - - switch n := node.(type) { - case Statements: - for _, s := range n { - if err := Walk(v, s, path); err != nil { - return err - } - } - case *AlertStmt: - if err := Walk(v, n.Expr, path); err != nil { - return err - } - - case *EvalStmt: - if err := Walk(v, n.Expr, path); err != nil { - return err - } - - case *RecordStmt: - if err := Walk(v, n.Expr, path); err != nil { - return err - } - - case Expressions: - for _, e := range n { - if err := Walk(v, e, path); err != nil { - return err - } - } - case *AggregateExpr: - if err := Walk(v, n.Expr, path); err != nil { - return err - } - - case *BinaryExpr: - if err := Walk(v, n.LHS, path); err != nil { - return err - } - if err := Walk(v, n.RHS, path); err != nil { - return err - } - - case *Call: - if err := Walk(v, n.Args, path); err != nil { - return err - } - - case *ParenExpr: - if err := Walk(v, n.Expr, path); err != nil { - return err - } - - case *UnaryExpr: - if err := Walk(v, n.Expr, path); err != nil { - return err - } - - case *MatrixSelector, *NumberLiteral, *StringLiteral, *VectorSelector: - // nothing to do - - default: - panic(fmt.Errorf("promql.Walk: unhandled node type %T", node)) - } - - _, err = v.Visit(nil, nil) - return err -} - -type inspector func(Node, []Node) error - -func (f inspector) Visit(node Node, path []Node) (Visitor, error) { - if err := f(node, path); err == nil { - return f, nil - } else { - return nil, err - } -} - -// Inspect traverses an AST in depth-first order: It starts by calling -// f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f -// for all the non-nil children of node, recursively. -func Inspect(node Node, f inspector) { - Walk(inspector(f), node, nil) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/engine.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/engine.go deleted file mode 100644 index 11f012d9a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/engine.go +++ /dev/null @@ -1,1747 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-//nolint //Since this was copied from Prometheus leave it as is
-package promql
-
-import (
-	"container/heap"
-	"context"
-	"fmt"
-	"math"
-	"regexp"
-	"runtime"
-	"sort"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	opentracing "github.com/opentracing/opentracing-go"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/model/timestamp"
-	"github.com/prometheus/prometheus/model/value"
-	"github.com/prometheus/prometheus/storage"
-
-	"github.com/prometheus/prometheus/util/stats"
-)
-
-const (
-	namespace = "prometheus"
-	subsystem = "engine"
-	queryTag  = "query"
-
-	// The largest SampleValue that can be converted to an int64 without overflow.
-	maxInt64 = 9223372036854774784
-	// The smallest SampleValue that can be converted to an int64 without underflow.
-	minInt64 = -9223372036854775808
-)
-
-type engineMetrics struct {
-	currentQueries       prometheus.Gauge
-	maxConcurrentQueries prometheus.Gauge
-	queryQueueTime       prometheus.Summary
-	queryPrepareTime     prometheus.Summary
-	queryInnerEval       prometheus.Summary
-	queryResultSort      prometheus.Summary
-}
-
-// convertibleToInt64 returns true if v does not over-/underflow an int64.
-func convertibleToInt64(v float64) bool {
-	return v <= maxInt64 && v >= minInt64
-}
-
-type (
-	// ErrQueryTimeout is returned if a query timed out during processing.
-	ErrQueryTimeout string
-	// ErrQueryCanceled is returned if a query was canceled during processing.
-	ErrQueryCanceled string
-	// ErrStorage is returned if an error was encountered in the storage layer
-	// during query handling.
-	ErrStorage error
-)
-
-func (e ErrQueryTimeout) Error() string  { return fmt.Sprintf("query timed out in %s", string(e)) }
-func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) }
-
-// A Query is derived from a raw query string and can be run against an engine
-// it is associated with.
-type Query interface {
-	// Exec processes the query. Can only be called once.
-	Exec(ctx context.Context) *Result
-	// Close recovers memory used by the query result.
-	Close()
-	// Statement returns the parsed statement of the query.
-	Statement() Statement
-	// Stats returns statistics about the lifetime of the query.
-	Stats() *stats.TimerGroup
-	// Cancel signals that a running query execution should be aborted.
-	Cancel()
-}
-
-// query implements the Query interface.
-type query struct {
-	// Underlying data provider.
-	queryable storage.Queryable
-	// The original query string.
-	q string
-	// Statement of the parsed query.
-	stmt Statement
-	// Timer stats for the query execution.
-	stats *stats.TimerGroup
-	// Result matrix for reuse.
-	matrix Matrix
-	// Cancellation function for the query.
-	cancel func()
-
-	// The engine against which the query is executed.
-	ng *Engine
-}
-
-// Statement implements the Query interface.
-func (q *query) Statement() Statement {
-	return q.stmt
-}
-
-// Stats implements the Query interface.
-func (q *query) Stats() *stats.TimerGroup {
-	return q.stats
-}
-
-// Cancel implements the Query interface.
-func (q *query) Cancel() {
-	if q.cancel != nil {
-		q.cancel()
-	}
-}
-
-// Close implements the Query interface.
-func (q *query) Close() {
-	for _, s := range q.matrix {
-		putPointSlice(s.Points)
-	}
-}
-
-// Exec implements the Query interface.
-func (q *query) Exec(ctx context.Context) *Result { - if span := opentracing.SpanFromContext(ctx); span != nil { - span.SetTag(queryTag, q.stmt.String()) - } - - res, err := q.ng.exec(ctx, q) - return &Result{Err: err, Value: res} -} - -// contextDone returns an error if the context was canceled or timed out. -func contextDone(ctx context.Context, env string) error { - select { - case <-ctx.Done(): - err := ctx.Err() - switch err { - case context.Canceled: - return ErrQueryCanceled(env) - case context.DeadlineExceeded: - return ErrQueryTimeout(env) - default: - return err - } - default: - return nil - } -} - -// Engine handles the lifetime of queries from beginning to end. -// It is connected to a querier. -type Engine struct { - logger log.Logger - metrics *engineMetrics - timeout time.Duration - gate *queryGate -} - -// NewEngine returns a new engine. -func NewEngine(logger log.Logger, reg prometheus.Registerer, maxConcurrent int, timeout time.Duration) *Engine { - if logger == nil { - logger = log.NewNopLogger() - } - - metrics := &engineMetrics{ - currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queries", - Help: "The current number of queries being executed or waiting.", - }), - maxConcurrentQueries: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queries_concurrent_max", - Help: "The max number of concurrent queries.", - }), - queryQueueTime: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: prometheus.Labels{"slice": "queue_time"}, - }), - queryPrepareTime: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: prometheus.Labels{"slice": "prepare_time"}, - }), - queryInnerEval: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: prometheus.Labels{"slice": "inner_eval"}, - }), - queryResultSort: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: prometheus.Labels{"slice": "result_sort"}, - }), - } - metrics.maxConcurrentQueries.Set(float64(maxConcurrent)) - - if reg != nil { - reg.MustRegister( - metrics.currentQueries, - metrics.maxConcurrentQueries, - metrics.queryQueueTime, - metrics.queryPrepareTime, - metrics.queryInnerEval, - metrics.queryResultSort, - ) - } - return &Engine{ - gate: newQueryGate(maxConcurrent), - timeout: timeout, - logger: logger, - metrics: metrics, - } -} - -// NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(q storage.Queryable, qs string, ts time.Time) (Query, error) { - expr, err := ParseExpr(qs) - if err != nil { - return nil, err - } - qry := ng.newQuery(q, expr, ts, ts, 0) - qry.q = qs - - return qry, nil -} - -// NewRangeQuery returns an evaluation query for the given time range and with -// the resolution set by the interval. 
-func (ng *Engine) NewRangeQuery(q storage.Queryable, qs string, start, end time.Time, interval time.Duration) (Query, error) { - expr, err := ParseExpr(qs) - if err != nil { - return nil, err - } - if expr.Type() != ValueTypeVector && expr.Type() != ValueTypeScalar { - return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", documentedType(expr.Type())) - } - qry := ng.newQuery(q, expr, start, end, interval) - qry.q = qs - - return qry, nil -} - -func (ng *Engine) newQuery(q storage.Queryable, expr Expr, start, end time.Time, interval time.Duration) *query { - es := &EvalStmt{ - Expr: expr, - Start: start, - End: end, - Interval: interval, - } - qry := &query{ - stmt: es, - ng: ng, - stats: stats.NewTimerGroup(), - queryable: q, - } - return qry -} - -// testStmt is an internal helper statement that allows execution -// of an arbitrary function during handling. It is used to test the Engine. -type testStmt func(context.Context) error - -func (testStmt) String() string { return "test statement" } -func (testStmt) stmt() {} - -func (ng *Engine) newTestQuery(f func(context.Context) error) Query { - qry := &query{ - q: "test statement", - stmt: testStmt(f), - ng: ng, - stats: stats.NewTimerGroup(), - } - return qry -} - -// exec executes the query. -// -// At this point per query only one EvalStmt is evaluated. Alert and record -// statements are not handled by the Engine. -func (ng *Engine) exec(ctx context.Context, q *query) (Value, error) { - ng.metrics.currentQueries.Inc() - defer ng.metrics.currentQueries.Dec() - - ctx, cancel := context.WithTimeout(ctx, ng.timeout) - q.cancel = cancel - - execTimer := q.stats.GetTimer(stats.ExecTotalTime).Start() - defer execTimer.Stop() - queueTimer := q.stats.GetTimer(stats.ExecQueueTime).Start() - - if err := ng.gate.Start(ctx); err != nil { - return nil, err - } - defer ng.gate.Done() - - queueTimer.Stop() - ng.metrics.queryQueueTime.Observe(queueTimer.ElapsedTime().Seconds()) - - // Cancel when execution is done or an error was raised. - defer q.cancel() - - const env = "query execution" - - evalTimer := q.stats.GetTimer(stats.EvalTotalTime).Start() - defer evalTimer.Stop() - - // The base context might already be canceled on the first iteration (e.g. during shutdown). - if err := contextDone(ctx, env); err != nil { - return nil, err - } - - switch s := q.Statement().(type) { - case *EvalStmt: - return ng.execEvalStmt(ctx, q, s) - case testStmt: - return nil, s(ctx) - } - - panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) -} - -func timeMilliseconds(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -func durationMilliseconds(d time.Duration) int64 { - return int64(d / (time.Millisecond / time.Nanosecond)) -} - -// execEvalStmt evaluates the expression of an evaluation statement for the given time range. -func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error) { - prepareTimer := query.stats.GetTimer(stats.QueryPreparationTime).Start() - querier, err := ng.populateSeries(ctx, query.queryable, s) - prepareTimer.Stop() - ng.metrics.queryPrepareTime.Observe(prepareTimer.ElapsedTime().Seconds()) - - // XXX(fabxc): the querier returned by populateSeries might be instantiated - // we must not return without closing irrespective of the error. - // TODO: make this semantically saner. 
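For orientation while reviewing this deletion: a minimal sketch of how the removed engine API was driven, using only the signatures visible above (the storage.Queryable and the query string are assumptions supplied by the caller):

// runRangeQuery sketches the removed call sequence: build an engine with a
// concurrency limit and timeout, parse a range query, Exec it exactly once,
// and Close it to return pooled buffers. Assumes the legacy_promql package.
func runRangeQuery(q storage.Queryable) error {
	ng := NewEngine(log.NewNopLogger(), nil, 20, 2*time.Minute)
	qry, err := ng.NewRangeQuery(q, `sum(rate(http_requests_total[5m]))`,
		time.Now().Add(-time.Hour), time.Now(), 15*time.Second)
	if err != nil {
		return err
	}
	defer qry.Close() // Recovers the pooled Point slices in the result matrix.
	res := qry.Exec(context.Background())
	if res.Err != nil {
		return res.Err
	}
	fmt.Println(res.Value)
	return nil
}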
- if querier != nil { - defer querier.Close() - } - - if err != nil { - return nil, err - } - - evalTimer := query.stats.GetTimer(stats.InnerEvalTime).Start() - // Instant evaluation. This is executed as a range evaluation with one step. - if s.Start == s.End && s.Interval == 0 { - start := timeMilliseconds(s.Start) - evaluator := &evaluator{ - startTimestamp: start, - endTimestamp: start, - interval: 1, - ctx: ctx, - logger: ng.logger, - } - val, err := evaluator.Eval(s.Expr) - if err != nil { - return nil, err - } - - evalTimer.Stop() - ng.metrics.queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds()) - - mat, ok := val.(Matrix) - if !ok { - panic(fmt.Errorf("promql.Engine.exec: invalid expression type %q", val.Type())) - } - query.matrix = mat - switch s.Expr.Type() { - case ValueTypeVector: - // Convert matrix with one value per series into vector. - vector := make(Vector, len(mat)) - for i, s := range mat { - // Point might have a different timestamp, force it to the evaluation - // timestamp as that is when we ran the evaluation. - vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}} - } - return vector, nil - case ValueTypeScalar: - return Scalar{V: mat[0].Points[0].V, T: start}, nil - case ValueTypeMatrix: - return mat, nil - default: - panic(fmt.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type())) - } - - } - - // Range evaluation. - evaluator := &evaluator{ - startTimestamp: timeMilliseconds(s.Start), - endTimestamp: timeMilliseconds(s.End), - interval: durationMilliseconds(s.Interval), - ctx: ctx, - logger: ng.logger, - } - val, err := evaluator.Eval(s.Expr) - if err != nil { - return nil, err - } - evalTimer.Stop() - ng.metrics.queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds()) - - mat, ok := val.(Matrix) - if !ok { - panic(fmt.Errorf("promql.Engine.exec: invalid expression type %q", val.Type())) - } - query.matrix = mat - - if err := contextDone(ctx, "expression evaluation"); err != nil { - return nil, err - } - - // TODO(fabxc): order ensured by storage? - // TODO(fabxc): where to ensure metric labels are a copy from the storage internals. 
- sortTimer := query.stats.GetTimer(stats.ResultSortTime).Start() - sort.Sort(mat) - sortTimer.Stop() - - ng.metrics.queryResultSort.Observe(sortTimer.ElapsedTime().Seconds()) - return mat, nil -} - -func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, error) { - var maxOffset time.Duration - Inspect(s.Expr, func(node Node, _ []Node) error { - switch n := node.(type) { - case *VectorSelector: - if maxOffset < LookbackDelta { - maxOffset = LookbackDelta - } - if n.Offset+LookbackDelta > maxOffset { - maxOffset = n.Offset + LookbackDelta - } - case *MatrixSelector: - if maxOffset < n.Range { - maxOffset = n.Range - } - if n.Offset+n.Range > maxOffset { - maxOffset = n.Offset + n.Range - } - } - return nil - }) - - mint := s.Start.Add(-maxOffset) - - querier, err := q.Querier(ctx, timestamp.FromTime(mint), timestamp.FromTime(s.End)) - if err != nil { - return nil, err - } - - Inspect(s.Expr, func(node Node, path []Node) error { - var set storage.SeriesSet - params := &storage.SelectHints{ - Start: timestamp.FromTime(s.Start), - End: timestamp.FromTime(s.End), - Step: int64(s.Interval / time.Millisecond), - } - - switch n := node.(type) { - case *VectorSelector: - params.Start = params.Start - durationMilliseconds(LookbackDelta) - params.Func = extractFuncFromPath(path) - if n.Offset > 0 { - offsetMilliseconds := durationMilliseconds(n.Offset) - params.Start = params.Start - offsetMilliseconds - params.End = params.End - offsetMilliseconds - } - - set = querier.Select(false, params, n.LabelMatchers...) - n.series, err = expandSeriesSet(ctx, set) - if err != nil { - // TODO(fabxc): use multi-error. - level.Error(ng.logger).Log("msg", "error expanding series set", "err", err) - return err - } - - case *MatrixSelector: - params.Func = extractFuncFromPath(path) - // For all matrix queries we want to ensure that we have (end-start) + range selected - // this way we have `range` data before the start time - params.Start = params.Start - durationMilliseconds(n.Range) - if n.Offset > 0 { - offsetMilliseconds := durationMilliseconds(n.Offset) - params.Start = params.Start - offsetMilliseconds - params.End = params.End - offsetMilliseconds - } - - set = querier.Select(false, params, n.LabelMatchers...) - n.series, err = expandSeriesSet(ctx, set) - if err != nil { - level.Error(ng.logger).Log("msg", "error expanding series set", "err", err) - return err - } - } - return nil - }) - return querier, err -} - -// extractFuncFromPath walks up the path and searches for the first instance of -// a function or aggregation. -func extractFuncFromPath(p []Node) string { - if len(p) == 0 { - return "" - } - switch n := p[len(p)-1].(type) { - case *AggregateExpr: - return n.Op.String() - case *Call: - return n.Func.Name - case *BinaryExpr: - // If we hit a binary expression we terminate since we only care about functions - // or aggregations over a single metric. - return "" - } - return extractFuncFromPath(p[:len(p)-1]) -} - -func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, err error) { - for it.Next() { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - res = append(res, it.At()) - } - return res, it.Err() -} - -// An evaluator evaluates given expressions over given fixed timestamps. It -// is attached to an engine through which it connects to a querier and reports -// errors. On timeout or cancellation of its context it terminates. 
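The window arithmetic in populateSeries above is easy to misread, so here it is in isolation (helper name and the millisecond plumbing are ours): instant selectors reach back LookbackDelta, matrix selectors reach back their range, and an offset shifts the whole window into the past.

// selectWindow mirrors the SelectHints computation above. For an instant
// selector rangeMs is zero; for a matrix selector lookbackMs is zero.
func selectWindow(startMs, endMs, lookbackMs, rangeMs, offsetMs int64) (mint, maxt int64) {
	return startMs - lookbackMs - rangeMs - offsetMs, endMs - offsetMs
}

// Example: evaluating foo[2m] offset 1m at t=600000ms selects
// selectWindow(600000, 600000, 0, 120000, 60000) == (420000, 540000).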
-type evaluator struct { - ctx context.Context - - startTimestamp int64 // Start time in milliseconds. - - endTimestamp int64 // End time in milliseconds. - interval int64 // Interval in milliseconds. - - logger log.Logger -} - -// errorf causes a panic with the input formatted into an error. -func (ev *evaluator) errorf(format string, args ...interface{}) { - ev.error(fmt.Errorf(format, args...)) -} - -// error causes a panic with the given error. -func (ev *evaluator) error(err error) { - panic(err) -} - -// recover is the handler that turns panics into returns from the top level of evaluation. -func (ev *evaluator) recover(errp *error) { - e := recover() - if e == nil { - return - } - if err, ok := e.(runtime.Error); ok { - // Print the stack trace but do not inhibit the running application. - buf := make([]byte, 64<<10) - buf = buf[:runtime.Stack(buf, false)] - - level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf)) - *errp = fmt.Errorf("unexpected error: %s", err) - } else { - *errp = e.(error) - } -} - -func (ev *evaluator) Eval(expr Expr) (v Value, err error) { - defer ev.recover(&err) - return ev.eval(expr), nil -} - -// EvalNodeHelper stores extra information and caches for evaluating a single node across steps. -type EvalNodeHelper struct { - // Evaluation timestamp. - ts int64 - // Vector that can be used for output. - out Vector - - // Caches. - // dropMetricName and label_*. - dmn map[uint64]labels.Labels - // signatureFunc. - sigf map[uint64]uint64 - // funcHistogramQuantile. - signatureToMetricWithBuckets map[uint64]*metricWithBuckets - // label_replace. - regex *regexp.Regexp - - // For binary vector matching. - rightSigs map[uint64]Sample - matchedSigs map[uint64]map[uint64]struct{} - resultMetric map[uint64]labels.Labels -} - -// dropMetricName is a cached version of dropMetricName. -func (enh *EvalNodeHelper) dropMetricName(l labels.Labels) labels.Labels { - if enh.dmn == nil { - enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) - } - h := l.Hash() - ret, ok := enh.dmn[h] - if ok { - return ret - } - ret = dropMetricName(l) - enh.dmn[h] = ret - return ret -} - -// signatureFunc is a cached version of signatureFunc. -func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { - if enh.sigf == nil { - enh.sigf = make(map[uint64]uint64, len(enh.out)) - } - f := signatureFunc(on, names...) - return func(l labels.Labels) uint64 { - h := l.Hash() - ret, ok := enh.sigf[h] - if ok { - return ret - } - ret = f(l) - enh.sigf[h] = ret - return ret - } -} - -// rangeEval evaluates the given expressions, and then for each step calls -// the given function with the values computed for each expression at that -// step. The return value is the combination into time series of of all the -// function call results. -func (ev *evaluator) rangeEval(f func([]Value, *EvalNodeHelper) Vector, exprs ...Expr) Matrix { - numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 - matrixes := make([]Matrix, len(exprs)) - origMatrixes := make([]Matrix, len(exprs)) - for i, e := range exprs { - // Functions will take string arguments from the expressions, not the values. - if e != nil && e.Type() != ValueTypeString { - matrixes[i] = ev.eval(e).(Matrix) - - // Keep a copy of the original point slices so that they - // can be returned to the pool. 
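The dmn and sigf maps in EvalNodeHelper above are per-node memoization keyed by label hash; the pattern in isolation looks like this (a generic sketch of ours, not part of the removed file):

// memoizeByHash caches an expensive per-labelset computation for the
// lifetime of one node evaluation, as dropMetricName and signatureFunc do.
// Like the original, it ignores the possibility of Hash() collisions.
func memoizeByHash[T any](f func(labels.Labels) T) func(labels.Labels) T {
	cache := map[uint64]T{}
	return func(l labels.Labels) T {
		h := l.Hash()
		if v, ok := cache[h]; ok {
			return v
		}
		v := f(l)
		cache[h] = v
		return v
	}
}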
- origMatrixes[i] = make(Matrix, len(matrixes[i])) - copy(origMatrixes[i], matrixes[i]) - } - } - - vectors := make([]Vector, len(exprs)) // Input vectors for the function. - args := make([]Value, len(exprs)) // Argument to function. - // Create an output vector that is as big as the input matrix with - // the most time series. - biggestLen := 1 - for i := range exprs { - vectors[i] = make(Vector, 0, len(matrixes[i])) - if len(matrixes[i]) > biggestLen { - biggestLen = len(matrixes[i]) - } - } - enh := &EvalNodeHelper{out: make(Vector, 0, biggestLen)} - seriess := make(map[uint64]Series, biggestLen) // Output series by series hash. - for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - // Gather input vectors for this timestamp. - for i := range exprs { - vectors[i] = vectors[i][:0] - for si, series := range matrixes[i] { - for _, point := range series.Points { - if point.T == ts { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point}) - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Points = series.Points[1:] - } - break - } - } - args[i] = vectors[i] - } - // Make the function call. - enh.ts = ts - result := f(args, enh) - enh.out = result[:0] // Reuse result vector. - // If this could be an instant query, shortcut so as not to change sort order. - if ev.endTimestamp == ev.startTimestamp { - mat := make(Matrix, len(result)) - for i, s := range result { - s.Point.T = ts - mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} - } - return mat - } - // Add samples in output vector to output series. - for _, sample := range result { - h := sample.Metric.Hash() - ss, ok := seriess[h] - if !ok { - ss = Series{ - Metric: sample.Metric, - Points: getPointSlice(numSteps), - } - } - sample.Point.T = ts - ss.Points = append(ss.Points, sample.Point) - seriess[h] = ss - } - } - // Reuse the original point slices. - for _, m := range origMatrixes { - for _, s := range m { - putPointSlice(s.Points) - } - } - // Assemble the output matrix. - mat := make(Matrix, 0, len(seriess)) - for _, ss := range seriess { - mat = append(mat, ss) - } - return mat -} - -// eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr Expr) Value { - // This is the top-level evaluation method. - // Thus, we check for timeout/cancellation here. - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { - ev.error(err) - } - numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 - - switch e := expr.(type) { - case *AggregateExpr: - if s, ok := e.Param.(*StringLiteral); ok { - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh) - }, e.Expr) - } - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - var param float64 - if e.Param != nil { - param = v[0].(Vector)[0].V - } - return ev.aggregation(e.Op, e.Grouping, e.Without, param, v[1].(Vector), enh) - }, e.Param, e.Expr) - - case *Call: - if e.Func.Name == "timestamp" { - // Matrix evaluation always returns the evaluation time, - // so this function needs special handling when given - // a vector selector. - vs, ok := e.Args[0].(*VectorSelector) - if ok { - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return e.Func.Call([]Value{ev.vectorSelector(vs, enh.ts)}, e.Args, enh) - }) - } - } - // Check if the function has a matrix argument. 
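A small but load-bearing detail of rangeEval above: both endpoints of the range are inclusive, which is where the "+ 1" in the step count comes from (standalone restatement, helper name ours):

// numSteps as computed at the top of rangeEval (and again in eval).
func numSteps(startMs, endMs, intervalMs int64) int {
	return int((endMs-startMs)/intervalMs) + 1
}

// numSteps(0, 60000, 15000) == 5: samples at 0s, 15s, 30s, 45s and 60s.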
- var matrixArgIndex int - var matrixArg bool - for i, a := range e.Args { - _, ok := a.(*MatrixSelector) - if ok { - matrixArgIndex = i - matrixArg = true - break - } - } - if !matrixArg { - // Does not have a matrix argument. - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return e.Func.Call(v, e.Args, enh) - }, e.Args...) - } - - inArgs := make([]Value, len(e.Args)) - // Evaluate any non-matrix arguments. - otherArgs := make([]Matrix, len(e.Args)) - otherInArgs := make([]Vector, len(e.Args)) - for i, e := range e.Args { - if i != matrixArgIndex { - otherArgs[i] = ev.eval(e).(Matrix) - otherInArgs[i] = Vector{Sample{}} - inArgs[i] = otherInArgs[i] - } - } - - sel := e.Args[matrixArgIndex].(*MatrixSelector) - mat := make(Matrix, 0, len(sel.series)) // Output matrix. - offset := durationMilliseconds(sel.Offset) - selRange := durationMilliseconds(sel.Range) - stepRange := selRange - if stepRange > ev.interval { - stepRange = ev.interval - } - // Reuse objects across steps to save memory allocations. - points := getPointSlice(16) - inMatrix := make(Matrix, 1) - inArgs[matrixArgIndex] = inMatrix - enh := &EvalNodeHelper{out: make(Vector, 0, 1)} - // Process all the calls for one time series at a time. - it := storage.NewBuffer(selRange) - for i, s := range sel.series { - points = points[:0] - it.Reset(s.Iterator()) - ss := Series{ - // For all range vector functions, the only change to the - // output labels is dropping the metric name so just do - // it once here. - Metric: dropMetricName(sel.series[i].Labels()), - Points: getPointSlice(numSteps), - } - inMatrix[0].Metric = sel.series[i].Labels() - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { - step++ - // Set the non-matrix arguments. - // They are scalar, so it is safe to use the step number - // when looking up the argument, as there will be no gaps. - for j := range e.Args { - if j != matrixArgIndex { - otherInArgs[j][0].V = otherArgs[j][0].Points[step].V - } - } - maxt := ts - offset - mint := maxt - selRange - // Evaluate the matrix selector for this series for this step. - points = ev.matrixIterSlice(it, mint, maxt, points) - if len(points) == 0 { - continue - } - inMatrix[0].Points = points - enh.ts = ts - // Make the function call. - outVec := e.Func.Call(inArgs, e.Args, enh) - enh.out = outVec[:0] - if len(outVec) > 0 { - ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts}) - } - // Only buffer stepRange milliseconds from the second step on. 
- it.ReduceDelta(stepRange) - } - if len(ss.Points) > 0 { - mat = append(mat, ss) - } - } - putPointSlice(points) - return mat - - case *ParenExpr: - return ev.eval(e.Expr) - - case *UnaryExpr: - mat := ev.eval(e.Expr).(Matrix) - if e.Op == itemSUB { - for i := range mat { - mat[i].Metric = dropMetricName(mat[i].Metric) - for j := range mat[i].Points { - mat[i].Points[j].V = -mat[i].Points[j].V - } - } - } - return mat - - case *BinaryExpr: - switch lt, rt := e.LHS.Type(), e.RHS.Type(); { - case lt == ValueTypeScalar && rt == ValueTypeScalar: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) - return append(enh.out, Sample{Point: Point{V: val}}) - }, e.LHS, e.RHS) - case lt == ValueTypeVector && rt == ValueTypeVector: - switch e.Op { - case itemLAND: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) - }, e.LHS, e.RHS) - case itemLOR: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) - }, e.LHS, e.RHS) - case itemLUnless: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) - }, e.LHS, e.RHS) - default: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh) - }, e.LHS, e.RHS) - } - - case lt == ValueTypeVector && rt == ValueTypeScalar: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh) - }, e.LHS, e.RHS) - - case lt == ValueTypeScalar && rt == ValueTypeVector: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh) - }, e.LHS, e.RHS) - } - - case *NumberLiteral: - return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { - return append(enh.out, Sample{Point: Point{V: e.Val}}) - }) - - case *VectorSelector: - mat := make(Matrix, 0, len(e.series)) - it := storage.NewBuffer(durationMilliseconds(LookbackDelta)) - for i, s := range e.series { - it.Reset(s.Iterator()) - ss := Series{ - Metric: e.series[i].Labels(), - Points: getPointSlice(numSteps), - } - - for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - _, v, ok := ev.vectorSelectorSingle(it, e, ts) - if ok { - ss.Points = append(ss.Points, Point{V: v, T: ts}) - } - } - - if len(ss.Points) > 0 { - mat = append(mat, ss) - } - } - return mat - - case *MatrixSelector: - if ev.startTimestamp != ev.endTimestamp { - panic(fmt.Errorf("cannot do range evaluation of matrix selector")) - } - return ev.matrixSelector(e) - } - - panic(fmt.Errorf("unhandled expression of type: %T", expr)) -} - -// vectorSelector evaluates a *VectorSelector expression. 
-func (ev *evaluator) vectorSelector(node *VectorSelector, ts int64) Vector { - var ( - vec = make(Vector, 0, len(node.series)) - ) - - it := storage.NewBuffer(durationMilliseconds(LookbackDelta)) - for i, s := range node.series { - it.Reset(s.Iterator()) - - t, v, ok := ev.vectorSelectorSingle(it, node, ts) - if ok { - vec = append(vec, Sample{ - Metric: node.series[i].Labels(), - Point: Point{V: v, T: t}, - }) - } - - } - return vec -} - -// vectorSelectorSingle evaluates a instant vector for the iterator of one time series. -func (ev *evaluator) vectorSelectorSingle(it *storage.BufferedSeriesIterator, node *VectorSelector, ts int64) (int64, float64, bool) { - refTime := ts - durationMilliseconds(node.Offset) - var t int64 - var v float64 - - ok := it.Seek(refTime) - if !ok { - if it.Err() != nil { - ev.error(it.Err()) - } - } - - if ok { - t, v = it.At() - } - - if !ok || t > refTime { - t, v, ok = it.PeekBack(1) - if !ok || t < refTime-durationMilliseconds(LookbackDelta) { - return 0, 0, false - } - } - if value.IsStaleNaN(v) { - return 0, 0, false - } - return t, v, true -} - -var pointPool = sync.Pool{} - -func getPointSlice(sz int) []Point { - p := pointPool.Get() - if p != nil { - return p.([]Point) - } - return make([]Point, 0, sz) -} - -func putPointSlice(p []Point) { - pointPool.Put(p[:0]) -} - -// matrixSelector evaluates a *MatrixSelector expression. -func (ev *evaluator) matrixSelector(node *MatrixSelector) Matrix { - var ( - offset = durationMilliseconds(node.Offset) - maxt = ev.startTimestamp - offset - mint = maxt - durationMilliseconds(node.Range) - matrix = make(Matrix, 0, len(node.series)) - ) - - it := storage.NewBuffer(durationMilliseconds(node.Range)) - for i, s := range node.series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { - ev.error(err) - } - it.Reset(s.Iterator()) - ss := Series{ - Metric: node.series[i].Labels(), - } - - ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16)) - - if len(ss.Points) > 0 { - matrix = append(matrix, ss) - } else { - putPointSlice(ss.Points) - } - } - return matrix -} - -// matrixIterSlice populates a matrix vector covering the requested range for a -// single time series, with points retrieved from an iterator. -// -// As an optimization, the matrix vector may already contain points of the same -// time series from the evaluation of an earlier step (with lower mint and maxt -// values). Any such points falling before mint are discarded; points that fall -// into the [mint, maxt] range are retained; only points with later timestamps -// are populated from the iterator. -func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Point) []Point { - if len(out) > 0 && out[len(out)-1].T >= mint { - // There is an overlap between previous and current ranges, retain common - // points. In most such cases: - // (a) the overlap is significantly larger than the eval step; and/or - // (b) the number of samples is relatively small. - // so a linear search will be as fast as a binary search. - var drop int - for drop = 0; out[drop].T < mint; drop++ { - } - copy(out, out[drop:]) - out = out[:len(out)-drop] - // Only append points with timestamps after the last timestamp we have. 
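The pool above is a bare sync.Pool of point slices, so the contract is purely conventional: take with getPointSlice, append freely, hand the slice back exactly once, never touch it afterwards. A hedged usage sketch in the same package:

// buildSeries shows the intended lifecycle of a pooled slice (sketch only).
func buildSeries(m labels.Labels) Series {
	pts := getPointSlice(16) // Capacity hint only; length starts at zero.
	pts = append(pts, Point{T: 1000, V: 4.2})
	return Series{Metric: m, Points: pts}
	// The eventual owner recycles via putPointSlice, e.g. in query.Close.
}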
- mint = out[len(out)-1].T + 1 - } else { - out = out[:0] - } - - ok := it.Seek(maxt) - if !ok { - if it.Err() != nil { - ev.error(it.Err()) - } - } - - buf := it.Buffer() - for buf.Next() { - t, v := buf.At() - if value.IsStaleNaN(v) { - continue - } - // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { - out = append(out, Point{T: t, V: v}) - } - } - // The seeked sample might also be in the range. - if ok { - t, v := it.At() - if t == maxt && !value.IsStaleNaN(v) { - out = append(out, Point{T: t, V: v}) - } - } - return out -} - -func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *VectorMatching, enh *EvalNodeHelper) Vector { - if matching.Card != CardManyToMany { - panic("set operations must only use many-to-many matching") - } - sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) - - // The set of signatures for the right-hand side Vector. - rightSigs := map[uint64]struct{}{} - // Add all rhs samples to a map so we can easily find matches later. - for _, rs := range rhs { - rightSigs[sigf(rs.Metric)] = struct{}{} - } - - for _, ls := range lhs { - // If there's a matching entry in the right-hand side Vector, add the sample. - if _, ok := rightSigs[sigf(ls.Metric)]; ok { - enh.out = append(enh.out, ls) - } - } - return enh.out -} - -func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *VectorMatching, enh *EvalNodeHelper) Vector { - if matching.Card != CardManyToMany { - panic("set operations must only use many-to-many matching") - } - sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) - - leftSigs := map[uint64]struct{}{} - // Add everything from the left-hand-side Vector. - for _, ls := range lhs { - leftSigs[sigf(ls.Metric)] = struct{}{} - enh.out = append(enh.out, ls) - } - // Add all right-hand side elements which have not been added from the left-hand side. - for _, rs := range rhs { - if _, ok := leftSigs[sigf(rs.Metric)]; !ok { - enh.out = append(enh.out, rs) - } - } - return enh.out -} - -func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *VectorMatching, enh *EvalNodeHelper) Vector { - if matching.Card != CardManyToMany { - panic("set operations must only use many-to-many matching") - } - sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) - - rightSigs := map[uint64]struct{}{} - for _, rs := range rhs { - rightSigs[sigf(rs.Metric)] = struct{}{} - } - - for _, ls := range lhs { - if _, ok := rightSigs[sigf(ls.Metric)]; !ok { - enh.out = append(enh.out, ls) - } - } - return enh.out -} - -// VectorBinop evaluates a binary operation between two Vectors, excluding set operators. -func (ev *evaluator) VectorBinop(op ItemType, lhs, rhs Vector, matching *VectorMatching, returnBool bool, enh *EvalNodeHelper) Vector { - if matching.Card == CardManyToMany { - panic("many-to-many only allowed for set operators") - } - sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) - - // The control flow below handles one-to-one or many-to-one matching. - // For one-to-many, swap sidedness and account for the swap when calculating - // values. - if matching.Card == CardOneToMany { - lhs, rhs = rhs, lhs - } - - // All samples from the rhs hashed by the matching label/values. - if enh.rightSigs == nil { - enh.rightSigs = make(map[uint64]Sample, len(enh.out)) - } else { - for k := range enh.rightSigs { - delete(enh.rightSigs, k) - } - } - rightSigs := enh.rightSigs - - // Add all rhs samples to a map so we can easily find matches later. 
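VectorAnd, VectorOr and VectorUnless above are all the same two-pass map trick with different membership rules; stripped of the PromQL types it reduces to this (generic sketch, ours):

// setOp keeps lhs elements according to whether their signature occurs in
// rhs: and == keep if present, unless == keep if absent. VectorOr is the
// same idea with the membership map built from the left side instead.
func setOp[K comparable, V any](lhs, rhs []V, sig func(V) K, keepIfPresent bool) []V {
	right := make(map[K]struct{}, len(rhs))
	for _, r := range rhs {
		right[sig(r)] = struct{}{}
	}
	out := make([]V, 0, len(lhs))
	for _, l := range lhs {
		if _, ok := right[sig(l)]; ok == keepIfPresent {
			out = append(out, l)
		}
	}
	return out
}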
- for _, rs := range rhs { - sig := sigf(rs.Metric) - // The rhs is guaranteed to be the 'one' side. Having multiple samples - // with the same signature means that the matching is many-to-many. - if _, found := rightSigs[sig]; found { - // Many-to-many matching not allowed. - ev.errorf("many-to-many matching not allowed: matching labels must be unique on one side") - } - rightSigs[sig] = rs - } - - // Tracks the match-signature. For one-to-one operations the value is nil. For many-to-one - // the value is a set of signatures to detect duplicated result elements. - if enh.matchedSigs == nil { - enh.matchedSigs = make(map[uint64]map[uint64]struct{}, len(rightSigs)) - } else { - for k := range enh.matchedSigs { - delete(enh.matchedSigs, k) - } - } - matchedSigs := enh.matchedSigs - - // For all lhs samples find a respective rhs sample and perform - // the binary operation. - for _, ls := range lhs { - sig := sigf(ls.Metric) - - rs, found := rightSigs[sig] // Look for a match in the rhs Vector. - if !found { - continue - } - - // Account for potentially swapped sidedness. - vl, vr := ls.V, rs.V - if matching.Card == CardOneToMany { - vl, vr = vr, vl - } - value, keep := vectorElemBinop(op, vl, vr) - if returnBool { - if keep { - value = 1.0 - } else { - value = 0.0 - } - } else if !keep { - continue - } - metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) - - insertedSigs, exists := matchedSigs[sig] - if matching.Card == CardOneToOne { - if exists { - ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)") - } - matchedSigs[sig] = nil // Set existence to true. - } else { - // In many-to-one matching the grouping labels have to ensure a unique metric - // for the result Vector. Check whether those labels have already been added for - // the same matching labels. - insertSig := metric.Hash() - - if !exists { - insertedSigs = map[uint64]struct{}{} - matchedSigs[sig] = insertedSigs - } else if _, duplicate := insertedSigs[insertSig]; duplicate { - ev.errorf("multiple matches for labels: grouping labels must ensure unique matches") - } - insertedSigs[insertSig] = struct{}{} - } - - enh.out = append(enh.out, Sample{ - Metric: metric, - Point: Point{V: value}, - }) - } - return enh.out -} - -// signatureFunc returns a function that calculates the signature for a metric -// ignoring the provided labels. If on, then the given labels are only used instead. -func signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { - sort.Strings(names) - if on { - return func(lset labels.Labels) uint64 { - h, _ := lset.HashForLabels(make([]byte, 0, 1024), names...) - return h - } - } - return func(lset labels.Labels) uint64 { - h, _ := lset.HashWithoutLabels(make([]byte, 0, 1024), names...) - return h - } -} - -// resultMetric returns the metric for the given sample(s) based on the Vector -// binary operation and the matching options. -func resultMetric(lhs, rhs labels.Labels, op ItemType, matching *VectorMatching, enh *EvalNodeHelper) labels.Labels { - if enh.resultMetric == nil { - enh.resultMetric = make(map[uint64]labels.Labels, len(enh.out)) - } - // op and matching are always the same for a given node, so - // there's no need to include them in the hash key. - // If the lhs and rhs are the same then the xor would be 0, - // so add in one side to protect against that. 
- lh := lhs.Hash() - h := (lh ^ rhs.Hash()) + lh - if ret, ok := enh.resultMetric[h]; ok { - return ret - } - - lb := labels.NewBuilder(lhs) - - if shouldDropMetricName(op) { - lb.Del(labels.MetricName) - } - - if matching.Card == CardOneToOne { - if matching.On { - Outer: - for _, l := range lhs { - for _, n := range matching.MatchingLabels { - if l.Name == n { - continue Outer - } - } - lb.Del(l.Name) - } - } else { - lb.Del(matching.MatchingLabels...) - } - } - for _, ln := range matching.Include { - // Included labels from the `group_x` modifier are taken from the "one"-side. - if v := rhs.Get(ln); v != "" { - lb.Set(ln, v) - } else { - lb.Del(ln) - } - } - - ret := lb.Labels() - enh.resultMetric[h] = ret - return ret -} - -// VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. -func (ev *evaluator) VectorscalarBinop(op ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { - for _, lhsSample := range lhs { - lv, rv := lhsSample.V, rhs.V - // lhs always contains the Vector. If the original position was different - // swap for calculating the value. - if swap { - lv, rv = rv, lv - } - value, keep := vectorElemBinop(op, lv, rv) - if returnBool { - if keep { - value = 1.0 - } else { - value = 0.0 - } - keep = true - } - if keep { - lhsSample.V = value - if shouldDropMetricName(op) || returnBool { - lhsSample.Metric = enh.dropMetricName(lhsSample.Metric) - } - enh.out = append(enh.out, lhsSample) - } - } - return enh.out -} - -func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels() -} - -// scalarBinop evaluates a binary operation between two Scalars. -func scalarBinop(op ItemType, lhs, rhs float64) float64 { - switch op { - case itemADD: - return lhs + rhs - case itemSUB: - return lhs - rhs - case itemMUL: - return lhs * rhs - case itemDIV: - return lhs / rhs - case itemPOW: - return math.Pow(lhs, rhs) - case itemMOD: - return math.Mod(lhs, rhs) - case itemEQL: - return btos(lhs == rhs) - case itemNEQ: - return btos(lhs != rhs) - case itemGTR: - return btos(lhs > rhs) - case itemLSS: - return btos(lhs < rhs) - case itemGTE: - return btos(lhs >= rhs) - case itemLTE: - return btos(lhs <= rhs) - } - panic(fmt.Errorf("operator %q not allowed for Scalar operations", op)) -} - -// vectorElemBinop evaluates a binary operation between two Vector elements. -func vectorElemBinop(op ItemType, lhs, rhs float64) (float64, bool) { - switch op { - case itemADD: - return lhs + rhs, true - case itemSUB: - return lhs - rhs, true - case itemMUL: - return lhs * rhs, true - case itemDIV: - return lhs / rhs, true - case itemPOW: - return math.Pow(lhs, rhs), true - case itemMOD: - return math.Mod(lhs, rhs), true - case itemEQL: - return lhs, lhs == rhs - case itemNEQ: - return lhs, lhs != rhs - case itemGTR: - return lhs, lhs > rhs - case itemLSS: - return lhs, lhs < rhs - case itemGTE: - return lhs, lhs >= rhs - case itemLTE: - return lhs, lhs <= rhs - } - panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) -} - -// intersection returns the metric of common label/value pairs of two input metrics. 
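Worth spelling out about vectorElemBinop above: for comparison operators the first return value is always the left-hand sample's value and the second says whether the sample survives filtering; the bool modifier (handled by the callers) turns that into a 0/1 value that always survives. A tiny in-package illustration:

// Without `bool`: 3 > 5 drops the sample (keep == false), value unused.
// With `bool`:    the caller rewrites this to value 0 and keeps the sample.
func exampleComparison() (float64, bool) {
	return vectorElemBinop(itemGTR, 3, 5) // returns (3, false)
}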
-func intersection(ls1, ls2 labels.Labels) labels.Labels {
-	res := make(labels.Labels, 0, 5)
-
-	for _, l1 := range ls1 {
-		for _, l2 := range ls2 {
-			if l1.Name == l2.Name && l1.Value == l2.Value {
-				res = append(res, l1)
-				continue
-			}
-		}
-	}
-	return res
-}
-
-type groupedAggregation struct {
-	labels labels.Labels
-	value float64
-	valuesSquaredSum float64
-	groupCount int
-	heap vectorByValueHeap
-	reverseHeap vectorByReverseValueHeap
-}
-
-// aggregation evaluates an aggregation operation on a Vector.
-func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, param interface{}, vec Vector, enh *EvalNodeHelper) Vector {
-
-	result := map[uint64]*groupedAggregation{}
-	var k int64
-	if op == itemTopK || op == itemBottomK {
-		f := param.(float64)
-		if !convertibleToInt64(f) {
-			ev.errorf("Scalar value %v overflows int64", f)
-		}
-		k = int64(f)
-		if k < 1 {
-			return Vector{}
-		}
-	}
-	var q float64
-	if op == itemQuantile {
-		q = param.(float64)
-	}
-	var valueLabel string
-	if op == itemCountValues {
-		valueLabel = param.(string)
-		if !without {
-			grouping = append(grouping, valueLabel)
-		}
-	}
-
-	sort.Strings(grouping)
-	buf := make([]byte, 0, 1024)
-	for _, s := range vec {
-		metric := s.Metric
-
-		if op == itemCountValues {
-			lb := labels.NewBuilder(metric)
-			lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64))
-			metric = lb.Labels()
-		}
-
-		var (
-			groupingKey uint64
-		)
-		if without {
-			groupingKey, buf = metric.HashWithoutLabels(buf, grouping...)
-		} else {
-			groupingKey, buf = metric.HashForLabels(buf, grouping...)
-		}
-
-		group, ok := result[groupingKey]
-		// Add a new group if it doesn't exist.
-		if !ok {
-			var m labels.Labels
-
-			if without {
-				lb := labels.NewBuilder(metric)
-				lb.Del(grouping...)
-				lb.Del(labels.MetricName)
-				m = lb.Labels()
-			} else {
-				m = make(labels.Labels, 0, len(grouping))
-				for _, l := range metric {
-					for _, n := range grouping {
-						if l.Name == n {
-							m = append(m, l)
-							break
-						}
-					}
-				}
-				sort.Sort(m)
-			}
-			result[groupingKey] = &groupedAggregation{
-				labels: m,
-				value: s.V,
-				valuesSquaredSum: s.V * s.V,
-				groupCount: 1,
-			}
-			inputVecLen := int64(len(vec))
-			resultSize := k
-			if k > inputVecLen {
-				resultSize = inputVecLen
-			}
-			if op == itemTopK || op == itemQuantile {
-				result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize)
-				heap.Push(&result[groupingKey].heap, &Sample{
-					Point: Point{V: s.V},
-					Metric: s.Metric,
-				})
-			} else if op == itemBottomK {
-				result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize)
-				heap.Push(&result[groupingKey].reverseHeap, &Sample{
-					Point: Point{V: s.V},
-					Metric: s.Metric,
-				})
-			}
-			continue
-		}
-
-		switch op {
-		case itemSum:
-			group.value += s.V
-
-		case itemAvg:
-			group.value += s.V
-			group.groupCount++
-
-		case itemMax:
-			if group.value < s.V || math.IsNaN(group.value) {
-				group.value = s.V
-			}
-
-		case itemMin:
-			if group.value > s.V || math.IsNaN(group.value) {
-				group.value = s.V
-			}
-
-		case itemCount, itemCountValues:
-			group.groupCount++
-
-		case itemStdvar, itemStddev:
-			group.value += s.V
-			group.valuesSquaredSum += s.V * s.V
-			group.groupCount++
-
-		case itemTopK:
-			if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) {
-				if int64(len(group.heap)) == k {
-					heap.Pop(&group.heap)
-				}
-				heap.Push(&group.heap, &Sample{
-					Point: Point{V: s.V},
-					Metric: s.Metric,
-				})
-			}
-
-		case itemBottomK:
-			if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) {
-				if int64(len(group.reverseHeap)) == k {
-					heap.Pop(&group.reverseHeap)
-				}
-				heap.Push(&group.reverseHeap, &Sample{
-					Point: Point{V: s.V},
-					Metric: s.Metric,
-				})
-			}
-
-		case itemQuantile:
-			group.heap = append(group.heap, s)
-
-		default:
-			panic(fmt.Errorf("expected aggregation operator but got %q", op))
-		}
-	}
-
-	// Construct the result Vector from the aggregated groups.
-	for _, aggr := range result {
-		switch op {
-		case itemAvg:
-			aggr.value = aggr.value / float64(aggr.groupCount)
-
-		case itemCount, itemCountValues:
-			aggr.value = float64(aggr.groupCount)
-
-		case itemStdvar:
-			avg := aggr.value / float64(aggr.groupCount)
-			aggr.value = aggr.valuesSquaredSum/float64(aggr.groupCount) - avg*avg
-
-		case itemStddev:
-			avg := aggr.value / float64(aggr.groupCount)
-			aggr.value = math.Sqrt(aggr.valuesSquaredSum/float64(aggr.groupCount) - avg*avg)
-
-		case itemTopK:
-			// The heap keeps the lowest value on top, so reverse it.
-			sort.Sort(sort.Reverse(aggr.heap))
-			for _, v := range aggr.heap {
-				enh.out = append(enh.out, Sample{
-					Metric: v.Metric,
-					Point: Point{V: v.V},
-				})
-			}
-			continue // Bypass default append.
-
-		case itemBottomK:
-			// The heap keeps the lowest value on top, so reverse it.
-			sort.Sort(sort.Reverse(aggr.reverseHeap))
-			for _, v := range aggr.reverseHeap {
-				enh.out = append(enh.out, Sample{
-					Metric: v.Metric,
-					Point: Point{V: v.V},
-				})
-			}
-			continue // Bypass default append.
-
-		case itemQuantile:
-			aggr.value = quantile(q, aggr.heap)
-
-		default:
-			// For other aggregations, we already have the right value.
-		}
-
-		enh.out = append(enh.out, Sample{
-			Metric: aggr.labels,
-			Point: Point{V: aggr.value},
-		})
-	}
-	return enh.out
-}
-
-// btos returns 1 if b is true, 0 otherwise.
-func btos(b bool) float64 {
-	if b {
-		return 1
-	}
-	return 0
-}
-
-// shouldDropMetricName returns whether the metric name should be dropped in the
-// result of the op operation.
-func shouldDropMetricName(op ItemType) bool {
-	switch op {
-	case itemADD, itemSUB, itemDIV, itemMUL, itemMOD:
-		return true
-	default:
-		return false
-	}
-}
-
-// LookbackDelta determines the time since the last sample after which a time
-// series is considered stale.
-var LookbackDelta = 5 * time.Minute
-
-// A queryGate controls the maximum number of concurrently running and waiting queries.
-type queryGate struct {
-	ch chan struct{}
-}
-
-// newQueryGate returns a query gate that limits the number of queries
-// being concurrently executed.
-func newQueryGate(length int) *queryGate {
-	return &queryGate{
-		ch: make(chan struct{}, length),
-	}
-}
-
-// Start blocks until the gate has a free spot or the context is done.
-func (g *queryGate) Start(ctx context.Context) error {
-	select {
-	case <-ctx.Done():
-		return contextDone(ctx, "query queue")
-	case g.ch <- struct{}{}:
-		return nil
-	}
-}
-
-// Done releases a single spot in the gate.
-func (g *queryGate) Done() {
-	select {
-	case <-g.ch:
-	default:
-		panic("engine.queryGate.Done: more operations done than started")
-	}
-}
-
-// documentedType returns the internal type to the equivalent
-// user facing terminology as defined in the documentation.
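The gate above is a plain buffered-channel semaphore. A typical caller (sketch, helper name ours) brackets work between Start and Done so that cancellation propagates through the context:

// withGate runs work while holding one of the gate's slots.
func withGate(ctx context.Context, g *queryGate, work func() error) error {
	if err := g.Start(ctx); err != nil { // Blocks until a slot frees or ctx ends.
		return err
	}
	defer g.Done() // Always release the slot, even if work panics.
	return work()
}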
-func documentedType(t ValueType) string { - switch t { - case "vector": - return "instant vector" - case "matrix": - return "range vector" - default: - return string(t) - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/functions.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/functions.go deleted file mode 100644 index d5c53b9f5..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/functions.go +++ /dev/null @@ -1,1269 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promql - -import ( - "fmt" - "math" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" -) - -// Function represents a function of the expression language and is -// used by function nodes. -type Function struct { - Name string - ArgTypes []ValueType - Variadic int - ReturnType ValueType - - // vals is a list of the evaluated arguments for the function call. - // For range vectors it will be a Matrix with one series, instant vectors a - // Vector, scalars a Vector with one series whose value is the scalar - // value,and nil for strings. - // args are the original arguments to the function, where you can access - // matrixSelectors, vectorSelectors, and StringLiterals. - // enh.out is a pre-allocated empty vector that you may use to accumulate - // output before returning it. The vectors in vals should not be returned.a - // Range vector functions need only return a vector with the right value, - // the metric and timestamp are not neded. - // Instant vector functions need only return a vector with the right values and - // metrics, the timestamp are not needed. - // Scalar results should be returned as the value of a sample in a Vector. - Call func(vals []Value, args Expressions, enh *EvalNodeHelper) Vector -} - -// === time() float64 === -func funcTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{Point: Point{ - V: float64(enh.ts) / 1000, - }}} -} - -// extrapolatedRate is a utility function for rate/increase/delta. -// It calculates the rate (allowing for counter resets if isCounter is true), -// extrapolates if the first/last sample is close to the boundary, and returns -// the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals []Value, args Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector { - ms := args[0].(*MatrixSelector) - - var ( - matrix = vals[0].(Matrix) - rangeStart = enh.ts - durationMilliseconds(ms.Range+ms.Offset) - rangeEnd = enh.ts - durationMilliseconds(ms.Offset) - ) - - for _, samples := range matrix { - // No sense in trying to compute a rate without at least two points. Drop - // this Vector element. 
- if len(samples.Points) < 2 { - continue - } - var ( - counterCorrection float64 - lastValue float64 - ) - for _, sample := range samples.Points { - if isCounter && sample.V < lastValue { - counterCorrection += lastValue - } - lastValue = sample.V - } - resultValue := lastValue - samples.Points[0].V + counterCorrection - - // Duration between first/last samples and boundary of range. - durationToStart := float64(samples.Points[0].T-rangeStart) / 1000 - durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000 - - sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 - averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) - - if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { - // Counters cannot be negative. If we have any slope at - // all (i.e. resultValue went up), we can extrapolate - // the zero point of the counter. If the duration to the - // zero point is shorter than the durationToStart, we - // take the zero point as the start of the series, - // thereby avoiding extrapolation to negative counter - // values. - durationToZero := sampledInterval * (samples.Points[0].V / resultValue) - if durationToZero < durationToStart { - durationToStart = durationToZero - } - } - - // If the first/last samples are close to the boundaries of the range, - // extrapolate the result. This is as we expect that another sample - // will exist given the spacing between samples we've seen thus far, - // with an allowance for noise. - extrapolationThreshold := averageDurationBetweenSamples * 1.1 - extrapolateToInterval := sampledInterval - - if durationToStart < extrapolationThreshold { - extrapolateToInterval += durationToStart - } else { - extrapolateToInterval += averageDurationBetweenSamples / 2 - } - if durationToEnd < extrapolationThreshold { - extrapolateToInterval += durationToEnd - } else { - extrapolateToInterval += averageDurationBetweenSamples / 2 - } - resultValue = resultValue * (extrapolateToInterval / sampledInterval) - if isRate { - resultValue = resultValue / ms.Range.Seconds() - } - - enh.out = append(enh.out, Sample{ - Point: Point{V: resultValue}, - }) - } - return enh.out -} - -// === delta(Matrix ValueTypeMatrix) Vector === -func funcDelta(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return extrapolatedRate(vals, args, enh, false, false) -} - -// === rate(node ValueTypeMatrix) Vector === -func funcRate(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return extrapolatedRate(vals, args, enh, true, true) -} - -// === increase(node ValueTypeMatrix) Vector === -func funcIncrease(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return extrapolatedRate(vals, args, enh, true, false) -} - -// === irate(node ValueTypeMatrix) Vector === -func funcIrate(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return instantValue(vals, enh.out, true) -} - -// === idelta(node model.ValMatric) Vector === -func funcIdelta(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return instantValue(vals, enh.out, false) -} - -func instantValue(vals []Value, out Vector, isRate bool) Vector { - for _, samples := range vals[0].(Matrix) { - // No sense in trying to compute a rate without at least two points. Drop - // this Vector element. 
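The counter-reset correction in extrapolatedRate above deserves a worked example; in isolation (helper ours), before any boundary extrapolation is applied:

// counterIncrease applies the same reset correction as extrapolatedRate:
// each time the counter drops, the value it dropped from is added back in.
func counterIncrease(vs []float64) float64 {
	var correction, last float64
	for _, v := range vs {
		if v < last {
			correction += last
		}
		last = v
	}
	return last - vs[0] + correction
}

// counterIncrease([]float64{10, 20, 5, 15}) == 15 - 10 + 20 == 25.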
- if len(samples.Points) < 2 { - continue - } - - lastSample := samples.Points[len(samples.Points)-1] - previousSample := samples.Points[len(samples.Points)-2] - - var resultValue float64 - if isRate && lastSample.V < previousSample.V { - // Counter reset. - resultValue = lastSample.V - } else { - resultValue = lastSample.V - previousSample.V - } - - sampledInterval := lastSample.T - previousSample.T - if sampledInterval == 0 { - // Avoid dividing by 0. - continue - } - - if isRate { - // Convert to per-second. - resultValue /= float64(sampledInterval) / 1000 - } - - out = append(out, Sample{ - Point: Point{V: resultValue}, - }) - } - return out -} - -// Calculate the trend value at the given index i in raw data d. -// This is somewhat analogous to the slope of the trend at the given index. -// The argument "s" is the set of computed smoothed values. -// The argument "b" is the set of computed trend factors. -// The argument "d" is the set of raw input values. -func calcTrendValue(i int, sf, tf, s0, s1, b float64) float64 { - if i == 0 { - return b - } - - x := tf * (s1 - s0) - y := (1 - tf) * b - - return x + y -} - -// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data. -// Holt-Winter also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current -// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects -// how trends in historical data will affect the current data. A higher trend factor increases the influence. -// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - mat := vals[0].(Matrix) - - // The smoothing factor argument. - sf := vals[1].(Vector)[0].V - - // The trend factor argument. - tf := vals[2].(Vector)[0].V - - // Sanity check the input. - if sf <= 0 || sf >= 1 { - panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf)) - } - if tf <= 0 || tf >= 1 { - panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf)) - } - - var l int - for _, samples := range mat { - l = len(samples.Points) - - // Can't do the smoothing operation with less than two points. - if l < 2 { - continue - } - - var s0, s1, b float64 - // Set initial values. - s1 = samples.Points[0].V - b = samples.Points[1].V - samples.Points[0].V - - // Run the smoothing operation. - var x, y float64 - for i := 1; i < l; i++ { - - // Scale the raw value against the smoothing factor. - x = sf * samples.Points[i].V - - // Scale the last smoothed value with the trend at this point. - b = calcTrendValue(i-1, sf, tf, s0, s1, b) - y = (1 - sf) * (s1 + b) - - s0, s1 = s1, x+y - } - - enh.out = append(enh.out, Sample{ - Point: Point{V: s1}, - }) - } - - return enh.out -} - -// === sort(node ValueTypeVector) Vector === -func funcSort(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - // NaN should sort to the bottom, so take descending sort with NaN first and - // reverse it. 
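funcHoltWinters above is double exponential smoothing; the recurrence without the PromQL plumbing looks like this (a sketch of ours, requiring at least two input points):

// doubleExpSmooth mirrors the smoothing loop above on a plain slice:
// s1 is the smoothed level, b the trend estimate.
func doubleExpSmooth(d []float64, sf, tf float64) float64 {
	var s0 float64
	s1 := d[0]
	b := d[1] - d[0]
	for i := 1; i < len(d); i++ {
		x := sf * d[i]
		if i > 1 { // calcTrendValue leaves b unchanged on the first step.
			b = tf*(s1-s0) + (1-tf)*b
		}
		y := (1 - sf) * (s1 + b)
		s0, s1 = s1, x+y
	}
	return s1
}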
- byValueSorter := vectorByReverseValueHeap(vals[0].(Vector)) - sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) -} - -// === sortDesc(node ValueTypeVector) Vector === -func funcSortDesc(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - // NaN should sort to the bottom, so take ascending sort with NaN first and - // reverse it. - byValueSorter := vectorByValueHeap(vals[0].(Vector)) - sort.Sort(sort.Reverse(byValueSorter)) - return Vector(byValueSorter) -} - -// === clamp_max(Vector ValueTypeVector, max Scalar) Vector === -func funcClampMax(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - vec := vals[0].(Vector) - max := vals[1].(Vector)[0].Point.V - for _, el := range vec { - enh.out = append(enh.out, Sample{ - Metric: enh.dropMetricName(el.Metric), - Point: Point{V: math.Min(max, el.V)}, - }) - } - return enh.out -} - -// === clamp_min(Vector ValueTypeVector, min Scalar) Vector === -func funcClampMin(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V - for _, el := range vec { - enh.out = append(enh.out, Sample{ - Metric: enh.dropMetricName(el.Metric), - Point: Point{V: math.Max(min, el.V)}, - }) - } - return enh.out -} - -// === round(Vector ValueTypeVector, toNearest=1 Scalar) Vector === -func funcRound(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - vec := vals[0].(Vector) - // round returns a number rounded to toNearest. - // Ties are solved by rounding up. - toNearest := float64(1) - if len(args) >= 2 { - toNearest = vals[1].(Vector)[0].Point.V - } - // Invert as it seems to cause fewer floating point accuracy issues. - toNearestInverse := 1.0 / toNearest - - for _, el := range vec { - v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse - enh.out = append(enh.out, Sample{ - Metric: enh.dropMetricName(el.Metric), - Point: Point{V: v}, - }) - } - return enh.out -} - -// === Scalar(node ValueTypeVector) Scalar === -func funcScalar(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - v := vals[0].(Vector) - if len(v) != 1 { - return append(enh.out, Sample{ - Point: Point{V: math.NaN()}, - }) - } - return append(enh.out, Sample{ - Point: Point{V: v[0].V}, - }) -} - -func aggrOverTime(vals []Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector { - mat := vals[0].(Matrix) - - for _, el := range mat { - if len(el.Points) == 0 { - continue - } - - enh.out = append(enh.out, Sample{ - Point: Point{V: aggrFn(el.Points)}, - }) - } - return enh.out -} - -// === avg_over_time(Matrix ValueTypeMatrix) Vector === -func funcAvgOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - var sum float64 - for _, v := range values { - sum += v.V - } - return sum / float64(len(values)) - }) -} - -// === count_over_time(Matrix ValueTypeMatrix) Vector === -func funcCountOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - return float64(len(values)) - }) -} - -// === floor(Vector ValueTypeVector) Vector === -// === max_over_time(Matrix ValueTypeMatrix) Vector === -func funcMaxOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - max := math.Inf(-1) - for _, v := range values { - max = math.Max(max, v.V) - } - return max - }) -} - -// === min_over_time(Matrix ValueTypeMatrix) Vector === -func 
funcMinOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - min := math.Inf(1) - for _, v := range values { - min = math.Min(min, v.V) - } - return min - }) -} - -// === sum_over_time(Matrix ValueTypeMatrix) Vector === -func funcSumOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - var sum float64 - for _, v := range values { - sum += v.V - } - return sum - }) -} - -// === quantile_over_time(Matrix ValueTypeMatrix) Vector === -func funcQuantileOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V - mat := vals[1].(Matrix) - - for _, el := range mat { - if len(el.Points) == 0 { - continue - } - - values := make(vectorByValueHeap, 0, len(el.Points)) - for _, v := range el.Points { - values = append(values, Sample{Point: Point{V: v.V}}) - } - enh.out = append(enh.out, Sample{ - Point: Point{V: quantile(q, values)}, - }) - } - return enh.out -} - -// === stddev_over_time(Matrix ValueTypeMatrix) Vector === -func funcStddevOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - var sum, squaredSum, count float64 - for _, v := range values { - sum += v.V - squaredSum += v.V * v.V - count++ - } - avg := sum / count - return math.Sqrt(squaredSum/count - avg*avg) - }) -} - -// === stdvar_over_time(Matrix ValueTypeMatrix) Vector === -func funcStdvarOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - var sum, squaredSum, count float64 - for _, v := range values { - sum += v.V - squaredSum += v.V * v.V - count++ - } - avg := sum / count - return squaredSum/count - avg*avg - }) -} - -// === absent(Vector ValueTypeVector) Vector === -func funcAbsent(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - if len(vals[0].(Vector)) > 0 { - return enh.out - } - m := []labels.Label{} - - if vs, ok := args[0].(*VectorSelector); ok { - for _, ma := range vs.LabelMatchers { - if ma.Type == labels.MatchEqual && ma.Name != labels.MetricName { - m = append(m, labels.Label{Name: ma.Name, Value: ma.Value}) - } - } - } - return append(enh.out, - Sample{ - Metric: labels.New(m...), - Point: Point{V: 1}, - }) -} - -func simpleFunc(vals []Value, enh *EvalNodeHelper, f func(float64) float64) Vector { - for _, el := range vals[0].(Vector) { - enh.out = append(enh.out, Sample{ - Metric: enh.dropMetricName(el.Metric), - Point: Point{V: f(el.V)}, - }) - } - return enh.out -} - -// === abs(Vector ValueTypeVector) Vector === -func funcAbs(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Abs) -} - -// === ceil(Vector ValueTypeVector) Vector === -func funcCeil(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Ceil) -} - -// === floor(Vector ValueTypeVector) Vector === -func funcFloor(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Floor) -} - -// === exp(Vector ValueTypeVector) Vector === -func funcExp(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Exp) -} - -// === sqrt(Vector VectorNode) Vector === -func funcSqrt(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Sqrt) -} - -// === ln(Vector 
ValueTypeVector) Vector === -func funcLn(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log) -} - -// === log2(Vector ValueTypeVector) Vector === -func funcLog2(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log2) -} - -// === log10(Vector ValueTypeVector) Vector === -func funcLog10(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return simpleFunc(vals, enh, math.Log10) -} - -// === timestamp(Vector ValueTypeVector) Vector === -func funcTimestamp(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - vec := vals[0].(Vector) - for _, el := range vec { - enh.out = append(enh.out, Sample{ - Metric: enh.dropMetricName(el.Metric), - Point: Point{V: float64(el.T) / 1000}, - }) - } - return enh.out -} - -// linearRegression performs a least-square linear regression analysis on the -// provided SamplePairs. It returns the slope, and the intercept value at the -// provided time. -func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { - var ( - n float64 - sumX, sumY float64 - sumXY, sumX2 float64 - ) - for _, sample := range samples { - x := float64(sample.T-interceptTime) / 1e3 - n += 1.0 - sumY += sample.V - sumX += x - sumXY += x * sample.V - sumX2 += x * x - } - covXY := sumXY - sumX*sumY/n - varX := sumX2 - sumX*sumX/n - - slope = covXY / varX - intercept = sumY/n - slope*sumX/n - return slope, intercept -} - -// === deriv(node ValueTypeMatrix) Vector === -func funcDeriv(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - mat := vals[0].(Matrix) - - for _, samples := range mat { - // No sense in trying to compute a derivative without at least two points. - // Drop this Vector element. - if len(samples.Points) < 2 { - continue - } - - // We pass in an arbitrary timestamp that is near the values in use - // to avoid floating point accuracy issues, see - // https://github.com/prometheus/prometheus/issues/2674 - slope, _ := linearRegression(samples.Points, samples.Points[0].T) - enh.out = append(enh.out, Sample{ - Point: Point{V: slope}, - }) - } - return enh.out -} - -// === predict_linear(node ValueTypeMatrix, k ValueTypeScalar) Vector === -func funcPredictLinear(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - mat := vals[0].(Matrix) - duration := vals[1].(Vector)[0].V - - for _, samples := range mat { - // No sense in trying to predict anything without at least two points. - // Drop this Vector element. - if len(samples.Points) < 2 { - continue - } - slope, intercept := linearRegression(samples.Points, enh.ts) - - enh.out = append(enh.out, Sample{ - Point: Point{V: slope*duration + intercept}, - }) - } - return enh.out -} - -// === histogram_quantile(k ValueTypeScalar, Vector ValueTypeVector) Vector === -func funcHistogramQuantile(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V - inVec := vals[1].(Vector) - sigf := enh.signatureFunc(false, excludedLabels...) - - if enh.signatureToMetricWithBuckets == nil { - enh.signatureToMetricWithBuckets = map[uint64]*metricWithBuckets{} - } else { - for _, v := range enh.signatureToMetricWithBuckets { - v.buckets = v.buckets[:0] - } - } - for _, el := range inVec { - upperBound, err := strconv.ParseFloat( - el.Metric.Get(model.BucketLabel), 64, - ) - if err != nil { - // Oops, no bucket label or malformed label value. Skip. - // TODO(beorn7): Issue a warning somehow. 
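For reference, the `linearRegression` helper removed above computes an ordinary least-squares fit in a single pass over the samples, using the running-sum identities n·cov(X,Y) = ΣXY - ΣX·ΣY/n and n·var(X) = ΣX² - (ΣX)²/n. A minimal standalone sketch of the same computation (function name and sample data are invented for illustration):

```go
package main

import "fmt"

// leastSquares returns the slope and intercept of an ordinary
// least-squares fit through (xs[i], ys[i]), accumulated in one
// pass like the removed helper. Both slices must have equal length.
func leastSquares(xs, ys []float64) (slope, intercept float64) {
	var n, sumX, sumY, sumXY, sumX2 float64
	for i := range xs {
		n++
		sumX += xs[i]
		sumY += ys[i]
		sumXY += xs[i] * ys[i]
		sumX2 += xs[i] * xs[i]
	}
	covXY := sumXY - sumX*sumY/n // n times the covariance
	varX := sumX2 - sumX*sumX/n  // n times the variance of x
	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}

func main() {
	// Perfectly linear data: y = 2x + 1.
	xs := []float64{0, 1, 2, 3}
	ys := []float64{1, 3, 5, 7}
	slope, intercept := leastSquares(xs, ys)
	fmt.Printf("slope=%g intercept=%g\n", slope, intercept) // slope=2 intercept=1
}
```

As the comment in `funcDeriv` notes, passing x values relative to a nearby reference timestamp keeps these sums from losing precision to cancellation (prometheus/prometheus#2674).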
- continue - } - hash := sigf(el.Metric) - - mb, ok := enh.signatureToMetricWithBuckets[hash] - if !ok { - el.Metric = labels.NewBuilder(el.Metric). - Del(labels.BucketLabel, labels.MetricName). - Labels() - - mb = &metricWithBuckets{el.Metric, nil} - enh.signatureToMetricWithBuckets[hash] = mb - } - mb.buckets = append(mb.buckets, bucket{upperBound, el.V}) - } - - for _, mb := range enh.signatureToMetricWithBuckets { - if len(mb.buckets) > 0 { - enh.out = append(enh.out, Sample{ - Metric: mb.metric, - Point: Point{V: bucketQuantile(q, mb.buckets)}, - }) - } - } - - return enh.out -} - -// === resets(Matrix ValueTypeMatrix) Vector === -func funcResets(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - in := vals[0].(Matrix) - - for _, samples := range in { - resets := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V - if current < prev { - resets++ - } - prev = current - } - - enh.out = append(enh.out, Sample{ - Point: Point{V: float64(resets)}, - }) - } - return enh.out -} - -// === changes(Matrix ValueTypeMatrix) Vector === -func funcChanges(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - in := vals[0].(Matrix) - - for _, samples := range in { - changes := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V - if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { - changes++ - } - prev = current - } - - enh.out = append(enh.out, Sample{ - Point: Point{V: float64(changes)}, - }) - } - return enh.out -} - -// === label_replace(Vector ValueTypeVector, dst_label, replacement, src_labelname, regex ValueTypeString) Vector === -func funcLabelReplace(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - var ( - vector = vals[0].(Vector) - dst = args[1].(*StringLiteral).Val - repl = args[2].(*StringLiteral).Val - src = args[3].(*StringLiteral).Val - regexStr = args[4].(*StringLiteral).Val - ) - - if enh.regex == nil { - var err error - enh.regex, err = regexp.Compile("^(?:" + regexStr + ")$") - if err != nil { - panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) - } - if !model.LabelNameRE.MatchString(dst) { - panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) - } - enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) - } - - outSet := make(map[uint64]struct{}, len(vector)) - for _, el := range vector { - h := el.Metric.Hash() - var outMetric labels.Labels - if l, ok := enh.dmn[h]; ok { - outMetric = l - } else { - srcVal := el.Metric.Get(src) - indexes := enh.regex.FindStringSubmatchIndex(srcVal) - if indexes == nil { - // If there is no match, no replacement should take place. 
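The removed `funcResets` and `funcChanges` share one scan-with-previous-value shape and differ only in the comparison: a reset is any decrease, a change is any differing value except a NaN-to-NaN transition. A standalone sketch (sample series invented):

```go
package main

import (
	"fmt"
	"math"
)

// resets counts how often the series decreases, i.e. how often a
// counter appears to have been reset, as in the removed funcResets.
func resets(vs []float64) int {
	n := 0
	for i := 1; i < len(vs); i++ {
		if vs[i] < vs[i-1] {
			n++
		}
	}
	return n
}

// changes counts value changes, treating NaN->NaN as "no change",
// matching the removed funcChanges.
func changes(vs []float64) int {
	n := 0
	for i := 1; i < len(vs); i++ {
		if vs[i] != vs[i-1] && !(math.IsNaN(vs[i]) && math.IsNaN(vs[i-1])) {
			n++
		}
	}
	return n
}

func main() {
	series := []float64{1, 2, 5, 0, 3, 3}
	fmt.Println(resets(series), changes(series)) // 1 4
}
```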
- outMetric = el.Metric - enh.dmn[h] = outMetric - } else { - res := enh.regex.ExpandString([]byte{}, repl, srcVal, indexes) - - lb := labels.NewBuilder(el.Metric).Del(dst) - if len(res) > 0 { - lb.Set(dst, string(res)) - } - outMetric = lb.Labels() - enh.dmn[h] = outMetric - } - } - - outHash := outMetric.Hash() - if _, ok := outSet[outHash]; ok { - panic(fmt.Errorf("duplicated label set in output of label_replace(): %s", el.Metric)) - } else { - enh.out = append(enh.out, - Sample{ - Metric: outMetric, - Point: Point{V: el.Point.V}, - }) - outSet[outHash] = struct{}{} - } - } - return enh.out -} - -// === Vector(s Scalar) Vector === -func funcVector(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return append(enh.out, - Sample{ - Metric: labels.Labels{}, - Point: Point{V: vals[0].(Vector)[0].V}, - }) -} - -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) Vector === -func funcLabelJoin(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - var ( - vector = vals[0].(Vector) - dst = args[1].(*StringLiteral).Val - sep = args[2].(*StringLiteral).Val - srcLabels = make([]string, len(args)-3) - ) - - if enh.dmn == nil { - enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) - } - - for i := 3; i < len(args); i++ { - src := args[i].(*StringLiteral).Val - if !model.LabelName(src).IsValid() { - panic(fmt.Errorf("invalid source label name in label_join(): %s", src)) - } - srcLabels[i-3] = src - } - - if !model.LabelName(dst).IsValid() { - panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) - } - - outSet := make(map[uint64]struct{}, len(vector)) - srcVals := make([]string, len(srcLabels)) - for _, el := range vector { - h := el.Metric.Hash() - var outMetric labels.Labels - if l, ok := enh.dmn[h]; ok { - outMetric = l - } else { - - for i, src := range srcLabels { - srcVals[i] = el.Metric.Get(src) - } - - lb := labels.NewBuilder(el.Metric) - - strval := strings.Join(srcVals, sep) - if strval == "" { - lb.Del(dst) - } else { - lb.Set(dst, strval) - } - - outMetric = lb.Labels() - enh.dmn[h] = outMetric - } - outHash := outMetric.Hash() - - if _, exists := outSet[outHash]; exists { - panic(fmt.Errorf("duplicated label set in output of label_join(): %s", el.Metric)) - } else { - enh.out = append(enh.out, Sample{ - Metric: outMetric, - Point: Point{V: el.Point.V}, - }) - outSet[outHash] = struct{}{} - } - } - return enh.out -} - -// Common code for date related functions. 
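`funcLabelReplace` above is plain `regexp` machinery: the user pattern is anchored as `^(?:...)$` so it must match the whole source label value, and `ExpandString` applies the replacement template against the submatch indexes; no match means no replacement. The same steps in isolation (pattern, value, and template are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// label_replace anchors the user-supplied pattern so it has to
	// match the entire source label value.
	re := regexp.MustCompile(`^(?:([a-z]+)-(\d+))$`)

	srcVal := "web-42" // hypothetical value of the source label
	repl := "$1"       // replacement template

	// nil indexes mean no match, and then no replacement takes
	// place, exactly as in the removed funcLabelReplace.
	if idx := re.FindStringSubmatchIndex(srcVal); idx != nil {
		out := re.ExpandString(nil, repl, srcVal, idx)
		fmt.Println(string(out)) // web
	}
}
```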
-func dateWrapper(vals []Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { - if len(vals) == 0 { - return append(enh.out, - Sample{ - Metric: labels.Labels{}, - Point: Point{V: f(time.Unix(enh.ts/1000, 0).UTC())}, - }) - } - - for _, el := range vals[0].(Vector) { - t := time.Unix(int64(el.V), 0).UTC() - enh.out = append(enh.out, Sample{ - Metric: enh.dropMetricName(el.Metric), - Point: Point{V: f(t)}, - }) - } - return enh.out -} - -// === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return dateWrapper(vals, enh, func(t time.Time) float64 { - return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) - }) -} - -// === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return dateWrapper(vals, enh, func(t time.Time) float64 { - return float64(t.Day()) - }) -} - -// === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return dateWrapper(vals, enh, func(t time.Time) float64 { - return float64(t.Weekday()) - }) -} - -// === hour(v Vector) Scalar === -func funcHour(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return dateWrapper(vals, enh, func(t time.Time) float64 { - return float64(t.Hour()) - }) -} - -// === minute(v Vector) Scalar === -func funcMinute(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return dateWrapper(vals, enh, func(t time.Time) float64 { - return float64(t.Minute()) - }) -} - -// === month(v Vector) Scalar === -func funcMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return dateWrapper(vals, enh, func(t time.Time) float64 { - return float64(t.Month()) - }) -} - -// === year(v Vector) Scalar === -func funcYear(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { - return dateWrapper(vals, enh, func(t time.Time) float64 { - return float64(t.Year()) - }) -} - -var functions = map[string]*Function{ - "abs": { - Name: "abs", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcAbs, - }, - "absent": { - Name: "absent", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcAbsent, - }, - "avg_over_time": { - Name: "avg_over_time", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcAvgOverTime, - }, - "ceil": { - Name: "ceil", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcCeil, - }, - "changes": { - Name: "changes", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcChanges, - }, - "clamp_max": { - Name: "clamp_max", - ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, - ReturnType: ValueTypeVector, - Call: funcClampMax, - }, - "clamp_min": { - Name: "clamp_min", - ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, - ReturnType: ValueTypeVector, - Call: funcClampMin, - }, - "count_over_time": { - Name: "count_over_time", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcCountOverTime, - }, - "days_in_month": { - Name: "days_in_month", - ArgTypes: []ValueType{ValueTypeVector}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcDaysInMonth, - }, - "day_of_month": { - Name: "day_of_month", - ArgTypes: []ValueType{ValueTypeVector}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcDayOfMonth, - }, - "day_of_week": { - Name: "day_of_week", - 
ArgTypes: []ValueType{ValueTypeVector}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcDayOfWeek, - }, - "delta": { - Name: "delta", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcDelta, - }, - "deriv": { - Name: "deriv", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcDeriv, - }, - "exp": { - Name: "exp", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcExp, - }, - "floor": { - Name: "floor", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcFloor, - }, - "histogram_quantile": { - Name: "histogram_quantile", - ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcHistogramQuantile, - }, - "holt_winters": { - Name: "holt_winters", - ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, - ReturnType: ValueTypeVector, - Call: funcHoltWinters, - }, - "hour": { - Name: "hour", - ArgTypes: []ValueType{ValueTypeVector}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcHour, - }, - "idelta": { - Name: "idelta", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcIdelta, - }, - "increase": { - Name: "increase", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcIncrease, - }, - "irate": { - Name: "irate", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcIrate, - }, - "label_replace": { - Name: "label_replace", - ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString, ValueTypeString}, - ReturnType: ValueTypeVector, - Call: funcLabelReplace, - }, - "label_join": { - Name: "label_join", - ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString}, - Variadic: -1, - ReturnType: ValueTypeVector, - Call: funcLabelJoin, - }, - "ln": { - Name: "ln", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcLn, - }, - "log10": { - Name: "log10", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcLog10, - }, - "log2": { - Name: "log2", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcLog2, - }, - "max_over_time": { - Name: "max_over_time", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcMaxOverTime, - }, - "min_over_time": { - Name: "min_over_time", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcMinOverTime, - }, - "minute": { - Name: "minute", - ArgTypes: []ValueType{ValueTypeVector}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcMinute, - }, - "month": { - Name: "month", - ArgTypes: []ValueType{ValueTypeVector}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcMonth, - }, - "predict_linear": { - Name: "predict_linear", - ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar}, - ReturnType: ValueTypeVector, - Call: funcPredictLinear, - }, - "quantile_over_time": { - Name: "quantile_over_time", - ArgTypes: []ValueType{ValueTypeScalar, ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcQuantileOverTime, - }, - "rate": { - Name: "rate", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcRate, - }, - "resets": { - Name: "resets", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcResets, - }, - "round": { - Name: "round", - ArgTypes: 
[]ValueType{ValueTypeVector, ValueTypeScalar}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcRound, - }, - "scalar": { - Name: "scalar", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeScalar, - Call: funcScalar, - }, - "sort": { - Name: "sort", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcSort, - }, - "sort_desc": { - Name: "sort_desc", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcSortDesc, - }, - "sqrt": { - Name: "sqrt", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcSqrt, - }, - "stddev_over_time": { - Name: "stddev_over_time", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcStddevOverTime, - }, - "stdvar_over_time": { - Name: "stdvar_over_time", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcStdvarOverTime, - }, - "sum_over_time": { - Name: "sum_over_time", - ArgTypes: []ValueType{ValueTypeMatrix}, - ReturnType: ValueTypeVector, - Call: funcSumOverTime, - }, - "time": { - Name: "time", - ArgTypes: []ValueType{}, - ReturnType: ValueTypeScalar, - Call: funcTime, - }, - "timestamp": { - Name: "timestamp", - ArgTypes: []ValueType{ValueTypeVector}, - ReturnType: ValueTypeVector, - Call: funcTimestamp, - }, - "vector": { - Name: "vector", - ArgTypes: []ValueType{ValueTypeScalar}, - ReturnType: ValueTypeVector, - Call: funcVector, - }, - "year": { - Name: "year", - ArgTypes: []ValueType{ValueTypeVector}, - Variadic: 1, - ReturnType: ValueTypeVector, - Call: funcYear, - }, -} - -// getFunction returns a predefined Function object for the given name. -func getFunction(name string) (*Function, bool) { - function, ok := functions[name] - return function, ok -} - -type vectorByValueHeap Vector - -func (s vectorByValueHeap) Len() int { - return len(s) -} - -func (s vectorByValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { - return true - } - return s[i].V < s[j].V -} - -func (s vectorByValueHeap) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s *vectorByValueHeap) Push(x interface{}) { - *s = append(*s, *(x.(*Sample))) -} - -func (s *vectorByValueHeap) Pop() interface{} { - old := *s - n := len(old) - el := old[n-1] - *s = old[0 : n-1] - return el -} - -type vectorByReverseValueHeap Vector - -func (s vectorByReverseValueHeap) Len() int { - return len(s) -} - -func (s vectorByReverseValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { - return true - } - return s[i].V > s[j].V -} - -func (s vectorByReverseValueHeap) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s *vectorByReverseValueHeap) Push(x interface{}) { - *s = append(*s, *(x.(*Sample))) -} - -func (s *vectorByReverseValueHeap) Pop() interface{} { - old := *s - n := len(old) - el := old[n-1] - *s = old[0 : n-1] - return el -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/fuzz.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/fuzz.go deleted file mode 100644 index ecddaf799..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/fuzz.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
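The two heap types above encode NaN placement: each `Less` reports NaN as smaller than anything, one comparing with `<` and one with `>`, and both `sort()` and `sort_desc()` then apply `sort.Reverse`, so NaN lands at the bottom of either ordering. The `sort_desc` path sketched with a plain float slice:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// nanFirst sorts ascending with NaN treated as smaller than any
// number, mirroring vectorByValueHeap.Less in the removed code.
type nanFirst []float64

func (s nanFirst) Len() int      { return len(s) }
func (s nanFirst) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nanFirst) Less(i, j int) bool {
	if math.IsNaN(s[i]) {
		return true
	}
	return s[i] < s[j]
}

func main() {
	vs := nanFirst{3, math.NaN(), 1, 2}

	sort.Sort(vs)
	fmt.Println(vs) // [NaN 1 2 3]: ascending, NaN first

	sort.Sort(sort.Reverse(vs))
	fmt.Println(vs) // [3 2 1 NaN]: reversed, NaN sinks to the bottom
}
```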
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Only build when go-fuzz is in use -//go:build gofuzz -// +build gofuzz - -package promql - -import "github.com/prometheus/prometheus/model/textparse" - -// PromQL parser fuzzing instrumentation for use with -// https://github.com/dvyukov/go-fuzz. -// -// Fuzz each parser by building appropriately instrumented parser, ex. -// FuzzParseMetric and execute it with it's -// -// go-fuzz-build -func FuzzParseMetric -o FuzzParseMetric.zip github.com/prometheus/prometheus/promql -// -// And then run the tests with the appropriate inputs -// -// go-fuzz -bin FuzzParseMetric.zip -workdir fuzz-data/ParseMetric -// -// Further input samples should go in the folders fuzz-data/ParseMetric/corpus. -// -// Repeat for ParseMetricSeletion, ParseExpr and ParseStmt. - -// Tuning which value is returned from Fuzz*-functions has a strong influence -// on how quick the fuzzer converges on "interesting" cases. At least try -// switching between fuzzMeh (= included in corpus, but not a priority) and -// fuzzDiscard (=don't use this input for re-building later inputs) when -// experimenting. -const ( - fuzzInteresting = 1 - fuzzMeh = 0 - fuzzDiscard = -1 -) - -// Fuzz the metric parser. -// -// Note that his is not the parser for the text-based exposition-format; that -// lives in github.com/prometheus/client_golang/text. -func FuzzParseMetric(in []byte) int { - p := textparse.New(in) - for p.Next() { - } - - if p.Err() == nil { - return fuzzInteresting - } - - return fuzzMeh -} - -// Fuzz the metric selector parser. -func FuzzParseMetricSelector(in []byte) int { - _, err := ParseMetricSelector(string(in)) - if err == nil { - return fuzzInteresting - } - - return fuzzMeh -} - -// Fuzz the expression parser. -func FuzzParseExpr(in []byte) int { - _, err := ParseExpr(string(in)) - if err == nil { - return fuzzInteresting - } - - return fuzzMeh -} - -// Fuzz the parser. -func FuzzParseStmts(in []byte) int { - _, err := ParseStmts(string(in)) - if err == nil { - return fuzzInteresting - } - - return fuzzMeh -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/lex.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/lex.go deleted file mode 100644 index bb2476cf6..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/lex.go +++ /dev/null @@ -1,906 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promql - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. 
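The removed fuzz targets all follow the go-fuzz return-value convention spelled out in their comments: 1 marks an input as interesting, 0 keeps it without priority, -1 discards it for corpus building. A harness of the same shape, with `strconv.ParseFloat` standing in for the parser under test (purely illustrative, not part of the patch):

```go
//go:build gofuzz

// Package fuzzdemo sketches the go-fuzz harness shape used by the
// removed targets: build with go-fuzz-build, then run go-fuzz.
package fuzzdemo

import "strconv"

const (
	fuzzInteresting = 1  // include in corpus with priority
	fuzzMeh         = 0  // include, but don't prioritize
	fuzzDiscard     = -1 // don't use for deriving new inputs
)

// Fuzz is the entry point go-fuzz instruments. Inputs the parser
// accepts are the interesting ones, as in the removed targets.
func Fuzz(in []byte) int {
	if _, err := strconv.ParseFloat(string(in), 64); err == nil {
		return fuzzInteresting
	}
	return fuzzMeh
}
```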
-type item struct { - typ ItemType // The type of this item. - pos Pos // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -// String returns a descriptive string for the item. -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case i.typ == itemIdentifier || i.typ == itemMetricIdentifier: - return fmt.Sprintf("%q", i.val) - case i.typ.isKeyword(): - return fmt.Sprintf("<%s>", i.val) - case i.typ.isOperator(): - return fmt.Sprintf("", i.val) - case i.typ.isAggregator(): - return fmt.Sprintf("", i.val) - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// isOperator returns true if the item corresponds to a arithmetic or set operator. -// Returns false otherwise. -func (i ItemType) isOperator() bool { return i > operatorsStart && i < operatorsEnd } - -// isAggregator returns true if the item belongs to the aggregator functions. -// Returns false otherwise -func (i ItemType) isAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd } - -// isAggregator returns true if the item is an aggregator that takes a parameter. -// Returns false otherwise -func (i ItemType) isAggregatorWithParam() bool { - return i == itemTopK || i == itemBottomK || i == itemCountValues || i == itemQuantile -} - -// isKeyword returns true if the item corresponds to a keyword. -// Returns false otherwise. -func (i ItemType) isKeyword() bool { return i > keywordsStart && i < keywordsEnd } - -// isCompairsonOperator returns true if the item corresponds to a comparison operator. -// Returns false otherwise. -func (i ItemType) isComparisonOperator() bool { - switch i { - case itemEQL, itemNEQ, itemLTE, itemLSS, itemGTE, itemGTR: - return true - default: - return false - } -} - -// isSetOperator returns whether the item corresponds to a set operator. -func (i ItemType) isSetOperator() bool { - switch i { - case itemLAND, itemLOR, itemLUnless: - return true - } - return false -} - -// LowestPrec is a constant for operator precedence in expressions. -const LowestPrec = 0 // Non-operators. - -// Precedence returns the operator precedence of the binary -// operator op. If op is not a binary operator, the result -// is LowestPrec. -func (i ItemType) precedence() int { - switch i { - case itemLOR: - return 1 - case itemLAND, itemLUnless: - return 2 - case itemEQL, itemNEQ, itemLTE, itemLSS, itemGTE, itemGTR: - return 3 - case itemADD, itemSUB: - return 4 - case itemMUL, itemDIV, itemMOD: - return 5 - case itemPOW: - return 6 - default: - return LowestPrec - } -} - -func (i ItemType) isRightAssociative() bool { - switch i { - case itemPOW: - return true - default: - return false - } - -} - -type ItemType int - -const ( - itemError ItemType = iota // Error occurred, value is error message - itemEOF - itemComment - itemIdentifier - itemMetricIdentifier - itemLeftParen - itemRightParen - itemLeftBrace - itemRightBrace - itemLeftBracket - itemRightBracket - itemComma - itemAssign - itemSemicolon - itemString - itemNumber - itemDuration - itemBlank - itemTimes - - operatorsStart - // Operators. - itemSUB - itemADD - itemMUL - itemMOD - itemDIV - itemLAND - itemLOR - itemLUnless - itemEQL - itemNEQ - itemLTE - itemLSS - itemGTE - itemGTR - itemEQLRegex - itemNEQRegex - itemPOW - operatorsEnd - - aggregatorsStart - // Aggregators. 
- itemAvg - itemCount - itemSum - itemMin - itemMax - itemStddev - itemStdvar - itemTopK - itemBottomK - itemCountValues - itemQuantile - aggregatorsEnd - - keywordsStart - // Keywords. - itemAlert - itemIf - itemFor - itemLabels - itemAnnotations - itemOffset - itemBy - itemWithout - itemOn - itemIgnoring - itemGroupLeft - itemGroupRight - itemBool - keywordsEnd -) - -var key = map[string]ItemType{ - // Operators. - "and": itemLAND, - "or": itemLOR, - "unless": itemLUnless, - - // Aggregators. - "sum": itemSum, - "avg": itemAvg, - "count": itemCount, - "min": itemMin, - "max": itemMax, - "stddev": itemStddev, - "stdvar": itemStdvar, - "topk": itemTopK, - "bottomk": itemBottomK, - "count_values": itemCountValues, - "quantile": itemQuantile, - - // Keywords. - "alert": itemAlert, - "if": itemIf, - "for": itemFor, - "labels": itemLabels, - "annotations": itemAnnotations, - "offset": itemOffset, - "by": itemBy, - "without": itemWithout, - "on": itemOn, - "ignoring": itemIgnoring, - "group_left": itemGroupLeft, - "group_right": itemGroupRight, - "bool": itemBool, -} - -// These are the default string representations for common items. It does not -// imply that those are the only character sequences that can be lexed to such an item. -var itemTypeStr = map[ItemType]string{ - itemLeftParen: "(", - itemRightParen: ")", - itemLeftBrace: "{", - itemRightBrace: "}", - itemLeftBracket: "[", - itemRightBracket: "]", - itemComma: ",", - itemAssign: "=", - itemSemicolon: ";", - itemBlank: "_", - itemTimes: "x", - - itemSUB: "-", - itemADD: "+", - itemMUL: "*", - itemMOD: "%", - itemDIV: "/", - itemEQL: "==", - itemNEQ: "!=", - itemLTE: "<=", - itemLSS: "<", - itemGTE: ">=", - itemGTR: ">", - itemEQLRegex: "=~", - itemNEQRegex: "!~", - itemPOW: "^", -} - -func init() { - // Add keywords to item type strings. - for s, ty := range key { - itemTypeStr[ty] = s - } - // Special numbers. - key["inf"] = itemNumber - key["nan"] = itemNumber -} - -func (i ItemType) String() string { - if s, ok := itemTypeStr[i]; ok { - return s - } - return fmt.Sprintf("", i) -} - -func (i item) desc() string { - if _, ok := itemTypeStr[i.typ]; ok { - return i.String() - } - if i.typ == itemEOF { - return i.typ.desc() - } - return fmt.Sprintf("%s %s", i.typ.desc(), i) -} - -func (i ItemType) desc() string { - switch i { - case itemError: - return "error" - case itemEOF: - return "end of input" - case itemComment: - return "comment" - case itemIdentifier: - return "identifier" - case itemMetricIdentifier: - return "metric identifier" - case itemString: - return "string" - case itemNumber: - return "number" - case itemDuration: - return "duration" - } - return fmt.Sprintf("%q", i) -} - -const eof = -1 - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// Pos is the position in a string. -type Pos int - -// lexer holds the state of the scanner. -type lexer struct { - input string // The string being scanned. - state stateFn // The next lexing function to enter. - pos Pos // Current position in the input. - start Pos // Start position of this item. - width Pos // Width of last rune read from input. - lastPos Pos // Position of most recent item returned by nextItem. - items chan item // Channel of scanned items. - - parenDepth int // Nesting depth of ( ) exprs. - braceOpen bool // Whether a { is opened. - bracketOpen bool // Whether a [ is opened. - stringOpen rune // Quote rune of the string currently being read. 
- - // seriesDesc is set when a series description for the testing - // language is lexed. - seriesDesc bool -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if int(l.pos) >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = Pos(w) - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t ItemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos]} - l.start = l.pos -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - // consume - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// linePosition reports at which character in the current line -// we are on. -func (l *lexer) linePosition() int { - lb := strings.LastIndex(l.input[:l.lastPos], "\n") - if lb == -1 { - return 1 + int(l.lastPos) - } - return 1 + int(l.lastPos) - lb -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - item := <-l.items - l.lastPos = item.pos - return item -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexStatements; l.state != nil; { - l.state = l.state(l) - } - close(l.items) -} - -// lineComment is the character that starts a line comment. -const lineComment = "#" - -// lexStatements is the top-level state for lexing. 
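The removed lexer is the classic Go state-function design: every state is a `func(*lexer) stateFn` that emits items on a channel and returns the next state, and `run` loops until a state returns nil while the consumer reads items concurrently. A stripped-down sketch of the same machinery that lexes only space-separated words (token types and input invented):

```go
package main

import "fmt"

type itemType int

const (
	itemEOF itemType = iota
	itemWord
)

type item struct {
	typ itemType
	val string
}

type lexer struct {
	input      string
	start, pos int
	items      chan item
}

// stateFn mirrors the removed design: a state returns the next state.
type stateFn func(*lexer) stateFn

// emit passes the pending input [start,pos) to the consumer.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.input[l.start:l.pos]}
	l.start = l.pos
}

// lexWords skips spaces and emits runs of non-space bytes; returning
// nil terminates the run loop, like the removed lexStatements chain.
func lexWords(l *lexer) stateFn {
	for l.pos < len(l.input) && l.input[l.pos] == ' ' {
		l.pos++
	}
	l.start = l.pos
	if l.pos == len(l.input) {
		l.emit(itemEOF)
		return nil
	}
	for l.pos < len(l.input) && l.input[l.pos] != ' ' {
		l.pos++
	}
	l.emit(itemWord)
	return lexWords
}

func (l *lexer) run() {
	for state := stateFn(lexWords); state != nil; {
		state = state(l)
	}
	close(l.items)
}

func main() {
	l := &lexer{input: "rate http_requests", items: make(chan item)}
	go l.run()
	for it := range l.items {
		fmt.Printf("%d %q\n", it.typ, it.val)
	}
}
```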
-func lexStatements(l *lexer) stateFn { - if l.braceOpen { - return lexInsideBraces - } - if strings.HasPrefix(l.input[l.pos:], lineComment) { - return lexLineComment - } - - switch r := l.next(); { - case r == eof: - if l.parenDepth != 0 { - return l.errorf("unclosed left parenthesis") - } else if l.bracketOpen { - return l.errorf("unclosed left bracket") - } - l.emit(itemEOF) - return nil - case r == ',': - l.emit(itemComma) - case isSpace(r): - return lexSpace - case r == '*': - l.emit(itemMUL) - case r == '/': - l.emit(itemDIV) - case r == '%': - l.emit(itemMOD) - case r == '+': - l.emit(itemADD) - case r == '-': - l.emit(itemSUB) - case r == '^': - l.emit(itemPOW) - case r == '=': - if t := l.peek(); t == '=' { - l.next() - l.emit(itemEQL) - } else if t == '~' { - return l.errorf("unexpected character after '=': %q", t) - } else { - l.emit(itemAssign) - } - case r == '!': - if t := l.next(); t == '=' { - l.emit(itemNEQ) - } else { - return l.errorf("unexpected character after '!': %q", t) - } - case r == '<': - if t := l.peek(); t == '=' { - l.next() - l.emit(itemLTE) - } else { - l.emit(itemLSS) - } - case r == '>': - if t := l.peek(); t == '=' { - l.next() - l.emit(itemGTE) - } else { - l.emit(itemGTR) - } - case isDigit(r) || (r == '.' && isDigit(l.peek())): - l.backup() - return lexNumberOrDuration - case r == '"' || r == '\'': - l.stringOpen = r - return lexString - case r == '`': - l.stringOpen = r - return lexRawString - case isAlpha(r) || r == ':': - l.backup() - return lexKeywordOrIdentifier - case r == '(': - l.emit(itemLeftParen) - l.parenDepth++ - return lexStatements - case r == ')': - l.emit(itemRightParen) - l.parenDepth-- - if l.parenDepth < 0 { - return l.errorf("unexpected right parenthesis %q", r) - } - return lexStatements - case r == '{': - l.emit(itemLeftBrace) - l.braceOpen = true - return lexInsideBraces(l) - case r == '[': - if l.bracketOpen { - return l.errorf("unexpected left bracket %q", r) - } - l.emit(itemLeftBracket) - l.bracketOpen = true - return lexDuration - case r == ']': - if !l.bracketOpen { - return l.errorf("unexpected right bracket %q", r) - } - l.emit(itemRightBracket) - l.bracketOpen = false - - default: - return l.errorf("unexpected character: %q", r) - } - return lexStatements -} - -// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and -// scanned as identifiers. -func lexInsideBraces(l *lexer) stateFn { - if strings.HasPrefix(l.input[l.pos:], lineComment) { - return lexLineComment - } - - switch r := l.next(); { - case r == eof: - return l.errorf("unexpected end of input inside braces") - case isSpace(r): - return lexSpace - case isAlpha(r): - l.backup() - return lexIdentifier - case r == ',': - l.emit(itemComma) - case r == '"' || r == '\'': - l.stringOpen = r - return lexString - case r == '`': - l.stringOpen = r - return lexRawString - case r == '=': - if l.next() == '~' { - l.emit(itemEQLRegex) - break - } - l.backup() - l.emit(itemEQL) - case r == '!': - switch nr := l.next(); { - case nr == '~': - l.emit(itemNEQRegex) - case nr == '=': - l.emit(itemNEQ) - default: - return l.errorf("unexpected character after '!' 
inside braces: %q", nr) - } - case r == '{': - return l.errorf("unexpected left brace %q", r) - case r == '}': - l.emit(itemRightBrace) - l.braceOpen = false - - if l.seriesDesc { - return lexValueSequence - } - return lexStatements - default: - return l.errorf("unexpected character inside braces: %q", r) - } - return lexInsideBraces -} - -// lexValueSequence scans a value sequence of a series description. -func lexValueSequence(l *lexer) stateFn { - switch r := l.next(); { - case r == eof: - return lexStatements - case isSpace(r): - lexSpace(l) - case r == '+': - l.emit(itemADD) - case r == '-': - l.emit(itemSUB) - case r == 'x': - l.emit(itemTimes) - case r == '_': - l.emit(itemBlank) - case isDigit(r) || (r == '.' && isDigit(l.peek())): - l.backup() - lexNumber(l) - case isAlpha(r): - l.backup() - // We might lex invalid items here but this will be caught by the parser. - return lexKeywordOrIdentifier - default: - return l.errorf("unexpected character in series sequence: %q", r) - } - return lexValueSequence -} - -// lexEscape scans a string escape sequence. The initial escaping character (\) -// has already been seen. -// -// NOTE: This function as well as the helper function digitVal() and associated -// tests have been adapted from the corresponding functions in the "go/scanner" -// package of the Go standard library to work for Prometheus-style strings. -// None of the actual escaping/quoting logic was changed in this function - it -// was only modified to integrate with our lexer. -func lexEscape(l *lexer) { - var n int - var base, max uint32 - - ch := l.next() - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen: - return - case '0', '1', '2', '3', '4', '5', '6', '7': - n, base, max = 3, 8, 255 - case 'x': - ch = l.next() - n, base, max = 2, 16, 255 - case 'u': - ch = l.next() - n, base, max = 4, 16, unicode.MaxRune - case 'U': - ch = l.next() - n, base, max = 8, 16, unicode.MaxRune - case eof: - l.errorf("escape sequence not terminated") - default: - l.errorf("unknown escape sequence %#U", ch) - } - - var x uint32 - for n > 0 { - d := uint32(digitVal(ch)) - if d >= base { - if ch == eof { - l.errorf("escape sequence not terminated") - } - l.errorf("illegal character %#U in escape sequence", ch) - } - x = x*base + d - ch = l.next() - n-- - } - - if x > max || 0xD800 <= x && x < 0xE000 { - l.errorf("escape sequence is an invalid Unicode code point") - } -} - -// digitVal returns the digit value of a rune or 16 in case the rune does not -// represent a valid digit. -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // Larger than any legal digit val. -} - -// lexString scans a quoted string. The initial quote has already been seen. -func lexString(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - lexEscape(l) - case utf8.RuneError: - return l.errorf("invalid UTF-8 rune") - case eof, '\n': - return l.errorf("unterminated quoted string") - case l.stringOpen: - break Loop - } - } - l.emit(itemString) - return lexStatements -} - -// lexRawString scans a raw quoted string. The initial quote has already been seen. 
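`lexEscape` above decodes numeric escapes with the standard scanner idiom: choose digit count, base, and maximum per escape kind, then accumulate `x = x*base + d`, rejecting digits outside the base. The accumulation loop in isolation, decoding the payload of a `\xNN` escape (helper names are invented):

```go
package main

import "fmt"

// digitVal mirrors the removed helper: the value of a hex digit,
// or 16 for any byte that is not a legal digit.
func digitVal(ch byte) uint32 {
	switch {
	case '0' <= ch && ch <= '9':
		return uint32(ch - '0')
	case 'a' <= ch && ch <= 'f':
		return uint32(ch - 'a' + 10)
	case 'A' <= ch && ch <= 'F':
		return uint32(ch - 'A' + 10)
	}
	return 16 // larger than any legal digit value
}

// decode accumulates n digits in the given base, the loop the
// removed lexEscape runs for \377, \xFF, \uFFFF and \UFFFFFFFF.
func decode(s string, n int, base uint32) (uint32, bool) {
	if len(s) < n {
		return 0, false // escape sequence not terminated
	}
	var x uint32
	for i := 0; i < n; i++ {
		d := digitVal(s[i])
		if d >= base {
			return 0, false // illegal digit for this base
		}
		x = x*base + d
	}
	return x, true
}

func main() {
	v, ok := decode("41", 2, 16)        // the payload of "\x41"
	fmt.Println(v, ok, string(rune(v))) // 65 true A
}
```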
-func lexRawString(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case utf8.RuneError: - return l.errorf("invalid UTF-8 rune") - case eof: - return l.errorf("unterminated raw string") - case l.stringOpen: - break Loop - } - } - l.emit(itemString) - return lexStatements -} - -// lexSpace scans a run of space characters. One space has already been seen. -func lexSpace(l *lexer) stateFn { - for isSpace(l.peek()) { - l.next() - } - l.ignore() - return lexStatements -} - -// lexLineComment scans a line comment. Left comment marker is known to be present. -func lexLineComment(l *lexer) stateFn { - l.pos += Pos(len(lineComment)) - for r := l.next(); !isEndOfLine(r) && r != eof; { - r = l.next() - } - l.backup() - l.emit(itemComment) - return lexStatements -} - -func lexDuration(l *lexer) stateFn { - if l.scanNumber() { - return l.errorf("missing unit character in duration") - } - // Next two chars must be a valid unit and a non-alphanumeric. - if l.accept("smhdwy") { - if isAlphaNumeric(l.next()) { - return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos]) - } - l.backup() - l.emit(itemDuration) - return lexStatements - } - return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos]) -} - -// lexNumber scans a number: decimal, hex, oct or float. -func lexNumber(l *lexer) stateFn { - if !l.scanNumber() { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - l.emit(itemNumber) - return lexStatements -} - -// lexNumberOrDuration scans a number or a duration item. -func lexNumberOrDuration(l *lexer) stateFn { - if l.scanNumber() { - l.emit(itemNumber) - return lexStatements - } - // Next two chars must be a valid unit and a non-alphanumeric. - if l.accept("smhdwy") { - if isAlphaNumeric(l.next()) { - return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos]) - } - l.backup() - l.emit(itemDuration) - return lexStatements - } - return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos]) -} - -// scanNumber scans numbers of different formats. The scanned item is -// not necessarily a valid number. This case is caught by the parser. -func (l *lexer) scanNumber() bool { - digits := "0123456789" - // Disallow hexadecimal in series descriptions as the syntax is ambiguous. - if !l.seriesDesc && l.accept("0") && l.accept("xX") { - digits = "0123456789abcdefABCDEF" - } - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun("0123456789") - } - // Next thing must not be alphanumeric unless it's the times token - // for series repetitions. - if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) { - return true - } - return false -} - -// lexIdentifier scans an alphanumeric identifier. The next character -// is known to be a letter. -func lexIdentifier(l *lexer) stateFn { - for isAlphaNumeric(l.next()) { - // absorb - } - l.backup() - l.emit(itemIdentifier) - return lexStatements -} - -// lexKeywordOrIdentifier scans an alphanumeric identifier which may contain -// a colon rune. If the identifier is a keyword the respective keyword item -// is scanned. -func lexKeywordOrIdentifier(l *lexer) stateFn { -Loop: - for { - switch r := l.next(); { - case isAlphaNumeric(r) || r == ':': - // absorb. 
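`scanNumber` above is built entirely from `accept`/`acceptRun`: an optional `0x`/`0X` prefix switches the digit set to hexadecimal, then come a digit run, an optional fraction, and an optional signed exponent; whatever remains (such as a duration unit) is left for the caller. The same shape over a plain index (standalone, illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// scanNumber consumes a decimal/hex/float literal from the front of
// s and returns how many bytes it ate, echoing the removed method.
func scanNumber(s string) int {
	pos := 0
	accept := func(valid string) bool {
		if pos < len(s) && strings.ContainsRune(valid, rune(s[pos])) {
			pos++
			return true
		}
		return false
	}
	acceptRun := func(valid string) {
		for accept(valid) {
		}
	}

	digits := "0123456789"
	if accept("0") && accept("xX") {
		digits = "0123456789abcdefABCDEF"
	}
	acceptRun(digits)
	if accept(".") {
		acceptRun(digits)
	}
	if accept("eE") {
		accept("+-")
		acceptRun("0123456789")
	}
	return pos
}

func main() {
	for _, s := range []string{"42", "0x1f", "3.14e-2", "5m"} {
		n := scanNumber(s)
		fmt.Printf("%q -> number part %q\n", s, s[:n])
	}
}
```

For `"5m"` the scan stops after the digit, leaving the unit for the duration check, which is exactly how the removed `lexNumberOrDuration` decides between the two token types.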
- default: - l.backup() - word := l.input[l.start:l.pos] - if kw, ok := key[strings.ToLower(word)]; ok { - l.emit(kw) - } else if !strings.Contains(word, ":") { - l.emit(itemIdentifier) - } else { - l.emit(itemMetricIdentifier) - } - break Loop - } - } - if l.seriesDesc && l.peek() != '{' { - return lexValueSequence - } - return lexStatements -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' || r == '\n' || r == '\r' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return isAlpha(r) || isDigit(r) -} - -// isDigit reports whether r is a digit. Note: we cannot use unicode.IsDigit() -// instead because that also classifies non-Latin digits as digits. See -// https://github.com/prometheus/prometheus/issues/939. -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -// isAlpha reports whether r is an alphabetic or underscore. -func isAlpha(r rune) bool { - return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') -} - -// isLabel reports whether the string can be used as label. -func isLabel(s string) bool { - if len(s) == 0 || !isAlpha(rune(s[0])) { - return false - } - for _, c := range s[1:] { - if !isAlphaNumeric(c) { - return false - } - } - return true -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/parse.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/parse.go deleted file mode 100644 index 86a01d41e..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/parse.go +++ /dev/null @@ -1,1139 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//nolint //Since this was copied from Prometheus leave it as is -package promql - -import ( - "fmt" - "math" - "os" - "runtime" - "sort" - "strconv" - "strings" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/value" - - "github.com/prometheus/prometheus/util/strutil" -) - -type parser struct { - lex *lexer - token [3]item - peekCount int -} - -// ParseErr wraps a parsing error with line and position context. -// If the parsing input was a single line, line will be 0 and omitted -// from the error string. -type ParseErr struct { - Line, Pos int - Err error -} - -func (e *ParseErr) Error() string { - if e.Line == 0 { - return fmt.Sprintf("parse error at char %d: %s", e.Pos, e.Err) - } - return fmt.Sprintf("parse error at line %d, char %d: %s", e.Line, e.Pos, e.Err) -} - -// ParseStmts parses the input and returns the resulting statements or any occurring error. 
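`ParseErr` above reports line and character positions that the lexer derives from a byte offset: the line is one plus the number of newlines before the offset, and the column is measured from the last newline, mirroring the removed `lineNumber`/`linePosition`. Standalone:

```go
package main

import (
	"fmt"
	"strings"
)

// lineAndPos converts a byte offset into the 1-based line and
// character the removed lineNumber/linePosition methods report.
func lineAndPos(input string, offset int) (line, pos int) {
	line = 1 + strings.Count(input[:offset], "\n")
	lb := strings.LastIndex(input[:offset], "\n")
	if lb == -1 {
		return line, 1 + offset
	}
	return line, 1 + offset - lb
}

func main() {
	input := "up == 1\nrate(http_requests[5m]"
	line, pos := lineAndPos(input, len(input))
	fmt.Printf("parse error at line %d, char %d\n", line, pos)
}
```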
-func ParseStmts(input string) (Statements, error) { - p := newParser(input) - - stmts, err := p.parseStmts() - if err != nil { - return nil, err - } - err = p.typecheck(stmts) - return stmts, err -} - -// ParseExpr returns the expression parsed from the input. -func ParseExpr(input string) (Expr, error) { - p := newParser(input) - - expr, err := p.parseExpr() - if err != nil { - return nil, err - } - err = p.typecheck(expr) - return expr, err -} - -// ParseMetric parses the input into a metric -func ParseMetric(input string) (m labels.Labels, err error) { - p := newParser(input) - defer p.recover(&err) - - m = p.metric() - if p.peek().typ != itemEOF { - p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:]) - } - return m, nil -} - -// ParseMetricSelector parses the provided textual metric selector into a list of -// label matchers. -func ParseMetricSelector(input string) (m []*labels.Matcher, err error) { - p := newParser(input) - defer p.recover(&err) - - name := "" - if t := p.peek().typ; t == itemMetricIdentifier || t == itemIdentifier { - name = p.next().val - } - vs := p.VectorSelector(name) - if p.peek().typ != itemEOF { - p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:]) - } - return vs.LabelMatchers, nil -} - -// newParser returns a new parser. -func newParser(input string) *parser { - p := &parser{ - lex: lex(input), - } - return p -} - -// parseStmts parses a sequence of statements from the input. -func (p *parser) parseStmts() (stmts Statements, err error) { - defer p.recover(&err) - stmts = Statements{} - - for p.peek().typ != itemEOF { - if p.peek().typ == itemComment { - continue - } - stmts = append(stmts, p.stmt()) - } - return -} - -// parseExpr parses a single expression from the input. -func (p *parser) parseExpr() (expr Expr, err error) { - defer p.recover(&err) - - for p.peek().typ != itemEOF { - if p.peek().typ == itemComment { - continue - } - if expr != nil { - p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:]) - } - expr = p.expr() - } - - if expr == nil { - p.errorf("no expression found in input") - } - return -} - -// sequenceValue is an omittable value in a sequence of time series values. -type sequenceValue struct { - value float64 - omitted bool -} - -func (v sequenceValue) String() string { - if v.omitted { - return "_" - } - return fmt.Sprintf("%f", v.value) -} - -// parseSeriesDesc parses the description of a time series. -func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) { - p := newParser(input) - p.lex.seriesDesc = true - - return p.parseSeriesDesc() -} - -// parseSeriesDesc parses a description of a time series into its metric and value sequence. -func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) { - defer p.recover(&err) - - m = p.metric() - - const ctx = "series values" - for { - if p.peek().typ == itemEOF { - break - } - - // Extract blanks. - if p.peek().typ == itemBlank { - p.next() - times := uint64(1) - if p.peek().typ == itemTimes { - p.next() - times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64) - if err != nil { - p.errorf("invalid repetition in %s: %s", ctx, err) - } - } - for i := uint64(0); i < times; i++ { - vals = append(vals, sequenceValue{omitted: true}) - } - continue - } - - // Extract values. 
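The entry points above (`ParseStmts`, `ParseExpr`, and friends) all share one error-handling scheme: `errorf` panics with a `*ParseErr`, and `defer p.recover(&err)` at the public boundary converts the panic back into an ordinary returned error. A compact sketch of that control flow (simplified: the removed `recover` also prints a stack trace for runtime errors instead of re-panicking):

```go
package main

import "fmt"

type parser struct{ input string }

// errorf aborts parsing by panicking with an error value; the
// removed parser panics with a *ParseErr carrying line/pos context.
func (p *parser) errorf(format string, args ...interface{}) {
	panic(fmt.Errorf(format, args...))
}

// recover turns a parser panic back into an ordinary error.
func (p *parser) recover(errp *error) {
	if e := recover(); e != nil {
		err, ok := e.(error)
		if !ok {
			panic(e) // not one of ours: re-panic
		}
		*errp = err
	}
}

func (p *parser) parse() (err error) {
	defer p.recover(&err)
	if p.input == "" {
		p.errorf("no expression found in input")
	}
	return nil
}

func main() {
	fmt.Println((&parser{}).parse()) // no expression found in input
}
```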
- sign := 1.0 - if t := p.peek().typ; t == itemSUB || t == itemADD { - if p.next().typ == itemSUB { - sign = -1 - } - } - var k float64 - if t := p.peek().typ; t == itemNumber { - k = sign * p.number(p.expect(itemNumber, ctx).val) - } else if t == itemIdentifier && p.peek().val == "stale" { - p.next() - k = math.Float64frombits(value.StaleNaN) - } else { - p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek()) - } - vals = append(vals, sequenceValue{ - value: k, - }) - - // If there are no offset repetitions specified, proceed with the next value. - if t := p.peek(); t.typ == itemNumber || t.typ == itemBlank || t.typ == itemIdentifier && t.val == "stale" { - continue - } else if t.typ == itemEOF { - break - } else if t.typ != itemADD && t.typ != itemSUB { - p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek()) - } - - // Expand the repeated offsets into values. - sign = 1.0 - if p.next().typ == itemSUB { - sign = -1.0 - } - offset := sign * p.number(p.expect(itemNumber, ctx).val) - p.expect(itemTimes, ctx) - - times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64) - if err != nil { - p.errorf("invalid repetition in %s: %s", ctx, err) - } - - for i := uint64(0); i < times; i++ { - k += offset - vals = append(vals, sequenceValue{ - value: k, - }) - } - } - return m, vals, nil -} - -// typecheck checks correct typing of the parsed statements or expression. -func (p *parser) typecheck(node Node) (err error) { - defer p.recover(&err) - - p.checkType(node) - return nil -} - -// next returns the next token. -func (p *parser) next() item { - if p.peekCount > 0 { - p.peekCount-- - } else { - t := p.lex.nextItem() - // Skip comments. - for t.typ == itemComment { - t = p.lex.nextItem() - } - p.token[0] = t - } - if p.token[p.peekCount].typ == itemError { - p.errorf("%s", p.token[p.peekCount].val) - } - return p.token[p.peekCount] -} - -// peek returns but does not consume the next token. -func (p *parser) peek() item { - if p.peekCount > 0 { - return p.token[p.peekCount-1] - } - p.peekCount = 1 - - t := p.lex.nextItem() - // Skip comments. - for t.typ == itemComment { - t = p.lex.nextItem() - } - p.token[0] = t - return p.token[0] -} - -// backup backs the input stream up one token. -func (p *parser) backup() { - p.peekCount++ -} - -// errorf formats the error and terminates processing. -func (p *parser) errorf(format string, args ...interface{}) { - p.error(fmt.Errorf(format, args...)) -} - -// error terminates processing. -func (p *parser) error(err error) { - perr := &ParseErr{ - Line: p.lex.lineNumber(), - Pos: p.lex.linePosition(), - Err: err, - } - if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 { - perr.Line = 0 - } - panic(perr) -} - -// expect consumes the next token and guarantees it has the required type. -func (p *parser) expect(exp ItemType, context string) item { - token := p.next() - if token.typ != exp { - p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc()) - } - return token -} - -// expectOneOf consumes the next token and guarantees it has one of the required types. 
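The `next`/`peek`/`backup` trio above implements one-token lookahead: `peek` pulls an item into a small buffer and bumps `peekCount`, `next` consumes from the buffer first, and `backup` simply increments the count again. The same mechanism reduced to a single-slot buffer over pre-lexed tokens (types and input invented):

```go
package main

import "fmt"

type tokens struct {
	src       []string
	i         int
	buf       string
	peekCount int
}

// next consumes a token, preferring the buffered lookahead.
func (t *tokens) next() string {
	if t.peekCount > 0 {
		t.peekCount--
		return t.buf
	}
	t.buf = t.src[t.i]
	t.i++
	return t.buf
}

// peek returns the next token without consuming it.
func (t *tokens) peek() string {
	if t.peekCount == 0 {
		t.buf = t.src[t.i]
		t.i++
		t.peekCount = 1
	}
	return t.buf
}

// backup un-consumes the token most recently returned by next.
func (t *tokens) backup() { t.peekCount++ }

func main() {
	t := &tokens{src: []string{"sum", "(", "up", ")"}}
	fmt.Println(t.peek()) // sum (buffered, not consumed)
	fmt.Println(t.next()) // sum (the buffered token, now consumed)
	t.next()              // consumes "("
	t.backup()            // puts "(" back
	fmt.Println(t.next()) // ( again
}
```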
-func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item { - token := p.next() - if token.typ != exp1 && token.typ != exp2 { - p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc()) - } - return token -} - -var errUnexpected = fmt.Errorf("unexpected error") - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - // Print the stack trace but do not inhibit the running application. - buf := make([]byte, 64<<10) - buf = buf[:runtime.Stack(buf, false)] - - fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf) - *errp = errUnexpected - } else { - *errp = e.(error) - } - } -} - -// stmt parses any statement. -// -// alertStatement | recordStatement -// -func (p *parser) stmt() Statement { - switch tok := p.peek(); tok.typ { - case itemAlert: - return p.alertStmt() - case itemIdentifier, itemMetricIdentifier: - return p.recordStmt() - } - p.errorf("no valid statement detected") - return nil -} - -// alertStmt parses an alert rule. -// -// ALERT name IF expr [FOR duration] -// [LABELS label_set] -// [ANNOTATIONS label_set] -// -func (p *parser) alertStmt() *AlertStmt { - const ctx = "alert statement" - - p.expect(itemAlert, ctx) - name := p.expect(itemIdentifier, ctx) - // Alerts require a Vector typed expression. - p.expect(itemIf, ctx) - expr := p.expr() - - // Optional for clause. - var ( - duration time.Duration - err error - ) - if p.peek().typ == itemFor { - p.next() - dur := p.expect(itemDuration, ctx) - duration, err = parseDuration(dur.val) - if err != nil { - p.error(err) - } - } - - var ( - lset labels.Labels - annotations labels.Labels - ) - if p.peek().typ == itemLabels { - p.expect(itemLabels, ctx) - lset = p.labelSet() - } - if p.peek().typ == itemAnnotations { - p.expect(itemAnnotations, ctx) - annotations = p.labelSet() - } - - return &AlertStmt{ - Name: name.val, - Expr: expr, - Duration: duration, - Labels: lset, - Annotations: annotations, - } -} - -// recordStmt parses a recording rule. -func (p *parser) recordStmt() *RecordStmt { - const ctx = "record statement" - - name := p.expectOneOf(itemIdentifier, itemMetricIdentifier, ctx).val - - var lset labels.Labels - if p.peek().typ == itemLeftBrace { - lset = p.labelSet() - } - - p.expect(itemAssign, ctx) - expr := p.expr() - - return &RecordStmt{ - Name: name, - Labels: lset, - Expr: expr, - } -} - -// expr parses any expression. -func (p *parser) expr() Expr { - // Parse the starting expression. - expr := p.unaryExpr() - - // Loop through the operations and construct a binary operation tree based - // on the operators' precedence. - for { - // If the next token is not an operator the expression is done. - op := p.peek().typ - if !op.isOperator() { - return expr - } - p.next() // Consume operator. - - // Parse optional operator matching options. Its validity - // is checked in the type-checking stage. - vecMatching := &VectorMatching{ - Card: CardOneToOne, - } - if op.isSetOperator() { - vecMatching.Card = CardManyToMany - } - - returnBool := false - // Parse bool modifier. - if p.peek().typ == itemBool { - if !op.isComparisonOperator() { - p.errorf("bool modifier can only be used on comparison operators") - } - p.next() - returnBool = true - } - - // Parse ON/IGNORING clause. 
- if p.peek().typ == itemOn || p.peek().typ == itemIgnoring { - if p.peek().typ == itemOn { - vecMatching.On = true - } - p.next() - vecMatching.MatchingLabels = p.labels() - - // Parse grouping. - if t := p.peek().typ; t == itemGroupLeft || t == itemGroupRight { - p.next() - if t == itemGroupLeft { - vecMatching.Card = CardManyToOne - } else { - vecMatching.Card = CardOneToMany - } - if p.peek().typ == itemLeftParen { - vecMatching.Include = p.labels() - } - } - } - - for _, ln := range vecMatching.MatchingLabels { - for _, ln2 := range vecMatching.Include { - if ln == ln2 && vecMatching.On { - p.errorf("label %q must not occur in ON and GROUP clause at once", ln) - } - } - } - - // Parse the next operand. - rhs := p.unaryExpr() - - // Assign the new root based on the precedence of the LHS and RHS operators. - expr = p.balance(expr, op, rhs, vecMatching, returnBool) - } -} - -func (p *parser) balance(lhs Expr, op ItemType, rhs Expr, vecMatching *VectorMatching, returnBool bool) *BinaryExpr { - if lhsBE, ok := lhs.(*BinaryExpr); ok { - precd := lhsBE.Op.precedence() - op.precedence() - if (precd < 0) || (precd == 0 && op.isRightAssociative()) { - balanced := p.balance(lhsBE.RHS, op, rhs, vecMatching, returnBool) - if lhsBE.Op.isComparisonOperator() && !lhsBE.ReturnBool && balanced.Type() == ValueTypeScalar && lhsBE.LHS.Type() == ValueTypeScalar { - p.errorf("comparisons between scalars must use BOOL modifier") - } - return &BinaryExpr{ - Op: lhsBE.Op, - LHS: lhsBE.LHS, - RHS: balanced, - VectorMatching: lhsBE.VectorMatching, - ReturnBool: lhsBE.ReturnBool, - } - } - } - if op.isComparisonOperator() && !returnBool && rhs.Type() == ValueTypeScalar && lhs.Type() == ValueTypeScalar { - p.errorf("comparisons between scalars must use BOOL modifier") - } - return &BinaryExpr{ - Op: op, - LHS: lhs, - RHS: rhs, - VectorMatching: vecMatching, - ReturnBool: returnBool, - } -} - -// unaryExpr parses a unary expression. -// -// | | (+|-) | '(' ')' -// -func (p *parser) unaryExpr() Expr { - switch t := p.peek(); t.typ { - case itemADD, itemSUB: - p.next() - e := p.unaryExpr() - - // Simplify unary expressions for number literals. - if nl, ok := e.(*NumberLiteral); ok { - if t.typ == itemSUB { - nl.Val *= -1 - } - return nl - } - return &UnaryExpr{Op: t.typ, Expr: e} - - case itemLeftParen: - p.next() - e := p.expr() - p.expect(itemRightParen, "paren expression") - - return &ParenExpr{Expr: e} - } - e := p.primaryExpr() - - // Expression might be followed by a range selector. - if p.peek().typ == itemLeftBracket { - vs, ok := e.(*VectorSelector) - if !ok { - p.errorf("range specification must be preceded by a metric selector, but follows a %T instead", e) - } - e = p.rangeSelector(vs) - } - - // Parse optional offset. - if p.peek().typ == itemOffset { - offset := p.offset() - - switch s := e.(type) { - case *VectorSelector: - s.Offset = offset - case *MatrixSelector: - s.Offset = offset - default: - p.errorf("offset modifier must be preceded by an instant or range selector, but follows a %T instead", e) - } - } - - return e -} - -// rangeSelector parses a Matrix (a.k.a. range) selector based on a given -// Vector selector. 
-// rangeSelector parses a Matrix (a.k.a. range) selector based on a given
-// Vector selector.
-//
-// <Vector_selector> '[' <duration> ']'
-//
-func (p *parser) rangeSelector(vs *VectorSelector) *MatrixSelector {
- const ctx = "range selector"
- p.next()
-
- var erange time.Duration
- var err error
-
- erangeStr := p.expect(itemDuration, ctx).val
- erange, err = parseDuration(erangeStr)
- if err != nil {
- p.error(err)
- }
-
- p.expect(itemRightBracket, ctx)
-
- e := &MatrixSelector{
- Name: vs.Name,
- LabelMatchers: vs.LabelMatchers,
- Range: erange,
- }
- return e
-}
-
-// number parses a number.
-func (p *parser) number(val string) float64 {
- n, err := strconv.ParseInt(val, 0, 64)
- f := float64(n)
- if err != nil {
- f, err = strconv.ParseFloat(val, 64)
- }
- if err != nil {
- p.errorf("error parsing number: %s", err)
- }
- return f
-}
-
-// primaryExpr parses a primary expression.
-//
-// <metric_name> | <function_call> | <Vector_aggregation> | <literal>
-//
-func (p *parser) primaryExpr() Expr {
- switch t := p.next(); {
- case t.typ == itemNumber:
- f := p.number(t.val)
- return &NumberLiteral{f}
-
- case t.typ == itemString:
- return &StringLiteral{p.unquoteString(t.val)}
-
- case t.typ == itemLeftBrace:
- // Metric selector without metric name.
- p.backup()
- return p.VectorSelector("")
-
- case t.typ == itemIdentifier:
- // Check for function call.
- if p.peek().typ == itemLeftParen {
- return p.call(t.val)
- }
- fallthrough // Else metric selector.
-
- case t.typ == itemMetricIdentifier:
- return p.VectorSelector(t.val)
-
- case t.typ.isAggregator():
- p.backup()
- return p.aggrExpr()
-
- default:
- p.errorf("no valid expression found")
- }
- return nil
-}
-
-// labels parses a list of labelnames.
-//
-// '(' <label_name>, ... ')'
-//
-func (p *parser) labels() []string {
- const ctx = "grouping opts"
-
- p.expect(itemLeftParen, ctx)
-
- labels := []string{}
- if p.peek().typ != itemRightParen {
- for {
- id := p.next()
- if !isLabel(id.val) {
- p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
- }
- labels = append(labels, id.val)
-
- if p.peek().typ != itemComma {
- break
- }
- p.next()
- }
- }
- p.expect(itemRightParen, ctx)
-
- return labels
-}
-
-// aggrExpr parses an aggregation expression.
-//
-// <aggr_op> (<Vector_expr>) [by|without <label_list>]
-// <aggr_op> [by|without <label_list>] (<Vector_expr>)
-//
-func (p *parser) aggrExpr() *AggregateExpr {
- const ctx = "aggregation"
-
- agop := p.next()
- if !agop.typ.isAggregator() {
- p.errorf("expected aggregation operator but got %s", agop)
- }
- var grouping []string
- var without bool
-
- modifiersFirst := false
-
- if t := p.peek().typ; t == itemBy || t == itemWithout {
- if t == itemWithout {
- without = true
- }
- p.next()
- grouping = p.labels()
- modifiersFirst = true
- }
-
- p.expect(itemLeftParen, ctx)
- var param Expr
- if agop.typ.isAggregatorWithParam() {
- param = p.expr()
- p.expect(itemComma, ctx)
- }
- e := p.expr()
- p.expect(itemRightParen, ctx)
-
- if !modifiersFirst {
- if t := p.peek().typ; t == itemBy || t == itemWithout {
- if len(grouping) > 0 {
- p.errorf("aggregation must only contain one grouping clause")
- }
- if t == itemWithout {
- without = true
- }
- p.next()
- grouping = p.labels()
- }
- }
-
- return &AggregateExpr{
- Op: agop.typ,
- Expr: e,
- Param: param,
- Grouping: grouping,
- Without: without,
- }
-}
-
-// call parses a function call.
-//
-// <func_name> '(' [ <arg_value>, ... ] ')'
-//
-func (p *parser) call(name string) *Call {
- const ctx = "function call"
-
- fn, exist := getFunction(name)
- if !exist {
- p.errorf("unknown function with name %q", name)
- }
-
- p.expect(itemLeftParen, ctx)
- // Might be call without args.
- if p.peek().typ == itemRightParen {
- p.next() // Consume.
- return &Call{fn, nil}
- }
-
- var args []Expr
- for {
- e := p.expr()
- args = append(args, e)
-
- // Terminate if no more arguments.
- if p.peek().typ != itemComma {
- break
- }
- p.next()
- }
-
- // Call must be closed.
- p.expect(itemRightParen, ctx)
-
- return &Call{Func: fn, Args: args}
-}
-
-// labelSet parses a set of label matchers.
-//
-// '{' [ <label_name> '=' <label_value>, ... ] '}'
-//
-func (p *parser) labelSet() labels.Labels {
- set := []labels.Label{}
- for _, lm := range p.labelMatchers(itemEQL) {
- set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
- }
- return labels.New(set...)
-}
-
-// labelMatchers parses a set of label matchers.
-//
-// '{' [ <label_name> <match_op> <match_string>, ... ] '}'
-//
-func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
- const ctx = "label matching"
-
- matchers := []*labels.Matcher{}
-
- p.expect(itemLeftBrace, ctx)
-
- // Check if no matchers are provided.
- if p.peek().typ == itemRightBrace {
- p.next()
- return matchers
- }
-
- for {
- label := p.expect(itemIdentifier, ctx)
-
- op := p.next().typ
- if !op.isOperator() {
- p.errorf("expected label matching operator but got %s", op)
- }
- var validOp = false
- for _, allowedOp := range operators {
- if op == allowedOp {
- validOp = true
- }
- }
- if !validOp {
- p.errorf("operator must be one of %q, is %q", operators, op)
- }
-
- val := p.unquoteString(p.expect(itemString, ctx).val)
-
- // Map the item to the respective match type.
- var matchType labels.MatchType
- switch op {
- case itemEQL:
- matchType = labels.MatchEqual
- case itemNEQ:
- matchType = labels.MatchNotEqual
- case itemEQLRegex:
- matchType = labels.MatchRegexp
- case itemNEQRegex:
- matchType = labels.MatchNotRegexp
- default:
- p.errorf("item %q is not a metric match type", op)
- }
-
- m, err := labels.NewMatcher(matchType, label.val, val)
- if err != nil {
- p.error(err)
- }
-
- matchers = append(matchers, m)
-
- if p.peek().typ == itemIdentifier {
- p.errorf("missing comma before next identifier %q", p.peek().val)
- }
-
- // Terminate list if last matcher.
- if p.peek().typ != itemComma {
- break
- }
- p.next()
-
- // Allow comma after each item in a multi-line listing.
- if p.peek().typ == itemRightBrace {
- break
- }
- }
-
- p.expect(itemRightBrace, ctx)
-
- return matchers
-}
-
-// metric parses a metric.
-//
-// <label_set>
-// <metric_identifier> [<label_set>]
-//
-func (p *parser) metric() labels.Labels {
- name := ""
- var m labels.Labels
-
- t := p.peek().typ
- if t == itemIdentifier || t == itemMetricIdentifier {
- name = p.next().val
- t = p.peek().typ
- }
- if t != itemLeftBrace && name == "" {
- p.errorf("missing metric name or metric selector")
- }
- if t == itemLeftBrace {
- m = p.labelSet()
- }
- if name != "" {
- m = append(m, labels.Label{Name: labels.MetricName, Value: name})
- sort.Sort(m)
- }
- return m
-}
-
-// offset parses an offset modifier.
-//
-// offset <duration>
-//
-func (p *parser) offset() time.Duration {
- const ctx = "offset"
-
- p.next()
- offi := p.expect(itemDuration, ctx)
-
- offset, err := parseDuration(offi.val)
- if err != nil {
- p.error(err)
- }
-
- return offset
-}
-
-// VectorSelector parses a new (instant) vector selector.
-//
-// <metric_identifier> [<label_matchers>]
-// [<metric_identifier>] <label_matchers>
-//
-func (p *parser) VectorSelector(name string) *VectorSelector {
- var matchers []*labels.Matcher
- // Parse label matching if any.
- if t := p.peek(); t.typ == itemLeftBrace {
- matchers = p.labelMatchers(itemEQL, itemNEQ, itemEQLRegex, itemNEQRegex)
- }
- // Metric name must not be set in the label matchers and before at the same time.
- if name != "" { - for _, m := range matchers { - if m.Name == labels.MetricName { - p.errorf("metric name must not be set twice: %q or %q", name, m.Value) - } - } - // Set name label matching. - m, err := labels.NewMatcher(labels.MatchEqual, labels.MetricName, name) - if err != nil { - panic(err) // Must not happen with metric.Equal. - } - matchers = append(matchers, m) - } - - if len(matchers) == 0 { - p.errorf("vector selector must contain label matchers or metric name") - } - // A Vector selector must contain at least one non-empty matcher to prevent - // implicit selection of all metrics (e.g. by a typo). - notEmpty := false - for _, lm := range matchers { - if !lm.Matches("") { - notEmpty = true - break - } - } - if !notEmpty { - p.errorf("vector selector must contain at least one non-empty matcher") - } - - return &VectorSelector{ - Name: name, - LabelMatchers: matchers, - } -} - -// expectType checks the type of the node and raises an error if it -// is not of the expected type. -func (p *parser) expectType(node Node, want ValueType, context string) { - t := p.checkType(node) - if t != want { - p.errorf("expected type %s in %s, got %s", documentedType(want), context, documentedType(t)) - } -} - -// check the types of the children of each node and raise an error -// if they do not form a valid node. -// -// Some of these checks are redundant as the the parsing stage does not allow -// them, but the costs are small and might reveal errors when making changes. -func (p *parser) checkType(node Node) (typ ValueType) { - // For expressions the type is determined by their Type function. - // Statements and lists do not have a type but are not invalid either. - switch n := node.(type) { - case Statements, Expressions, Statement: - typ = ValueTypeNone - case Expr: - typ = n.Type() - default: - p.errorf("unknown node type: %T", node) - } - - // Recursively check correct typing for child nodes and raise - // errors in case of bad typing. 
- switch n := node.(type) { - case Statements: - for _, s := range n { - p.expectType(s, ValueTypeNone, "statement list") - } - case *AlertStmt: - p.expectType(n.Expr, ValueTypeVector, "alert statement") - - case *EvalStmt: - ty := p.checkType(n.Expr) - if ty == ValueTypeNone { - p.errorf("evaluation statement must have a valid expression type but got %s", documentedType(ty)) - } - - case *RecordStmt: - ty := p.checkType(n.Expr) - if ty != ValueTypeVector && ty != ValueTypeScalar { - p.errorf("record statement must have a valid expression of type instant vector or scalar but got %s", documentedType(ty)) - } - - case Expressions: - for _, e := range n { - ty := p.checkType(e) - if ty == ValueTypeNone { - p.errorf("expression must have a valid expression type but got %s", documentedType(ty)) - } - } - case *AggregateExpr: - if !n.Op.isAggregator() { - p.errorf("aggregation operator expected in aggregation expression but got %q", n.Op) - } - p.expectType(n.Expr, ValueTypeVector, "aggregation expression") - if n.Op == itemTopK || n.Op == itemBottomK || n.Op == itemQuantile { - p.expectType(n.Param, ValueTypeScalar, "aggregation parameter") - } - if n.Op == itemCountValues { - p.expectType(n.Param, ValueTypeString, "aggregation parameter") - } - - case *BinaryExpr: - lt := p.checkType(n.LHS) - rt := p.checkType(n.RHS) - - if !n.Op.isOperator() { - p.errorf("binary expression does not support operator %q", n.Op) - } - if (lt != ValueTypeScalar && lt != ValueTypeVector) || (rt != ValueTypeScalar && rt != ValueTypeVector) { - p.errorf("binary expression must contain only scalar and instant vector types") - } - - if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil { - if len(n.VectorMatching.MatchingLabels) > 0 { - p.errorf("vector matching only allowed between instant vectors") - } - n.VectorMatching = nil - } else { - // Both operands are Vectors. 
- if n.Op.isSetOperator() { - if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { - p.errorf("no grouping allowed for %q operation", n.Op) - } - if n.VectorMatching.Card != CardManyToMany { - p.errorf("set operations must always be many-to-many") - } - } - } - - if (lt == ValueTypeScalar || rt == ValueTypeScalar) && n.Op.isSetOperator() { - p.errorf("set operator %q not allowed in binary scalar expression", n.Op) - } - - case *Call: - nargs := len(n.Func.ArgTypes) - if n.Func.Variadic == 0 { - if nargs != len(n.Args) { - p.errorf("expected %d argument(s) in call to %q, got %d", nargs, n.Func.Name, len(n.Args)) - } - } else { - na := nargs - 1 - if na > len(n.Args) { - p.errorf("expected at least %d argument(s) in call to %q, got %d", na, n.Func.Name, len(n.Args)) - } else if nargsmax := na + n.Func.Variadic; n.Func.Variadic > 0 && nargsmax < len(n.Args) { - p.errorf("expected at most %d argument(s) in call to %q, got %d", nargsmax, n.Func.Name, len(n.Args)) - } - } - - for i, arg := range n.Args { - if i >= len(n.Func.ArgTypes) { - i = len(n.Func.ArgTypes) - 1 - } - p.expectType(arg, n.Func.ArgTypes[i], fmt.Sprintf("call to function %q", n.Func.Name)) - } - - case *ParenExpr: - p.checkType(n.Expr) - - case *UnaryExpr: - if n.Op != itemADD && n.Op != itemSUB { - p.errorf("only + and - operators allowed for unary expressions") - } - if t := p.checkType(n.Expr); t != ValueTypeScalar && t != ValueTypeVector { - p.errorf("unary expression only allowed on expressions of type scalar or instant vector, got %q", documentedType(t)) - } - - case *NumberLiteral, *MatrixSelector, *StringLiteral, *VectorSelector: - // Nothing to do for terminals. - - default: - p.errorf("unknown node type: %T", node) - } - return -} - -func (p *parser) unquoteString(s string) string { - unquoted, err := strutil.Unquote(s) - if err != nil { - p.errorf("error unquoting string %q: %s", s, err) - } - return unquoted -} - -func parseDuration(ds string) (time.Duration, error) { - dur, err := model.ParseDuration(ds) - if err != nil { - return 0, err - } - if dur == 0 { - return 0, fmt.Errorf("duration must be greater than 0") - } - return time.Duration(dur), nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/printer.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/printer.go deleted file mode 100644 index 417f3458f..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/printer.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promql - -import ( - "fmt" - "sort" - "strings" - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" -) - -// Tree returns a string of the tree structure of the given node. 
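A toy rendering of the layout this printer produces, using a hypothetical node type (the real Tree/tree walker follows right after):

package main

import "fmt"

type node struct {
	name string
	kids []*node
}

// render mimics tree() below: one line per node, with the level marker
// extended by " · · ·" for each step of nesting.
func render(n *node, level string) string {
	s := fmt.Sprintf("%s |---- %s\n", level, n.name)
	level += " · · ·"
	for _, k := range n.kids {
		s += render(k, level)
	}
	return s
}

func main() {
	fmt.Print(render(&node{"BinaryExpr(+)", []*node{
		{"NumberLiteral(1)", nil},
		{"BinaryExpr(*)", []*node{{"NumberLiteral(2)", nil}, {"NumberLiteral(3)", nil}}},
	}}, ""))
	// Output:
	//  |---- BinaryExpr(+)
	//  · · · |---- NumberLiteral(1)
	//  · · · |---- BinaryExpr(*)
	//  · · · · · · |---- NumberLiteral(2)
	//  · · · · · · |---- NumberLiteral(3)
}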
-func Tree(node Node) string { - return tree(node, "") -} - -func tree(node Node, level string) string { - if node == nil { - return fmt.Sprintf("%s |---- %T\n", level, node) - } - typs := strings.Split(fmt.Sprintf("%T", node), ".")[1] - - var t string - // Only print the number of statements for readability. - if stmts, ok := node.(Statements); ok { - t = fmt.Sprintf("%s |---- %s :: %d\n", level, typs, len(stmts)) - } else { - t = fmt.Sprintf("%s |---- %s :: %s\n", level, typs, node) - } - - level += " · · ·" - - switch n := node.(type) { - case Statements: - for _, s := range n { - t += tree(s, level) - } - case *AlertStmt: - t += tree(n.Expr, level) - - case *EvalStmt: - t += tree(n.Expr, level) - - case *RecordStmt: - t += tree(n.Expr, level) - - case Expressions: - for _, e := range n { - t += tree(e, level) - } - case *AggregateExpr: - t += tree(n.Expr, level) - - case *BinaryExpr: - t += tree(n.LHS, level) - t += tree(n.RHS, level) - - case *Call: - t += tree(n.Args, level) - - case *ParenExpr: - t += tree(n.Expr, level) - - case *UnaryExpr: - t += tree(n.Expr, level) - - case *MatrixSelector, *NumberLiteral, *StringLiteral, *VectorSelector: - // nothing to do - - default: - panic("promql.Tree: not all node types covered") - } - return t -} - -func (stmts Statements) String() (s string) { - if len(stmts) == 0 { - return "" - } - for _, stmt := range stmts { - s += stmt.String() - s += "\n\n" - } - return s[:len(s)-2] -} - -func (node *AlertStmt) String() string { - s := fmt.Sprintf("ALERT %s", node.Name) - s += fmt.Sprintf("\n\tIF %s", node.Expr) - if node.Duration > 0 { - s += fmt.Sprintf("\n\tFOR %s", model.Duration(node.Duration)) - } - if len(node.Labels) > 0 { - s += fmt.Sprintf("\n\tLABELS %s", node.Labels) - } - if len(node.Annotations) > 0 { - s += fmt.Sprintf("\n\tANNOTATIONS %s", node.Annotations) - } - return s -} - -func (node *EvalStmt) String() string { - return "EVAL " + node.Expr.String() -} - -func (node *RecordStmt) String() string { - s := fmt.Sprintf("%s%s = %s", node.Name, node.Labels, node.Expr) - return s -} - -func (es Expressions) String() (s string) { - if len(es) == 0 { - return "" - } - for _, e := range es { - s += e.String() - s += ", " - } - return s[:len(s)-2] -} - -func (node *AggregateExpr) String() string { - aggrString := node.Op.String() - - if node.Without { - aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", ")) - } else { - if len(node.Grouping) > 0 { - aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", ")) - } - } - - aggrString += "(" - if node.Op.isAggregatorWithParam() { - aggrString += fmt.Sprintf("%s, ", node.Param) - } - aggrString += fmt.Sprintf("%s)", node.Expr) - - return aggrString -} - -func (node *BinaryExpr) String() string { - returnBool := "" - if node.ReturnBool { - returnBool = " bool" - } - - matching := "" - vm := node.VectorMatching - if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) { - if vm.On { - matching = fmt.Sprintf(" on(%s)", strings.Join(vm.MatchingLabels, ", ")) - } else { - matching = fmt.Sprintf(" ignoring(%s)", strings.Join(vm.MatchingLabels, ", ")) - } - if vm.Card == CardManyToOne || vm.Card == CardOneToMany { - matching += " group_" - if vm.Card == CardManyToOne { - matching += "left" - } else { - matching += "right" - } - matching += fmt.Sprintf("(%s)", strings.Join(vm.Include, ", ")) - } - } - return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS) -} - -func (node *Call) String() string { - return fmt.Sprintf("%s(%s)", 
node.Func.Name, node.Args) -} - -func (node *MatrixSelector) String() string { - vecSelector := &VectorSelector{ - Name: node.Name, - LabelMatchers: node.LabelMatchers, - } - offset := "" - if node.Offset != time.Duration(0) { - offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset)) - } - return fmt.Sprintf("%s[%s]%s", vecSelector.String(), model.Duration(node.Range), offset) -} - -func (node *NumberLiteral) String() string { - return fmt.Sprint(node.Val) -} - -func (node *ParenExpr) String() string { - return fmt.Sprintf("(%s)", node.Expr) -} - -func (node *StringLiteral) String() string { - return fmt.Sprintf("%q", node.Val) -} - -func (node *UnaryExpr) String() string { - return fmt.Sprintf("%s%s", node.Op, node.Expr) -} - -func (node *VectorSelector) String() string { - labelStrings := make([]string, 0, len(node.LabelMatchers)-1) - for _, matcher := range node.LabelMatchers { - // Only include the __name__ label if its no equality matching. - if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual { - continue - } - labelStrings = append(labelStrings, matcher.String()) - } - offset := "" - if node.Offset != time.Duration(0) { - offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset)) - } - - if len(labelStrings) == 0 { - return fmt.Sprintf("%s%s", node.Name, offset) - } - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}%s", node.Name, strings.Join(labelStrings, ","), offset) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/quantile.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/quantile.go deleted file mode 100644 index 420afd3eb..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/quantile.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promql - -import ( - "math" - "sort" - - "github.com/prometheus/prometheus/model/labels" -) - -// Helpers to calculate quantiles. - -// excludedLabels are the labels to exclude from signature calculation for -// quantiles. -var excludedLabels = []string{ - labels.MetricName, - labels.BucketLabel, -} - -type bucket struct { - upperBound float64 - count float64 -} - -// buckets implements sort.Interface. -type buckets []bucket - -func (b buckets) Len() int { return len(b) } -func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound } - -type metricWithBuckets struct { - metric labels.Labels - buckets buckets -} - -// bucketQuantile calculates the quantile 'q' based on the given buckets. The -// buckets will be sorted by upperBound by this function (i.e. no sorting -// needed before calling this function). The quantile value is interpolated -// assuming a linear distribution within a bucket. However, if the quantile -// falls into the highest bucket, the upper bound of the 2nd highest bucket is -// returned. 
A natural lower bound of 0 is assumed if the upper bound of the -// lowest bucket is greater 0. In that case, interpolation in the lowest bucket -// happens linearly between 0 and the upper bound of the lowest bucket. -// However, if the lowest bucket has an upper bound less or equal 0, this upper -// bound is returned if the quantile falls into the lowest bucket. -// -// There are a number of special cases (once we have a way to report errors -// happening during evaluations of AST functions, we should report those -// explicitly): -// -// If 'buckets' has fewer than 2 elements, NaN is returned. -// -// If the highest bucket is not +Inf, NaN is returned. -// -// If q<0, -Inf is returned. -// -// If q>1, +Inf is returned. -func bucketQuantile(q float64, buckets buckets) float64 { - if q < 0 { - return math.Inf(-1) - } - if q > 1 { - return math.Inf(+1) - } - if len(buckets) < 2 { - return math.NaN() - } - sort.Sort(buckets) - if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) { - return math.NaN() - } - - ensureMonotonic(buckets) - - rank := q * buckets[len(buckets)-1].count - b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank }) - - if b == len(buckets)-1 { - return buckets[len(buckets)-2].upperBound - } - if b == 0 && buckets[0].upperBound <= 0 { - return buckets[0].upperBound - } - var ( - bucketStart float64 - bucketEnd = buckets[b].upperBound - count = buckets[b].count - ) - if b > 0 { - bucketStart = buckets[b-1].upperBound - count -= buckets[b-1].count - rank -= buckets[b-1].count - } - return bucketStart + (bucketEnd-bucketStart)*(rank/count) -} - -// The assumption that bucket counts increase monotonically with increasing -// upperBound may be violated during: -// -// * Recording rule evaluation of histogram_quantile, especially when rate() -// has been applied to the underlying bucket timeseries. -// * Evaluation of histogram_quantile computed over federated bucket -// timeseries, especially when rate() has been applied. -// -// This is because scraped data is not made available to rule evaluation or -// federation atomically, so some buckets are computed with data from the -// most recent scrapes, but the other buckets are missing data from the most -// recent scrape. -// -// Monotonicity is usually guaranteed because if a bucket with upper bound -// u1 has count c1, then any bucket with a higher upper bound u > u1 must -// have counted all c1 observations and perhaps more, so that c >= c1. -// -// Randomly interspersed partial sampling breaks that guarantee, and rate() -// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from -// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The -// monotonicity is broken. It is exacerbated by rate() because under normal -// operation, cumulative counting of buckets will cause the bucket counts to -// diverge such that small differences from missing samples are not a problem. -// rate() removes this divergence.) -// -// bucketQuantile depends on that monotonicity to do a binary search for the -// bucket with the φ-quantile count, so breaking the monotonicity -// guarantee causes bucketQuantile() to return undefined (nonsense) results. -// -// As a somewhat hacky solution until ingestion is atomic per scrape, we -// calculate the "envelope" of the histogram buckets, essentially removing -// any decreases in the count between successive buckets. 
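To make the interpolation concrete, here is a trimmed, self-contained copy of bucketQuantile above with a worked example; the monotonicity repair is skipped because the sample buckets are already monotone.

package main

import (
	"fmt"
	"math"
	"sort"
)

type bucket struct{ upperBound, count float64 }

type buckets []bucket

func (b buckets) Len() int           { return len(b) }
func (b buckets) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }

func bucketQuantile(q float64, bs buckets) float64 {
	if q < 0 {
		return math.Inf(-1)
	}
	if q > 1 {
		return math.Inf(+1)
	}
	if len(bs) < 2 {
		return math.NaN()
	}
	sort.Sort(bs)
	if !math.IsInf(bs[len(bs)-1].upperBound, +1) {
		return math.NaN()
	}
	rank := q * bs[len(bs)-1].count
	b := sort.Search(len(bs)-1, func(i int) bool { return bs[i].count >= rank })
	if b == len(bs)-1 {
		return bs[len(bs)-2].upperBound
	}
	if b == 0 && bs[0].upperBound <= 0 {
		return bs[0].upperBound
	}
	var bucketStart float64
	bucketEnd, count := bs[b].upperBound, bs[b].count
	if b > 0 {
		bucketStart = bs[b-1].upperBound
		count -= bs[b-1].count
		rank -= bs[b-1].count
	}
	return bucketStart + (bucketEnd-bucketStart)*(rank/count)
}

func main() {
	// Cumulative counts: 60 observations <= 0.1s, 90 <= 0.5s, 100 in total.
	bs := buckets{{0.1, 60}, {0.5, 90}, {math.Inf(+1), 100}}
	// q=0.5: rank 50 falls in the first bucket -> 0 + 0.1*(50/60) ~= 0.083.
	fmt.Println(bucketQuantile(0.5, bs))
	// q=0.9: rank 90 falls in the second bucket -> 0.1 + 0.4*(30/30) = 0.5.
	fmt.Println(bucketQuantile(0.9, bs))
}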
-
-func ensureMonotonic(buckets buckets) {
- max := buckets[0].count
- for i := range buckets[1:] {
- switch {
- case buckets[i+1].count > max:
- max = buckets[i+1].count
- case buckets[i+1].count < max:
- buckets[i+1].count = max
- }
- }
-}
-
-// quantile calculates the given quantile of a vector of samples.
-//
-// The Vector will be sorted.
-// If 'values' has zero elements, NaN is returned.
-// If q<0, -Inf is returned.
-// If q>1, +Inf is returned.
-func quantile(q float64, values vectorByValueHeap) float64 {
- if len(values) == 0 {
- return math.NaN()
- }
- if q < 0 {
- return math.Inf(-1)
- }
- if q > 1 {
- return math.Inf(+1)
- }
- sort.Sort(values)
-
- n := float64(len(values))
- // When the quantile lies between two samples,
- // we use a weighted average of the two samples.
- rank := q * (n - 1)
-
- lowerIndex := math.Max(0, math.Floor(rank))
- upperIndex := math.Min(n-1, lowerIndex+1)
-
- weight := rank - math.Floor(rank)
- return values[int(lowerIndex)].V*(1-weight) + values[int(upperIndex)].V*weight
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go
deleted file mode 100644
index 1c823d78b..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go
+++ /dev/null
@@ -1,626 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//nolint //Since this was copied from Prometheus leave it as is
-package promql
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "math"
- "os"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/prometheus/common/model"
-
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/storage"
- "github.com/prometheus/prometheus/tsdb"
- "github.com/prometheus/prometheus/util/testutil"
-)
-
-var (
- minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
-
- patSpace = regexp.MustCompile("[\t ]+")
- patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
- patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
-)
-
-const (
- epsilon = 0.000001 // Relative error allowed for sample values.
-)
-
-var testStartTime = time.Unix(0, 0)
-
-// Test is a sequence of read and write commands that are run
-// against a test storage.
-type Test struct {
- testutil.T
-
- cmds []testCommand
-
- storage storage.Storage
-
- queryEngine *Engine
- context context.Context
- cancelCtx context.CancelFunc
-}
-
-// NewTest returns an initialized empty Test.
-func NewTest(t testutil.T, input string) (*Test, error) {
- test := &Test{
- T: t,
- cmds: []testCommand{},
- }
- err := test.parse(input)
- test.clear()
-
- return test, err
-}
-
-func newTestFromFile(t testutil.T, filename string) (*Test, error) {
- content, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- return NewTest(t, string(content))
-}
-
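For reference, the scripts this harness consumes (matched by patLoad and patEvalInstant above) look like the following; the metric and values here are made up, and the 0+10x10 notation (which expands to 0, 10, ..., 100, one sample per step) is handled by parseSeriesDesc, defined elsewhere in this package:

    load 5m
        http_requests{job="api-server", instance="0"}  0+10x10

    eval instant at 50m sum by (job) (http_requests)
        {job="api-server"} 100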
-// QueryEngine returns the test's query engine.
-func (t *Test) QueryEngine() *Engine {
- return t.queryEngine
-}
-
-// Queryable allows querying the test data.
-func (t *Test) Queryable() storage.Queryable {
- return t.storage
-}
-
-// Context returns the test's context.
-func (t *Test) Context() context.Context {
- return t.context
-}
-
-// Storage returns the test's storage.
-func (t *Test) Storage() storage.Storage {
- return t.storage
-}
-
-func raise(line int, format string, v ...interface{}) error {
- return &ParseErr{
- Line: line + 1,
- Err: fmt.Errorf(format, v...),
- }
-}
-
-func (t *Test) parseLoad(lines []string, i int) (int, *loadCmd, error) {
- if !patLoad.MatchString(lines[i]) {
- return i, nil, raise(i, "invalid load command. (load <step:duration>)")
- }
- parts := patLoad.FindStringSubmatch(lines[i])
-
- gap, err := model.ParseDuration(parts[1])
- if err != nil {
- return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
- }
- cmd := newLoadCmd(time.Duration(gap))
- for i+1 < len(lines) {
- i++
- defLine := lines[i]
- if len(defLine) == 0 {
- i--
- break
- }
- metric, vals, err := parseSeriesDesc(defLine)
- if err != nil {
- if perr, ok := err.(*ParseErr); ok {
- perr.Line = i + 1
- }
- return i, nil, err
- }
- cmd.set(metric, vals...)
- }
- return i, cmd, nil
-}
-
-func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
- if !patEvalInstant.MatchString(lines[i]) {
- return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>)")
- }
- parts := patEvalInstant.FindStringSubmatch(lines[i])
- var (
- mod = parts[1]
- at = parts[2]
- expr = parts[3]
- )
- _, err := ParseExpr(expr)
- if err != nil {
- if perr, ok := err.(*ParseErr); ok {
- perr.Line = i + 1
- perr.Pos += strings.Index(lines[i], expr)
- }
- return i, nil, err
- }
-
- offset, err := model.ParseDuration(at)
- if err != nil {
- return i, nil, raise(i, "invalid offset definition %q: %s", at, err)
- }
- ts := testStartTime.Add(time.Duration(offset))
-
- cmd := newEvalCmd(expr, ts, i+1)
- switch mod {
- case "ordered":
- cmd.ordered = true
- case "fail":
- cmd.fail = true
- }
-
- for j := 1; i+1 < len(lines); j++ {
- i++
- defLine := lines[i]
- if len(defLine) == 0 {
- i--
- break
- }
- if f, err := parseNumber(defLine); err == nil {
- cmd.expect(0, nil, sequenceValue{value: f})
- break
- }
- metric, vals, err := parseSeriesDesc(defLine)
- if err != nil {
- if perr, ok := err.(*ParseErr); ok {
- perr.Line = i + 1
- }
- return i, nil, err
- }
-
- // Currently, we are not expecting any matrices.
- if len(vals) > 1 {
- return i, nil, raise(i, "expecting multiple values in instant evaluation is not allowed")
- }
- cmd.expect(j, metric, vals...)
- }
- return i, cmd, nil
-}
-
-// parse parses the given command sequence and appends it to the test.
-func (t *Test) parse(input string) error {
- // Trim lines and remove comments.
- lines := strings.Split(input, "\n")
- for i, l := range lines {
- l = strings.TrimSpace(l)
- if strings.HasPrefix(l, "#") {
- l = ""
- }
- lines[i] = l
- }
- var err error
-
- // Scan for steps line by line.
- for i := 0; i < len(lines); i++ { - l := lines[i] - if len(l) == 0 { - continue - } - var cmd testCommand - - switch c := strings.ToLower(patSpace.Split(l, 2)[0]); { - case c == "clear": - cmd = &clearCmd{} - case c == "load": - i, cmd, err = t.parseLoad(lines, i) - case strings.HasPrefix(c, "eval"): - i, cmd, err = t.parseEval(lines, i) - default: - return raise(i, "invalid command %q", l) - } - if err != nil { - return err - } - t.cmds = append(t.cmds, cmd) - } - return nil -} - -// testCommand is an interface that ensures that only the package internal -// types can be a valid command for a test. -type testCommand interface { - testCmd() -} - -func (*clearCmd) testCmd() {} -func (*loadCmd) testCmd() {} -func (*evalCmd) testCmd() {} - -// loadCmd is a command that loads sequences of sample values for specific -// metrics into the storage. -type loadCmd struct { - gap time.Duration - metrics map[uint64]labels.Labels - defs map[uint64][]Point -} - -func newLoadCmd(gap time.Duration) *loadCmd { - return &loadCmd{ - gap: gap, - metrics: map[uint64]labels.Labels{}, - defs: map[uint64][]Point{}, - } -} - -func (cmd loadCmd) String() string { - return "load" -} - -// set a sequence of sample values for the given metric. -func (cmd *loadCmd) set(m labels.Labels, vals ...sequenceValue) { - h := m.Hash() - - samples := make([]Point, 0, len(vals)) - ts := testStartTime - for _, v := range vals { - if !v.omitted { - samples = append(samples, Point{ - T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond), - V: v.value, - }) - } - ts = ts.Add(cmd.gap) - } - cmd.defs[h] = samples - cmd.metrics[h] = m -} - -// append the defined time series to the storage. -func (cmd *loadCmd) append(a storage.Appender) error { - for h, smpls := range cmd.defs { - m := cmd.metrics[h] - - for _, s := range smpls { - if _, err := a.Append(0, m, s.T, s.V); err != nil { - return err - } - } - } - return nil -} - -// evalCmd is a command that evaluates an expression for the given time (range) -// and expects a specific result. -type evalCmd struct { - expr string - start time.Time - line int - - fail, ordered bool - - metrics map[uint64]labels.Labels - expected map[uint64]entry -} - -type entry struct { - pos int - vals []sequenceValue -} - -func (e entry) String() string { - return fmt.Sprintf("%d: %s", e.pos, e.vals) -} - -func newEvalCmd(expr string, start time.Time, line int) *evalCmd { - return &evalCmd{ - expr: expr, - start: start, - line: line, - - metrics: map[uint64]labels.Labels{}, - expected: map[uint64]entry{}, - } -} - -func (ev *evalCmd) String() string { - return "eval" -} - -// expect adds a new metric with a sequence of values to the set of expected -// results for the query. -func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...sequenceValue) { - if m == nil { - ev.expected[0] = entry{pos: pos, vals: vals} - return - } - h := m.Hash() - ev.metrics[h] = m - ev.expected[h] = entry{pos: pos, vals: vals} -} - -// compareResult compares the result value with the defined expectation. 
-func (ev *evalCmd) compareResult(result Value) error {
- switch val := result.(type) {
- case Matrix:
- return fmt.Errorf("received range result on instant evaluation")
-
- case Vector:
- seen := map[uint64]bool{}
- for pos, v := range val {
- fp := v.Metric.Hash()
- if _, ok := ev.metrics[fp]; !ok {
- return fmt.Errorf("unexpected metric %s in result", v.Metric)
- }
- exp := ev.expected[fp]
- if ev.ordered && exp.pos != pos+1 {
- return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
- }
- if !almostEqual(exp.vals[0].value, v.V) {
- return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].value, v.Metric, v.V)
- }
-
- seen[fp] = true
- }
- for fp, expVals := range ev.expected {
- if !seen[fp] {
- fmt.Println("vector result", len(val), ev.expr)
- for _, ss := range val {
- fmt.Println(" ", ss.Metric, ss.Point)
- }
- return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
- }
- }
-
- case Scalar:
- if !almostEqual(ev.expected[0].vals[0].value, val.V) {
- return fmt.Errorf("expected Scalar %v but got %v", ev.expected[0].vals[0].value, val.V)
- }
-
- default:
- panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
- }
- return nil
-}
-
-// clearCmd is a command that wipes the test's storage state.
-type clearCmd struct{}
-
-func (cmd clearCmd) String() string {
- return "clear"
-}
-
-// Run executes the command sequence of the test. Until the maximum error number
-// is reached, evaluation errors do not terminate execution.
-func (t *Test) Run() error {
- for _, cmd := range t.cmds {
- err := t.exec(cmd)
- // TODO(fabxc): aggregate command errors, yield diffs for result
- // comparison errors.
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// exec processes a single step of the test.
-func (t *Test) exec(tc testCommand) error {
- switch cmd := tc.(type) {
- case *clearCmd:
- t.clear()
-
- case *loadCmd:
- app := t.storage.Appender(context.Background())
- if err := cmd.append(app); err != nil {
- app.Rollback()
- return err
- }
-
- if err := app.Commit(); err != nil {
- return err
- }
-
- case *evalCmd:
- q, _ := t.queryEngine.NewInstantQuery(t.storage, cmd.expr, cmd.start)
- res := q.Exec(t.context)
- if res.Err != nil {
- if cmd.fail {
- return nil
- }
- return fmt.Errorf("error evaluating query %q (line %d): %s", cmd.expr, cmd.line, res.Err)
- }
- defer q.Close()
- if res.Err == nil && cmd.fail {
- return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
- }
-
- err := cmd.compareResult(res.Value)
- if err != nil {
- return fmt.Errorf("error in %s %s: %s", cmd, cmd.expr, err)
- }
-
- // Check that the query returns the same result in range mode,
- // by checking against the middle step.
- q, _ = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute)
- rangeRes := q.Exec(t.context)
- if rangeRes.Err != nil {
- return fmt.Errorf("error evaluating query %q (line %d) in range mode: %s", cmd.expr, cmd.line, rangeRes.Err)
- }
- defer q.Close()
- if cmd.ordered {
- // Ordering isn't defined for range queries.
- return nil
- }
- mat := rangeRes.Value.(Matrix)
- vec := make(Vector, 0, len(mat))
- for _, series := range mat {
- for _, point := range series.Points {
- if point.T == timeMilliseconds(cmd.start) {
- vec = append(vec, Sample{Metric: series.Metric, Point: point})
- break
- }
- }
- }
- if _, ok := res.Value.(Scalar); ok {
- err = cmd.compareResult(Scalar{V: vec[0].Point.V})
- } else {
- err = cmd.compareResult(vec)
- }
- if err != nil {
- return fmt.Errorf("error in %s %s (line %d) in range mode: %s", cmd, cmd.expr, cmd.line, err)
- }
-
- default:
- panic("promql.Test.exec: unknown test command type")
- }
- return nil
-}
-
-// clear the current test storage of all inserted samples.
-func (t *Test) clear() {
- if t.storage != nil {
- if err := t.storage.Close(); err != nil {
- t.T.Errorf("closing test storage: %s", err)
- t.T.FailNow()
- }
- }
- if t.cancelCtx != nil {
- t.cancelCtx()
- }
- t.storage = NewStorage(t)
-
- t.queryEngine = NewEngine(nil, nil, 20, 10*time.Second)
- t.context, t.cancelCtx = context.WithCancel(context.Background())
-}
-
-// Close closes resources associated with the Test.
-func (t *Test) Close() {
- t.cancelCtx()
-
- if err := t.storage.Close(); err != nil {
- t.T.Errorf("closing test storage: %s", err)
- t.T.FailNow()
- }
-}
-
-// almostEqual returns true if the two sample values only differ by a
-// small relative error.
-func almostEqual(a, b float64) bool {
- // NaN has no equality but for testing we still want to know whether both values
- // are NaN.
- if math.IsNaN(a) && math.IsNaN(b) {
- return true
- }
-
- // Cf. http://floating-point-gui.de/errors/comparison/
- if a == b {
- return true
- }
-
- diff := math.Abs(a - b)
-
- if a == 0 || b == 0 || diff < minNormal {
- return diff < epsilon*minNormal
- }
- return diff/(math.Abs(a)+math.Abs(b)) < epsilon
-}
-
-func parseNumber(s string) (float64, error) {
- n, err := strconv.ParseInt(s, 0, 64)
- f := float64(n)
- if err != nil {
- f, err = strconv.ParseFloat(s, 64)
- }
- if err != nil {
- return 0, fmt.Errorf("error parsing number: %s", err)
- }
- return f, nil
-}
-
-// NewStorage returns a new storage for testing purposes
-// that removes all associated files on closing.
-func NewStorage(t testutil.T) storage.Storage {
- dir, err := ioutil.TempDir("", "test_storage")
- if err != nil {
- t.Errorf("Opening test dir failed: %s", err)
- t.FailNow()
- }
-
- // Tests just load data for a series sequentially. Thus we
- // need a long appendable window.
- db, err := tsdb.Open(dir, nil, nil, &tsdb.Options{
- MinBlockDuration: int64(24 * time.Hour / time.Millisecond),
- MaxBlockDuration: int64(24 * time.Hour / time.Millisecond),
- }, nil)
- if err != nil {
- t.Errorf("Opening test storage failed: %s", err)
- t.FailNow()
- }
- return testStorage{Storage: Adapter(db, int64(0)), dir: dir}
-}
-
-type testStorage struct {
- storage.Storage
- dir string
-}
-
-func (s testStorage) Close() error {
- if err := s.Storage.Close(); err != nil {
- return err
- }
- return os.RemoveAll(s.dir)
-}
-
-// Adapter returns an adapter as a storage.Storage.
-func Adapter(db *tsdb.DB, startTimeMargin int64) storage.Storage {
- return &adapter{db: db, startTimeMargin: startTimeMargin}
-}
-
-// adapter implements a storage.Storage around TSDB.
-type adapter struct {
- db *tsdb.DB
- startTimeMargin int64
-}
-
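The comparison in almostEqual above is relative rather than absolute, so the tolerance scales with the magnitude of the operands. A self-contained illustration:

package main

import (
	"fmt"
	"math"
)

const epsilon = 0.000001

// minNormal is the smallest positive normal float64, as in the helpers above.
var minNormal = math.Float64frombits(0x0010000000000000)

func almostEqual(a, b float64) bool {
	if math.IsNaN(a) && math.IsNaN(b) {
		return true
	}
	if a == b {
		return true
	}
	diff := math.Abs(a - b)
	if a == 0 || b == 0 || diff < minNormal {
		return diff < epsilon*minNormal
	}
	return diff/(math.Abs(a)+math.Abs(b)) < epsilon
}

func main() {
	fmt.Println(almostEqual(0.1+0.2, 0.3))           // true: relative error ~1e-16
	fmt.Println(almostEqual(100, 100.001))           // false: relative error ~5e-6 exceeds epsilon
	fmt.Println(almostEqual(math.NaN(), math.NaN())) // true by convention, for tests
}

-// StartTime implements the Storage interface.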
-func (a adapter) StartTime() (int64, error) { - var startTime int64 - - if len(a.db.Blocks()) > 0 { - startTime = a.db.Blocks()[0].Meta().MinTime - } else { - startTime = time.Now().Unix() * 1000 - } - - // Add a safety margin as it may take a few minutes for everything to spin up. - return startTime + a.startTimeMargin, nil -} - -func (a adapter) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return a.db.Querier(ctx, mint, maxt) -} - -func (a adapter) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - return a.db.ChunkQuerier(ctx, mint, maxt) -} - -// Appender returns a new appender against the storage. -func (a adapter) Appender(ctx context.Context) storage.Appender { - return a.db.Appender(ctx) -} - -// Close closes the storage and all its underlying resources. -func (a adapter) Close() error { - return a.db.Close() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/value.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/value.go deleted file mode 100644 index d7fc473ee..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/value.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promql - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/prometheus/model/labels" -) - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValueTypeMatrix } -func (Vector) Type() ValueType { return ValueTypeVector } -func (Scalar) Type() ValueType { return ValueTypeScalar } -func (String) Type() ValueType { return ValueTypeString } - -// ValueType describes a type of a value. -type ValueType string - -// The valid value types. -const ( - ValueTypeNone = "none" - ValueTypeVector = "vector" - ValueTypeScalar = "scalar" - ValueTypeMatrix = "matrix" - ValueTypeString = "string" -) - -// String represents a string value. -type String struct { - V string - T int64 -} - -func (s String) String() string { - return s.V -} - -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V}) -} - -// Scalar is a data point that's explicitly not associated with a metric. -type Scalar struct { - T int64 - V float64 -} - -func (s Scalar) String() string { - v := strconv.FormatFloat(s.V, 'f', -1, 64) - return fmt.Sprintf("scalar: %v @[%v]", v, s.T) -} - -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(s.V, 'f', -1, 64) - return json.Marshal([...]interface{}{float64(s.T) / 1000, v}) -} - -// Series is a stream of data points belonging to a metric. 
-type Series struct { - Metric labels.Labels `json:"metric"` - Points []Point `json:"values"` -} - -func (s Series) String() string { - vals := make([]string, len(s.Points)) - for i, v := range s.Points { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n")) -} - -// Point represents a single data point for a given timestamp. -type Point struct { - T int64 - V float64 -} - -func (p Point) String() string { - v := strconv.FormatFloat(p.V, 'f', -1, 64) - return fmt.Sprintf("%v @[%v]", v, p.T) -} - -// MarshalJSON implements json.Marshaler. -func (p Point) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(p.V, 'f', -1, 64) - return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) -} - -// Sample is a single sample belonging to a metric. -type Sample struct { - Point - - Metric labels.Labels -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, s.Point) -} - -func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - M labels.Labels `json:"metric"` - V Point `json:"value"` - }{ - M: s.Metric, - V: s.Point, - } - return json.Marshal(v) -} - -// Vector is basically only an alias for model.Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -// Matrix is a slice of Seriess that implements sort.Interface and -// has a String method. -type Matrix []Series - -func (m Matrix) String() string { - // TODO(fabxc): sort, or can we rely on order from the querier? - strs := make([]string, len(m)) - - for i, ss := range m { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return labels.Compare(m[i].Metric, m[j].Metric) < 0 } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -// Result holds the resulting value of an execution or an error -// if any occurred. -type Result struct { - Err error - Value Value -} - -// Vector returns a Vector if the result value is one. An error is returned if -// the result was an error or the result value is not a Vector. -func (r *Result) Vector() (Vector, error) { - if r.Err != nil { - return nil, r.Err - } - v, ok := r.Value.(Vector) - if !ok { - return nil, fmt.Errorf("query result is not a Vector") - } - return v, nil -} - -// Matrix returns a Matrix. An error is returned if -// the result was an error or the result value is not a Matrix. -func (r *Result) Matrix() (Matrix, error) { - if r.Err != nil { - return nil, r.Err - } - v, ok := r.Value.(Matrix) - if !ok { - return nil, fmt.Errorf("query result is not a range Vector") - } - return v, nil -} - -// Scalar returns a Scalar value. An error is returned if -// the result was an error or the result value is not a Scalar. 
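All three MarshalJSON implementations above format the sample value as a string, so no precision is lost to JSON number encoding. A self-contained sketch of Point's wire format:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// Point mirrors the type above: a millisecond timestamp plus a value,
// encoded as [seconds, "value"] with the value kept as a string.
type Point struct {
	T int64
	V float64
}

func (p Point) MarshalJSON() ([]byte, error) {
	v := strconv.FormatFloat(p.V, 'f', -1, 64)
	return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
}

func main() {
	b, err := json.Marshal(Point{T: 1500000000000, V: 0.333333})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // [1500000000,"0.333333"]
}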
-func (r *Result) Scalar() (Scalar, error) { - if r.Err != nil { - return Scalar{}, r.Err - } - v, ok := r.Value.(Scalar) - if !ok { - return Scalar{}, fmt.Errorf("query result is not a Scalar") - } - return v, nil -} - -func (r *Result) String() string { - if r.Err != nil { - return r.Err.Error() - } - if r.Value == nil { - return "" - } - return r.Value.String() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go deleted file mode 100644 index a0ae53731..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go +++ /dev/null @@ -1,459 +0,0 @@ -package userconfig - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/rulefmt" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/rules" - "gopkg.in/yaml.v3" - - legacy_promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -// An ID is the ID of a single users's Cortex configuration. When a -// configuration changes, it gets a new ID. -type ID int - -// RuleFormatVersion indicates which Prometheus rule format (v1 vs. v2) to use in parsing. -type RuleFormatVersion int - -const ( - // RuleFormatV1 is the Prometheus 1.x rule format. - RuleFormatV1 RuleFormatVersion = iota - // RuleFormatV2 is the Prometheus 2.x rule format. - RuleFormatV2 RuleFormatVersion = iota -) - -// IsValid returns whether the rules format version is a valid (known) version. -func (v RuleFormatVersion) IsValid() bool { - switch v { - case RuleFormatV1, RuleFormatV2: - return true - default: - return false - } -} - -// MarshalJSON implements json.Marshaler. -func (v RuleFormatVersion) MarshalJSON() ([]byte, error) { - switch v { - case RuleFormatV1: - return json.Marshal("1") - case RuleFormatV2: - return json.Marshal("2") - default: - return nil, fmt.Errorf("unknown rule format version %d", v) - } -} - -// MarshalYAML implements yaml.Marshaler. -func (v RuleFormatVersion) MarshalYAML() (interface{}, error) { - switch v { - case RuleFormatV1: - return yaml.Marshal("1") - case RuleFormatV2: - return yaml.Marshal("2") - default: - return nil, fmt.Errorf("unknown rule format version %d", v) - } -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *RuleFormatVersion) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - switch s { - case "1": - *v = RuleFormatV1 - case "2": - *v = RuleFormatV2 - default: - return fmt.Errorf("unknown rule format version %q", string(data)) - } - return nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (v *RuleFormatVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - switch s { - case "1": - *v = RuleFormatV1 - case "2": - *v = RuleFormatV2 - default: - return fmt.Errorf("unknown rule format version %q", s) - } - return nil -} - -// A Config is a Cortex configuration for a single user. -type Config struct { - // RulesFiles maps from a rules filename to file contents. 
- RulesConfig RulesConfig - TemplateFiles map[string]string - AlertmanagerConfig string -} - -// configCompat is a compatibility struct to support old JSON config blobs -// saved in the config DB that didn't have a rule format version yet and -// just had a top-level field for the rule files. -type configCompat struct { - RulesFiles map[string]string `json:"rules_files" yaml:"rules_files"` - RuleFormatVersion RuleFormatVersion `json:"rule_format_version" yaml:"rule_format_version"` - TemplateFiles map[string]string `json:"template_files" yaml:"template_files"` - AlertmanagerConfig string `json:"alertmanager_config" yaml:"alertmanager_config"` -} - -// MarshalJSON implements json.Marshaler. -func (c Config) MarshalJSON() ([]byte, error) { - compat := &configCompat{ - RulesFiles: c.RulesConfig.Files, - RuleFormatVersion: c.RulesConfig.FormatVersion, - TemplateFiles: c.TemplateFiles, - AlertmanagerConfig: c.AlertmanagerConfig, - } - - return json.Marshal(compat) -} - -// MarshalYAML implements yaml.Marshaler. -func (c Config) MarshalYAML() (interface{}, error) { - compat := &configCompat{ - RulesFiles: c.RulesConfig.Files, - RuleFormatVersion: c.RulesConfig.FormatVersion, - TemplateFiles: c.TemplateFiles, - AlertmanagerConfig: c.AlertmanagerConfig, - } - - return yaml.Marshal(compat) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (c *Config) UnmarshalJSON(data []byte) error { - compat := configCompat{} - if err := json.Unmarshal(data, &compat); err != nil { - return err - } - *c = Config{ - RulesConfig: RulesConfig{ - Files: compat.RulesFiles, - FormatVersion: compat.RuleFormatVersion, - }, - TemplateFiles: compat.TemplateFiles, - AlertmanagerConfig: compat.AlertmanagerConfig, - } - return nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { - compat := configCompat{} - if err := unmarshal(&compat); err != nil { - return errors.WithStack(err) - } - *c = Config{ - RulesConfig: RulesConfig{ - Files: compat.RulesFiles, - FormatVersion: compat.RuleFormatVersion, - }, - TemplateFiles: compat.TemplateFiles, - AlertmanagerConfig: compat.AlertmanagerConfig, - } - return nil -} - -// View is what's returned from the Weave Cloud configs service -// when we ask for all Cortex configurations. -// -// The configs service is essentially a JSON blob store that gives each -// _version_ of a configuration a unique ID and guarantees that later versions -// have greater IDs. -type View struct { - ID ID `json:"id"` - Config Config `json:"config"` - DeletedAt time.Time `json:"deleted_at"` -} - -// IsDeleted tells you if the config is deleted. -func (v View) IsDeleted() bool { - return !v.DeletedAt.IsZero() -} - -// GetVersionedRulesConfig specializes the view to just the rules config. -func (v View) GetVersionedRulesConfig() *VersionedRulesConfig { - if v.Config.RulesConfig.Files == nil { - return nil - } - return &VersionedRulesConfig{ - ID: v.ID, - Config: v.Config.RulesConfig, - DeletedAt: v.DeletedAt, - } -} - -// RulesConfig is the rules configuration for a particular organization. -type RulesConfig struct { - FormatVersion RuleFormatVersion `json:"format_version"` - Files map[string]string `json:"files"` -} - -// Equal compares two RulesConfigs for equality. 
-// -// instance Eq RulesConfig -func (c RulesConfig) Equal(o RulesConfig) bool { - if c.FormatVersion != o.FormatVersion { - return false - } - if len(o.Files) != len(c.Files) { - return false - } - for k, v1 := range c.Files { - v2, ok := o.Files[k] - if !ok || v1 != v2 { - return false - } - } - return true -} - -// Parse parses and validates the content of the rule files in a RulesConfig -// according to the passed rule format version. -func (c RulesConfig) Parse() (map[string][]rules.Rule, error) { - switch c.FormatVersion { - case RuleFormatV1: - return c.parseV1() - case RuleFormatV2: - return c.parseV2() - default: - return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion) - } -} - -// ParseFormatted returns the rulefmt map of a users rules configs. It allows -// for rules to be mapped to disk and read by the prometheus rules manager. -func (c RulesConfig) ParseFormatted() (map[string]rulefmt.RuleGroups, error) { - switch c.FormatVersion { - case RuleFormatV1: - return c.parseV1Formatted() - case RuleFormatV2: - return c.parseV2Formatted() - default: - return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion) - } -} - -// parseV2 parses and validates the content of the rule files in a RulesConfig -// according to the Prometheus 2.x rule format. -func (c RulesConfig) parseV2Formatted() (map[string]rulefmt.RuleGroups, error) { - ruleMap := map[string]rulefmt.RuleGroups{} - - for fn, content := range c.Files { - rgs, errs := rulefmt.Parse([]byte(content)) - for _, err := range errs { // return just the first error, if any - return nil, err - } - ruleMap[fn] = *rgs - - } - return ruleMap, nil -} - -// parseV1 parses and validates the content of the rule files in a RulesConfig -// according to the Prometheus 1.x rule format. -func (c RulesConfig) parseV1Formatted() (map[string]rulefmt.RuleGroups, error) { - result := map[string]rulefmt.RuleGroups{} - for fn, content := range c.Files { - stmts, err := legacy_promql.ParseStmts(content) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %s", fn, err) - } - - ra := []rulefmt.RuleNode{} - for _, stmt := range stmts { - var rule rulefmt.RuleNode - switch r := stmt.(type) { - case *legacy_promql.AlertStmt: - _, err := parser.ParseExpr(r.Expr.String()) - if err != nil { - return nil, err - } - - rule = rulefmt.RuleNode{ - Alert: yaml.Node{Value: r.Name}, - Expr: yaml.Node{Value: r.Expr.String()}, - For: model.Duration(r.Duration), - Labels: r.Labels.Map(), - Annotations: r.Annotations.Map(), - } - - case *legacy_promql.RecordStmt: - _, err := parser.ParseExpr(r.Expr.String()) - if err != nil { - return nil, err - } - - rule = rulefmt.RuleNode{ - Record: yaml.Node{Value: r.Name}, - Expr: yaml.Node{Value: r.Expr.String()}, - Labels: r.Labels.Map(), - } - - default: - return nil, fmt.Errorf("ruler.GetRules: unknown statement type") - } - ra = append(ra, rule) - } - result[fn] = rulefmt.RuleGroups{ - Groups: []rulefmt.RuleGroup{ - { - Name: "rg:" + fn, - Rules: ra, - }, - }, - } - } - return result, nil -} - -// parseV2 parses and validates the content of the rule files in a RulesConfig -// according to the Prometheus 2.x rule format. -// -// NOTE: On one hand, we cannot return fully-fledged lists of rules.Group -// here yet, as creating a rules.Group requires already -// passing in rules.ManagerOptions options (which in turn require a -// notifier, appender, etc.), which we do not want to create simply -// for parsing. 
-// On the other hand, we should not return barebones
-// rulefmt.RuleGroup sets here either, as only a fully-converted rules.Rule
-// is able to track alert states over multiple rule evaluations. The caller
-// would otherwise have to ensure that the rulefmt.RuleGroup is converted
-// exactly once, not for every evaluation (or risk losing alert pending
-// states). So it's probably better to just return a set of rules.Rule here.
-func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) {
-	groups := map[string][]rules.Rule{}
-
-	for fn, content := range c.Files {
-		rgs, errs := rulefmt.Parse([]byte(content))
-		if len(errs) > 0 {
-			return nil, fmt.Errorf("error parsing %s: %v", fn, errs[0])
-		}
-
-		for _, rg := range rgs.Groups {
-			rls := make([]rules.Rule, 0, len(rg.Rules))
-			for _, rl := range rg.Rules {
-				expr, err := parser.ParseExpr(rl.Expr.Value)
-				if err != nil {
-					return nil, err
-				}
-
-				if rl.Alert.Value != "" {
-					rls = append(rls, rules.NewAlertingRule(
-						rl.Alert.Value,
-						expr,
-						time.Duration(rl.For),
-						labels.FromMap(rl.Labels),
-						labels.FromMap(rl.Annotations),
-						nil,
-						"",
-						true,
-						log.With(util_log.Logger, "alert", rl.Alert.Value),
-					))
-					continue
-				}
-				rls = append(rls, rules.NewRecordingRule(
-					rl.Record.Value,
-					expr,
-					labels.FromMap(rl.Labels),
-				))
-			}
-
-			// Group names have to be unique in Prometheus, but only within one rules file.
-			groups[rg.Name+";"+fn] = rls
-		}
-	}
-
-	return groups, nil
-}
-
-// parseV1 parses and validates the content of the rule files in a RulesConfig
-// according to the Prometheus 1.x rule format.
-//
-// The same comment about rule groups as on parseV2() applies here.
-func (c RulesConfig) parseV1() (map[string][]rules.Rule, error) {
-	result := map[string][]rules.Rule{}
-	for fn, content := range c.Files {
-		stmts, err := legacy_promql.ParseStmts(content)
-		if err != nil {
-			return nil, fmt.Errorf("error parsing %s: %s", fn, err)
-		}
-		ra := []rules.Rule{}
-		for _, stmt := range stmts {
-			var rule rules.Rule
-
-			switch r := stmt.(type) {
-			case *legacy_promql.AlertStmt:
-				// legacy_promql.ParseStmts has parsed the whole rule for us.
-				// Ideally we'd just use r.Expr and pass that to rules.NewAlertingRule,
-				// but it is of the type legacy_promql.Expr and not promql.Expr.
-				// So we convert it back to a string, and then parse it again with the
-				// upstream parser to get it into the right type.
-				expr, err := parser.ParseExpr(r.Expr.String())
-				if err != nil {
-					return nil, err
-				}
-
-				rule = rules.NewAlertingRule(
-					r.Name,
-					expr,
-					r.Duration,
-					r.Labels,
-					r.Annotations,
-					nil,
-					"",
-					true,
-					log.With(util_log.Logger, "alert", r.Name),
-				)
-
-			case *legacy_promql.RecordStmt:
-				expr, err := parser.ParseExpr(r.Expr.String())
-				if err != nil {
-					return nil, err
-				}
-
-				rule = rules.NewRecordingRule(r.Name, expr, r.Labels)
-
-			default:
-				return nil, fmt.Errorf("ruler.GetRules: unknown statement type")
-			}
-			ra = append(ra, rule)
-		}
-		result[fn] = ra
-	}
-	return result, nil
-}
-
-// VersionedRulesConfig is a RulesConfig together with a version.
-// `data Versioned a = Versioned { id :: ID , config :: a }`
-type VersionedRulesConfig struct {
-	ID        ID          `json:"id"`
-	Config    RulesConfig `json:"config"`
-	DeletedAt time.Time   `json:"deleted_at"`
-}
-
-// IsDeleted tells you if the config is deleted.
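The MarshalJSON/UnmarshalJSON pair on Config above exists only to round-trip the legacy flat JSON layout through the nested Go types. A minimal, self-contained sketch of that compat-struct pattern; every type and field name here is a stand-in, not the real Cortex type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// rulesConfig and config are local stand-ins for RulesConfig/Config above;
// only the fields needed for the round trip are kept.
type rulesConfig struct {
	FormatVersion int
	Files         map[string]string
}

type config struct {
	Rules         rulesConfig
	TemplateFiles map[string]string
}

// configCompat mirrors the flat, legacy JSON layout: rule files live in a
// top-level field instead of a nested rules object.
type configCompat struct {
	RulesFiles        map[string]string `json:"rules_files"`
	RuleFormatVersion int               `json:"rule_format_version"`
	TemplateFiles     map[string]string `json:"template_files"`
}

// MarshalJSON flattens the nested config into the legacy shape.
func (c config) MarshalJSON() ([]byte, error) {
	return json.Marshal(configCompat{
		RulesFiles:        c.Rules.Files,
		RuleFormatVersion: c.Rules.FormatVersion,
		TemplateFiles:     c.TemplateFiles,
	})
}

// UnmarshalJSON nests the legacy shape back into the Go types.
func (c *config) UnmarshalJSON(data []byte) error {
	var compat configCompat
	if err := json.Unmarshal(data, &compat); err != nil {
		return err
	}
	*c = config{
		Rules:         rulesConfig{Files: compat.RulesFiles, FormatVersion: compat.RuleFormatVersion},
		TemplateFiles: compat.TemplateFiles,
	}
	return nil
}

func main() {
	in := config{Rules: rulesConfig{FormatVersion: 2, Files: map[string]string{"a.yml": "groups: []"}}}
	blob, _ := json.Marshal(in) // uses MarshalJSON: nested config flattens to the legacy shape
	fmt.Println(string(blob))

	var out config
	_ = json.Unmarshal(blob, &out) // uses UnmarshalJSON: legacy shape nests back
	fmt.Println(out.Rules.Files["a.yml"])
}
```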
-func (vr VersionedRulesConfig) IsDeleted() bool { - return !vr.DeletedAt.IsZero() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go deleted file mode 100644 index cb7206e20..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go +++ /dev/null @@ -1,527 +0,0 @@ -package cortex - -import ( - "bytes" - "context" - "flag" - "fmt" - "net/http" - "os" - "reflect" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/promql" - prom_storage "github.com/prometheus/prometheus/storage" - "github.com/weaveworks/common/server" - "github.com/weaveworks/common/signals" - "google.golang.org/grpc/health/grpc_health_v1" - "gopkg.in/yaml.v2" - - "github.com/cortexproject/cortex/pkg/alertmanager" - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" - "github.com/cortexproject/cortex/pkg/api" - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/encoding" - "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/chunk/storage" - chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/compactor" - "github.com/cortexproject/cortex/pkg/configs" - configAPI "github.com/cortexproject/cortex/pkg/configs/api" - "github.com/cortexproject/cortex/pkg/configs/db" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/distributor" - "github.com/cortexproject/cortex/pkg/flusher" - "github.com/cortexproject/cortex/pkg/frontend" - frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1" - "github.com/cortexproject/cortex/pkg/ingester" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/querier/queryrange" - "github.com/cortexproject/cortex/pkg/querier/tenantfederation" - querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" - "github.com/cortexproject/cortex/pkg/ruler" - "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/scheduler" - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/fakeauth" - "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/cortexproject/cortex/pkg/util/grpcutil" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/modules" - "github.com/cortexproject/cortex/pkg/util/process" - "github.com/cortexproject/cortex/pkg/util/runtimeconfig" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -var ( - errInvalidHTTPPrefix = errors.New("HTTP prefix should be empty or start with /") -) - -// The design pattern for Cortex is a series of config objects, which are -// registered for command line flags, and then a series of components that -// are instantiated and composed. Some rules of thumb: -// - Config types should only contain 'simple' types (ints, strings, urls etc). -// - Flag validation should be done by the flag; use a flag.Value where -// appropriate. 
-// - Config types should map 1:1 with a component type.
-// - Config types should define flags with a common prefix.
-// - It's fine to nest configs within configs, but this should match the
-//   nesting of components within components.
-// - Limit as much as possible sharing of configuration between config types.
-//   Where necessary, use a pointer for this - avoid repetition.
-// - Where a nesting of components is not obvious, it's fine to pass
-//   references to other components' constructors to compose them.
-// - The first argument for a component's constructor should be its matching
-//   config object.
-
-// Config is the root config for Cortex.
-type Config struct {
-	Target      flagext.StringSliceCSV `yaml:"target"`
-	AuthEnabled bool                   `yaml:"auth_enabled"`
-	PrintConfig bool                   `yaml:"-"`
-	HTTPPrefix  string                 `yaml:"http_prefix"`
-
-	API              api.Config                      `yaml:"api"`
-	Server           server.Config                   `yaml:"server"`
-	Distributor      distributor.Config              `yaml:"distributor"`
-	Querier          querier.Config                  `yaml:"querier"`
-	IngesterClient   client.Config                   `yaml:"ingester_client"`
-	Ingester         ingester.Config                 `yaml:"ingester"`
-	Flusher          flusher.Config                  `yaml:"flusher"`
-	Storage          storage.Config                  `yaml:"storage"`
-	ChunkStore       chunk.StoreConfig               `yaml:"chunk_store"`
-	Schema           chunk.SchemaConfig              `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation)
-	LimitsConfig     validation.Limits               `yaml:"limits"`
-	Prealloc         cortexpb.PreallocConfig         `yaml:"prealloc" doc:"hidden"`
-	Worker           querier_worker.Config           `yaml:"frontend_worker"`
-	Frontend         frontend.CombinedFrontendConfig `yaml:"frontend"`
-	QueryRange       queryrange.Config               `yaml:"query_range"`
-	TableManager     chunk.TableManagerConfig        `yaml:"table_manager"`
-	Encoding         encoding.Config                 `yaml:"-"` // No yaml for this, it only works with flags.
-	BlocksStorage    tsdb.BlocksStorageConfig        `yaml:"blocks_storage"`
-	Compactor        compactor.Config                `yaml:"compactor"`
-	StoreGateway     storegateway.Config             `yaml:"store_gateway"`
-	PurgerConfig     purger.Config                   `yaml:"purger"`
-	TenantFederation tenantfederation.Config         `yaml:"tenant_federation"`
-
-	Ruler               ruler.Config                               `yaml:"ruler"`
-	RulerStorage        rulestore.Config                           `yaml:"ruler_storage"`
-	Configs             configs.Config                             `yaml:"configs"`
-	Alertmanager        alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"`
-	AlertmanagerStorage alertstore.Config                          `yaml:"alertmanager_storage"`
-	RuntimeConfig       runtimeconfig.Config                       `yaml:"runtime_config"`
-	MemberlistKV        memberlist.KVConfig                        `yaml:"memberlist"`
-	QueryScheduler      scheduler.Config                           `yaml:"query_scheduler"`
-}
-
-// RegisterFlags registers flags.
-func (c *Config) RegisterFlags(f *flag.FlagSet) {
-	c.Server.MetricsNamespace = "cortex"
-	c.Server.ExcludeRequestInLog = true
-
-	// Set the default module list to 'all'.
-	c.Target = []string{All}
-
-	f.Var(&c.Target, "target", "Comma-separated list of Cortex modules to load. "+
-		"The alias 'all' can be used in the list to load a number of core modules and will enable single-binary mode. 
"+ - "Use '-modules' command line flag to get a list of available modules, and to see which modules are included in 'all'.") - - f.BoolVar(&c.AuthEnabled, "auth.enabled", true, "Set to false to disable auth.") - f.BoolVar(&c.PrintConfig, "print.config", false, "Print the config and exit.") - f.StringVar(&c.HTTPPrefix, "http.prefix", "/api/prom", "HTTP path prefix for Cortex API.") - - c.API.RegisterFlags(f) - c.registerServerFlagsWithChangedDefaultValues(f) - c.Distributor.RegisterFlags(f) - c.Querier.RegisterFlags(f) - c.IngesterClient.RegisterFlags(f) - c.Ingester.RegisterFlags(f) - c.Flusher.RegisterFlags(f) - c.Storage.RegisterFlags(f) - c.ChunkStore.RegisterFlags(f) - c.Schema.RegisterFlags(f) - c.LimitsConfig.RegisterFlags(f) - c.Prealloc.RegisterFlags(f) - c.Worker.RegisterFlags(f) - c.Frontend.RegisterFlags(f) - c.QueryRange.RegisterFlags(f) - c.TableManager.RegisterFlags(f) - c.Encoding.RegisterFlags(f) - c.BlocksStorage.RegisterFlags(f) - c.Compactor.RegisterFlags(f) - c.StoreGateway.RegisterFlags(f) - c.PurgerConfig.RegisterFlags(f) - c.TenantFederation.RegisterFlags(f) - - c.Ruler.RegisterFlags(f) - c.RulerStorage.RegisterFlags(f) - c.Configs.RegisterFlags(f) - c.Alertmanager.RegisterFlags(f) - c.AlertmanagerStorage.RegisterFlags(f) - c.RuntimeConfig.RegisterFlags(f) - c.MemberlistKV.RegisterFlags(f) - c.QueryScheduler.RegisterFlags(f) - - // These don't seem to have a home. - f.IntVar(&chunk_util.QueryParallelism, "querier.query-parallelism", 100, "Max subqueries run in parallel per higher-level query.") -} - -// Validate the cortex config and returns an error if the validation -// doesn't pass -func (c *Config) Validate(log log.Logger) error { - if err := c.validateYAMLEmptyNodes(); err != nil { - return err - } - - if c.HTTPPrefix != "" && !strings.HasPrefix(c.HTTPPrefix, "/") { - return errInvalidHTTPPrefix - } - - if err := c.Schema.Validate(); err != nil { - return errors.Wrap(err, "invalid schema config") - } - if err := c.Encoding.Validate(); err != nil { - return errors.Wrap(err, "invalid encoding config") - } - if err := c.Storage.Validate(); err != nil { - return errors.Wrap(err, "invalid storage config") - } - if err := c.ChunkStore.Validate(log); err != nil { - return errors.Wrap(err, "invalid chunk store config") - } - if err := c.RulerStorage.Validate(); err != nil { - return errors.Wrap(err, "invalid rulestore config") - } - if err := c.Ruler.Validate(c.LimitsConfig, log); err != nil { - return errors.Wrap(err, "invalid ruler config") - } - if err := c.BlocksStorage.Validate(); err != nil { - return errors.Wrap(err, "invalid TSDB config") - } - if err := c.LimitsConfig.Validate(c.Distributor.ShardByAllLabels); err != nil { - return errors.Wrap(err, "invalid limits config") - } - if err := c.Distributor.Validate(c.LimitsConfig); err != nil { - return errors.Wrap(err, "invalid distributor config") - } - if err := c.Querier.Validate(); err != nil { - return errors.Wrap(err, "invalid querier config") - } - if err := c.IngesterClient.Validate(log); err != nil { - return errors.Wrap(err, "invalid ingester_client config") - } - if err := c.Worker.Validate(log); err != nil { - return errors.Wrap(err, "invalid frontend_worker config") - } - if err := c.QueryRange.Validate(c.Querier); err != nil { - return errors.Wrap(err, "invalid query_range config") - } - if err := c.TableManager.Validate(); err != nil { - return errors.Wrap(err, "invalid table-manager config") - } - if err := c.StoreGateway.Validate(c.LimitsConfig); err != nil { - return errors.Wrap(err, "invalid 
store-gateway config"))
-	}
-	if err := c.Compactor.Validate(c.LimitsConfig); err != nil {
-		return errors.Wrap(err, "invalid compactor config")
-	}
-	if err := c.AlertmanagerStorage.Validate(); err != nil {
-		return errors.Wrap(err, "invalid alertmanager storage config")
-	}
-	if err := c.Alertmanager.Validate(c.AlertmanagerStorage); err != nil {
-		return errors.Wrap(err, "invalid alertmanager config")
-	}
-
-	if c.Storage.Engine == storage.StorageEngineBlocks && c.Querier.SecondStoreEngine != storage.StorageEngineChunks && len(c.Schema.Configs) > 0 {
-		level.Warn(log).Log("schema configuration is not used by the blocks storage engine, and will have no effect")
-	}
-
-	return nil
-}
-
-func (c *Config) isModuleEnabled(m string) bool {
-	return util.StringsContain(c.Target, m)
-}
-
-// validateYAMLEmptyNodes ensures that no empty node has been specified in the YAML config file.
-// When an empty node is defined in YAML, the YAML parser sets the whole struct to its zero value
-// and so we lose all default values. It's very difficult to detect this case for the user, so we
-// try to prevent it (on the root level) with this custom validation.
-func (c *Config) validateYAMLEmptyNodes() error {
-	defaults := Config{}
-	flagext.DefaultValues(&defaults)
-
-	defStruct := reflect.ValueOf(defaults)
-	cfgStruct := reflect.ValueOf(*c)
-
-	// We expect both structs to be exactly the same. This check should never fail.
-	if cfgStruct.NumField() != defStruct.NumField() {
-		return errors.New("unable to validate configuration because of mismatching internal config data structure")
-	}
-
-	for i := 0; i < cfgStruct.NumField(); i++ {
-		// If the struct has been reset due to an empty YAML value and the zero struct value
-		// doesn't match the default one, then we should warn the user about the issue.
-		if cfgStruct.Field(i).Kind() == reflect.Struct && cfgStruct.Field(i).IsZero() && !defStruct.Field(i).IsZero() {
-			return fmt.Errorf("the %s configuration in YAML has been specified as an empty YAML node", cfgStruct.Type().Field(i).Name)
-		}
-	}
-
-	return nil
-}
-
-func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) {
-	throwaway := flag.NewFlagSet("throwaway", flag.PanicOnError)
-
-	// Register to throwaway flags first. Default values are remembered during registration and cannot be changed,
-	// but we can take values from the throwaway flag set and reregister into the supplied flags with new default values.
-	c.Server.RegisterFlags(throwaway)
-
-	throwaway.VisitAll(func(f *flag.Flag) {
-		// Ignore errors when setting new values. We have a test to verify that it works.
-		switch f.Name {
-		case "server.grpc.keepalive.min-time-between-pings":
-			_ = f.Value.Set("10s")
-
-		case "server.grpc.keepalive.ping-without-stream-allowed":
-			_ = f.Value.Set("true")
-		}
-
-		fs.Var(f.Value, f.Name, f.Usage)
-	})
-}
-
-// Cortex is the root data structure for Cortex.
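The reflection check in validateYAMLEmptyNodes above guards against a subtle YAML failure mode: a key left empty (`limits:` with nothing under it) zeroes the entire sub-struct and silently discards its defaults. A stripped-down sketch of the same check, with hypothetical config types:

```go
package main

import (
	"fmt"
	"reflect"
)

type limits struct{ MaxSeries int }
type conf struct{ Limits limits }

// emptyNodes applies the same reflection trick as validateYAMLEmptyNodes:
// a sub-config that is zero while its default is non-zero was almost
// certainly wiped out by an empty YAML node.
func emptyNodes(cfg, defaults interface{}) []string {
	cv, dv := reflect.ValueOf(cfg), reflect.ValueOf(defaults)
	var fields []string
	for i := 0; i < cv.NumField(); i++ {
		if cv.Field(i).Kind() == reflect.Struct && cv.Field(i).IsZero() && !dv.Field(i).IsZero() {
			fields = append(fields, cv.Type().Field(i).Name)
		}
	}
	return fields
}

func main() {
	defaults := conf{Limits: limits{MaxSeries: 10000}}
	loaded := conf{} // as if the YAML contained "limits:" with nothing under it
	fmt.Println(emptyNodes(loaded, defaults)) // [Limits]
}
```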
-type Cortex struct { - Cfg Config - - // set during initialization - ServiceMap map[string]services.Service - ModuleManager *modules.Manager - - API *api.API - Server *server.Server - Ring *ring.Ring - TenantLimits validation.TenantLimits - Overrides *validation.Overrides - Distributor *distributor.Distributor - Ingester *ingester.Ingester - Flusher *flusher.Flusher - Store chunk.Store - DeletesStore *purger.DeleteStore - Frontend *frontendv1.Frontend - TableManager *chunk.TableManager - RuntimeConfig *runtimeconfig.Manager - Purger *purger.Purger - TombstonesLoader *purger.TombstonesLoader - QuerierQueryable prom_storage.SampleAndChunkQueryable - ExemplarQueryable prom_storage.ExemplarQueryable - QuerierEngine *promql.Engine - QueryFrontendTripperware queryrange.Tripperware - - Ruler *ruler.Ruler - RulerStorage rulestore.RuleStore - ConfigAPI *configAPI.API - ConfigDB db.DB - Alertmanager *alertmanager.MultitenantAlertmanager - Compactor *compactor.Compactor - StoreGateway *storegateway.StoreGateway - MemberlistKV *memberlist.KVInitService - - // Queryables that the querier should use to query the long - // term storage. It depends on the storage engine used. - StoreQueryables []querier.QueryableWithFilter -} - -// New makes a new Cortex. -func New(cfg Config) (*Cortex, error) { - if cfg.PrintConfig { - if err := yaml.NewEncoder(os.Stdout).Encode(&cfg); err != nil { - fmt.Println("Error encoding config:", err) - } - os.Exit(0) - } - - // Swap out the default resolver to support multiple tenant IDs separated by a '|' - if cfg.TenantFederation.Enabled { - util_log.WarnExperimentalUse("tenant-federation") - tenant.WithDefaultResolver(tenant.NewMultiResolver()) - } - - // Don't check auth header on TransferChunks, as we weren't originally - // sending it and this could cause transfers to fail on update. - cfg.API.HTTPAuthMiddleware = fakeauth.SetupAuthMiddleware(&cfg.Server, cfg.AuthEnabled, - // Also don't check auth for these gRPC methods, since single call is used for multiple users (or no user like health check). - []string{ - "/grpc.health.v1.Health/Check", - "/cortex.Ingester/TransferChunks", - "/frontend.Frontend/Process", - "/frontend.Frontend/NotifyClientShutdown", - "/schedulerpb.SchedulerForFrontend/FrontendLoop", - "/schedulerpb.SchedulerForQuerier/QuerierLoop", - "/schedulerpb.SchedulerForQuerier/NotifyQuerierShutdown", - }) - - cortex := &Cortex{ - Cfg: cfg, - } - - cortex.setupThanosTracing() - - if err := cortex.setupModuleManager(); err != nil { - return nil, err - } - - return cortex, nil -} - -// setupThanosTracing appends a gRPC middleware used to inject our tracer into the custom -// context used by Thanos, in order to get Thanos spans correctly attached to our traces. -func (t *Cortex) setupThanosTracing() { - t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, ThanosTracerUnaryInterceptor) - t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, ThanosTracerStreamInterceptor) -} - -// Run starts Cortex running, and blocks until a Cortex stops. -func (t *Cortex) Run() error { - // Register custom process metrics. 
- if c, err := process.NewProcessCollector(); err == nil { - prometheus.MustRegister(c) - } else { - level.Warn(util_log.Logger).Log("msg", "skipped registration of custom process metrics collector", "err", err) - } - - for _, module := range t.Cfg.Target { - if !t.ModuleManager.IsUserVisibleModule(module) { - level.Warn(util_log.Logger).Log("msg", "selected target is an internal module, is this intended?", "target", module) - } - } - - var err error - t.ServiceMap, err = t.ModuleManager.InitModuleServices(t.Cfg.Target...) - if err != nil { - return err - } - - t.API.RegisterServiceMapHandler(http.HandlerFunc(t.servicesHandler)) - - // get all services, create service manager and tell it to start - servs := []services.Service(nil) - for _, s := range t.ServiceMap { - servs = append(servs, s) - } - - sm, err := services.NewManager(servs...) - if err != nil { - return err - } - - // before starting servers, register /ready handler and gRPC health check service. - // It should reflect entire Cortex. - t.Server.HTTP.Path("/ready").Handler(t.readyHandler(sm)) - grpc_health_v1.RegisterHealthServer(t.Server.GRPC, grpcutil.NewHealthCheck(sm)) - - // Let's listen for events from this manager, and log them. - healthy := func() { level.Info(util_log.Logger).Log("msg", "Cortex started") } - stopped := func() { level.Info(util_log.Logger).Log("msg", "Cortex stopped") } - serviceFailed := func(service services.Service) { - // if any service fails, stop entire Cortex - sm.StopAsync() - - // let's find out which module failed - for m, s := range t.ServiceMap { - if s == service { - if service.FailureCase() == modules.ErrStopProcess { - level.Info(util_log.Logger).Log("msg", "received stop signal via return error", "module", m, "err", service.FailureCase()) - } else { - level.Error(util_log.Logger).Log("msg", "module failed", "module", m, "err", service.FailureCase()) - } - return - } - } - - level.Error(util_log.Logger).Log("msg", "module failed", "module", "unknown", "err", service.FailureCase()) - } - - sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed)) - - // Setup signal handler. If signal arrives, we stop the manager, which stops all the services. - handler := signals.NewHandler(t.Server.Log) - go func() { - handler.Loop() - sm.StopAsync() - }() - - // Start all services. This can really only fail if some service is already - // in other state than New, which should not be the case. - err = sm.StartAsync(context.Background()) - if err == nil { - // Wait until service manager stops. It can stop in two ways: - // 1) Signal is received and manager is stopped. - // 2) Any service fails. - err = sm.AwaitStopped(context.Background()) - } - - // If there is no error yet (= service manager started and then stopped without problems), - // but any service failed, report that failure as an error to caller. 
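The lifecycle in Run above boils down to: start every module's service, stop the whole group as soon as any one of them fails, and surface the first failure to the caller. A rough standalone equivalent using plain goroutines and context cancellation instead of the services package; all names are illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// run starts every service, cancels the shared context as soon as any one
// of them fails, and returns the first failure.
func run(ctx context.Context, svcs map[string]func(context.Context) error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		firstErr error
	)
	for name, svc := range svcs {
		wg.Add(1)
		go func(name string, svc func(context.Context) error) {
			defer wg.Done()
			if err := svc(ctx); err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = fmt.Errorf("module %s failed: %w", name, err)
				}
				mu.Unlock()
				cancel() // any failure stops the entire process, as in Cortex
			}
		}(name, svc)
	}
	wg.Wait()
	return firstErr
}

func main() {
	err := run(context.Background(), map[string]func(context.Context) error{
		"ok":     func(ctx context.Context) error { <-ctx.Done(); return nil },
		"broken": func(ctx context.Context) error { return errors.New("boom") },
	})
	fmt.Println(err) // module broken failed: boom
}
```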
- if err == nil { - if failed := sm.ServicesByState()[services.Failed]; len(failed) > 0 { - for _, f := range failed { - if f.FailureCase() != modules.ErrStopProcess { - // Details were reported via failure listener before - err = errors.New("failed services") - break - } - } - } - } - return err -} - -func (t *Cortex) readyHandler(sm *services.Manager) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !sm.IsHealthy() { - msg := bytes.Buffer{} - msg.WriteString("Some services are not Running:\n") - - byState := sm.ServicesByState() - for st, ls := range byState { - msg.WriteString(fmt.Sprintf("%v: %d\n", st, len(ls))) - } - - http.Error(w, msg.String(), http.StatusServiceUnavailable) - return - } - - // Ingester has a special check that makes sure that it was able to register into the ring, - // and that all other ring entries are OK too. - if t.Ingester != nil { - if err := t.Ingester.CheckReady(r.Context()); err != nil { - http.Error(w, "Ingester not ready: "+err.Error(), http.StatusServiceUnavailable) - return - } - } - - // Query Frontend has a special check that makes sure that a querier is attached before it signals - // itself as ready - if t.Frontend != nil { - if err := t.Frontend.CheckReady(r.Context()); err != nil { - http.Error(w, "Query Frontend not ready: "+err.Error(), http.StatusServiceUnavailable) - return - } - } - - util.WriteTextResponse(w, "ready") - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go deleted file mode 100644 index 20e2ed625..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ /dev/null @@ -1,919 +0,0 @@ -package cortex - -import ( - "context" - "flag" - "fmt" - "net/http" - "os" - "time" - - "github.com/go-kit/log/level" - "github.com/opentracing-contrib/go-stdlib/nethttp" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/rules" - prom_storage "github.com/prometheus/prometheus/storage" - "github.com/thanos-io/thanos/pkg/discovery/dns" - httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" - "github.com/weaveworks/common/server" - - "github.com/cortexproject/cortex/pkg/alertmanager" - "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" - "github.com/cortexproject/cortex/pkg/api" - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/chunk/storage" - "github.com/cortexproject/cortex/pkg/compactor" - configAPI "github.com/cortexproject/cortex/pkg/configs/api" - "github.com/cortexproject/cortex/pkg/configs/db" - "github.com/cortexproject/cortex/pkg/distributor" - "github.com/cortexproject/cortex/pkg/flusher" - "github.com/cortexproject/cortex/pkg/frontend" - "github.com/cortexproject/cortex/pkg/frontend/transport" - "github.com/cortexproject/cortex/pkg/ingester" - "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/querier/queryrange" - "github.com/cortexproject/cortex/pkg/querier/tenantfederation" - querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv/codec" - "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" - "github.com/cortexproject/cortex/pkg/ruler" - "github.com/cortexproject/cortex/pkg/scheduler" - 
"github.com/cortexproject/cortex/pkg/storegateway" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/modules" - "github.com/cortexproject/cortex/pkg/util/runtimeconfig" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// The various modules that make up Cortex. -const ( - API string = "api" - Ring string = "ring" - RuntimeConfig string = "runtime-config" - Overrides string = "overrides" - OverridesExporter string = "overrides-exporter" - Server string = "server" - Distributor string = "distributor" - DistributorService string = "distributor-service" - Ingester string = "ingester" - IngesterService string = "ingester-service" - Flusher string = "flusher" - Querier string = "querier" - Queryable string = "queryable" - StoreQueryable string = "store-queryable" - QueryFrontend string = "query-frontend" - QueryFrontendTripperware string = "query-frontend-tripperware" - Store string = "store" - DeleteRequestsStore string = "delete-requests-store" - TableManager string = "table-manager" - RulerStorage string = "ruler-storage" - Ruler string = "ruler" - Configs string = "configs" - AlertManager string = "alertmanager" - Compactor string = "compactor" - StoreGateway string = "store-gateway" - MemberlistKV string = "memberlist-kv" - ChunksPurger string = "chunks-purger" - TenantDeletion string = "tenant-deletion" - Purger string = "purger" - QueryScheduler string = "query-scheduler" - TenantFederation string = "tenant-federation" - All string = "all" -) - -func newDefaultConfig() *Config { - defaultConfig := &Config{} - defaultFS := flag.NewFlagSet("", flag.PanicOnError) - defaultConfig.RegisterFlags(defaultFS) - return defaultConfig -} - -func (t *Cortex) initAPI() (services.Service, error) { - t.Cfg.API.ServerPrefix = t.Cfg.Server.PathPrefix - t.Cfg.API.LegacyHTTPPrefix = t.Cfg.HTTPPrefix - - a, err := api.New(t.Cfg.API, t.Cfg.Server, t.Server, util_log.Logger) - if err != nil { - return nil, err - } - - t.API = a - t.API.RegisterAPI(t.Cfg.Server.PathPrefix, t.Cfg, newDefaultConfig()) - - return nil, nil -} - -func (t *Cortex) initServer() (services.Service, error) { - // Cortex handles signals on its own. - DisableSignalHandling(&t.Cfg.Server) - serv, err := server.New(t.Cfg.Server) - if err != nil { - return nil, err - } - - t.Server = serv - - servicesToWaitFor := func() []services.Service { - svs := []services.Service(nil) - for m, s := range t.ServiceMap { - // Server should not wait for itself. 
- if m != Server { - svs = append(svs, s) - } - } - return svs - } - - s := NewServerService(t.Server, servicesToWaitFor) - - return s, nil -} - -func (t *Cortex) initRing() (serv services.Service, err error) { - t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.RuntimeConfig) - t.Ring, err = ring.New(t.Cfg.Ingester.LifecyclerConfig.RingConfig, "ingester", ingester.RingKey, util_log.Logger, prometheus.WrapRegistererWithPrefix("cortex_", prometheus.DefaultRegisterer)) - if err != nil { - return nil, err - } - - t.API.RegisterRing(t.Ring) - - return t.Ring, nil -} - -func (t *Cortex) initRuntimeConfig() (services.Service, error) { - if t.Cfg.RuntimeConfig.LoadPath == "" { - // no need to initialize module if load path is empty - return nil, nil - } - t.Cfg.RuntimeConfig.Loader = loadRuntimeConfig - - // make sure to set default limits before we start loading configuration into memory - validation.SetDefaultLimitsForYAMLUnmarshalling(t.Cfg.LimitsConfig) - - serv, err := runtimeconfig.New(t.Cfg.RuntimeConfig, prometheus.WrapRegistererWithPrefix("cortex_", prometheus.DefaultRegisterer), util_log.Logger) - if err == nil { - // TenantLimits just delegates to RuntimeConfig and doesn't have any state or need to do - // anything in the start/stopping phase. Thus we can create it as part of runtime config - // setup without any service instance of its own. - t.TenantLimits = newTenantLimits(serv) - } - - t.RuntimeConfig = serv - t.API.RegisterRuntimeConfig(runtimeConfigHandler(t.RuntimeConfig, t.Cfg.LimitsConfig)) - return serv, err -} - -func (t *Cortex) initOverrides() (serv services.Service, err error) { - t.Overrides, err = validation.NewOverrides(t.Cfg.LimitsConfig, t.TenantLimits) - // overrides don't have operational state, nor do they need to do anything more in starting/stopping phase, - // so there is no need to return any service. - return nil, err -} - -func (t *Cortex) initOverridesExporter() (services.Service, error) { - if t.Cfg.isModuleEnabled(OverridesExporter) && t.TenantLimits == nil { - // This target isn't enabled by default ("all") and requires per-tenant limits to - // work. Fail if it can't be setup correctly since the user explicitly wanted this - // target to run. - return nil, errors.New("overrides-exporter has been enabled, but no runtime configuration file was configured") - } - - exporter := validation.NewOverridesExporter(t.TenantLimits) - prometheus.MustRegister(exporter) - - // the overrides exporter has no state and reads overrides for runtime configuration each time it - // is collected so there is no need to return any service - return nil, nil -} - -func (t *Cortex) initDistributorService() (serv services.Service, err error) { - t.Cfg.Distributor.DistributorRing.ListenPort = t.Cfg.Server.GRPCListenPort - t.Cfg.Distributor.ShuffleShardingLookbackPeriod = t.Cfg.Querier.ShuffleShardingIngestersLookbackPeriod - - // Check whether the distributor can join the distributors ring, which is - // whenever it's not running as an internal dependency (ie. 
querier or - // ruler's dependency) - canJoinDistributorsRing := t.Cfg.isModuleEnabled(Distributor) || t.Cfg.isModuleEnabled(All) - - t.Distributor, err = distributor.New(t.Cfg.Distributor, t.Cfg.IngesterClient, t.Overrides, t.Ring, canJoinDistributorsRing, prometheus.DefaultRegisterer, util_log.Logger) - if err != nil { - return - } - - return t.Distributor, nil -} - -func (t *Cortex) initDistributor() (serv services.Service, err error) { - t.API.RegisterDistributor(t.Distributor, t.Cfg.Distributor) - - return nil, nil -} - -// initQueryable instantiates the queryable and promQL engine used to service queries to -// Cortex. It also registers the API endpoints associated with those two services. -func (t *Cortex) initQueryable() (serv services.Service, err error) { - querierRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "querier"}, prometheus.DefaultRegisterer) - - // Create a querier queryable and PromQL engine - t.QuerierQueryable, t.ExemplarQueryable, t.QuerierEngine = querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, querierRegisterer, util_log.Logger) - - // Register the default endpoints that are always enabled for the querier module - t.API.RegisterQueryable(t.QuerierQueryable, t.Distributor) - - return nil, nil -} - -// Enable merge querier if multi tenant query federation is enabled -func (t *Cortex) initTenantFederation() (serv services.Service, err error) { - if t.Cfg.TenantFederation.Enabled { - // Make sure the mergeQuerier is only used for request with more than a - // single tenant. This allows for a less impactful enabling of tenant - // federation. - byPassForSingleQuerier := true - t.QuerierQueryable = querier.NewSampleAndChunkQueryable(tenantfederation.NewQueryable(t.QuerierQueryable, byPassForSingleQuerier)) - } - return nil, nil -} - -// initQuerier registers an internal HTTP router with a Prometheus API backed by the -// Cortex Queryable. Then it does one of the following: -// -// 1. Query-Frontend Enabled: If Cortex has an All or QueryFrontend target, the internal -// HTTP router is wrapped with Tenant ID parsing middleware and passed to the frontend -// worker. -// -// 2. Querier Standalone: The querier will register the internal HTTP router with the external -// HTTP router for the Prometheus API routes. Then the external HTTP server will be passed -// as a http.Handler to the frontend worker. 
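In both arrangements described above, requests that reach the internal querier router have already passed through middleware that extracts the tenant ID and injects it into the request context. A self-contained sketch of that wrapping; the header name matches the one Cortex uses, the rest is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

type ctxKey struct{}

// tenantMiddleware is a stand-in for the auth middleware referenced above:
// it parses the tenant ID from a header and injects it into the request
// context before the internal querier router sees the request.
func tenantMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tenant := r.Header.Get("X-Scope-OrgID")
		if tenant == "" {
			http.Error(w, "no org id", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), ctxKey{}, tenant)))
	})
}

func main() {
	internal := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "tenant=%v\n", r.Context().Value(ctxKey{}))
	})
	http.Handle("/api/v1/query", tenantMiddleware(internal))
	// http.ListenAndServe(":8080", nil)
}
```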
-// -// Route Diagram: -// -// │ query -// │ request -// │ -// ▼ -// ┌──────────────────┐ QF to ┌──────────────────┐ -// │ external HTTP │ Worker │ │ -// │ router │──────────────▶│ frontend worker │ -// │ │ │ │ -// └──────────────────┘ └──────────────────┘ -// │ │ -// │ -// only in │ │ -// microservice ┌──────────────────┐ │ -// querier │ │ internal Querier │ │ -// ─ ─ ─ ─▶│ router │◀─────┘ -// │ │ -// └──────────────────┘ -// │ -// │ -// /metadata & /chunk ┌─────────────────────┼─────────────────────┐ -// requests │ │ │ -// │ │ │ -// ▼ ▼ ▼ -// ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ -// │ │ │ │ │ │ -// │Querier Queryable │ │ /api/v1 router │ │ /api/prom router │ -// │ │ │ │ │ │ -// └──────────────────┘ └──────────────────┘ └──────────────────┘ -// ▲ │ │ -// │ └──────────┬──────────┘ -// │ ▼ -// │ ┌──────────────────┐ -// │ │ │ -// └──────────────────────│ Prometheus API │ -// │ │ -// └──────────────────┘ -// -func (t *Cortex) initQuerier() (serv services.Service, err error) { - // Create a internal HTTP handler that is configured with the Prometheus API routes and points - // to a Prometheus API struct instantiated with the Cortex Queryable. - internalQuerierRouter := api.NewQuerierHandler( - t.Cfg.API, - t.QuerierQueryable, - t.ExemplarQueryable, - t.QuerierEngine, - t.Distributor, - t.TombstonesLoader, - prometheus.DefaultRegisterer, - util_log.Logger, - ) - - // If the querier is running standalone without the query-frontend or query-scheduler, we must register it's internal - // HTTP handler externally and provide the external Cortex Server HTTP handler to the frontend worker - // to ensure requests it processes use the default middleware instrumentation. - if !t.Cfg.isModuleEnabled(QueryFrontend) && !t.Cfg.isModuleEnabled(QueryScheduler) && !t.Cfg.isModuleEnabled(All) { - // First, register the internal querier handler with the external HTTP server - t.API.RegisterQueryAPI(internalQuerierRouter) - - // Second, set the http.Handler that the frontend worker will use to process requests to point to - // the external HTTP server. This will allow the querier to consolidate query metrics both external - // and internal using the default instrumentation when running as a standalone service. - internalQuerierRouter = t.Server.HTTPServer.Handler - } else { - // Single binary mode requires a query frontend endpoint for the worker. If no frontend and scheduler endpoint - // is configured, Cortex will default to using frontend on localhost on it's own GRPC listening port. - if t.Cfg.Worker.FrontendAddress == "" && t.Cfg.Worker.SchedulerAddress == "" { - address := fmt.Sprintf("127.0.0.1:%d", t.Cfg.Server.GRPCListenPort) - level.Warn(util_log.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", address) - t.Cfg.Worker.FrontendAddress = address - } - - // Add a middleware to extract the trace context and add a header. - internalQuerierRouter = nethttp.MiddlewareFunc(opentracing.GlobalTracer(), internalQuerierRouter.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string { - return "internalQuerier" - })) - - // If queries are processed using the external HTTP Server, we need wrap the internal querier with - // HTTP router with middleware to parse the tenant ID from the HTTP header and inject it into the - // request context. 
-		internalQuerierRouter = t.API.AuthMiddleware.Wrap(internalQuerierRouter)
-	}
-
-	// If neither the frontend address nor the scheduler address is configured, no worker is needed.
-	if t.Cfg.Worker.FrontendAddress == "" && t.Cfg.Worker.SchedulerAddress == "" {
-		return nil, nil
-	}
-
-	t.Cfg.Worker.MaxConcurrentRequests = t.Cfg.Querier.MaxConcurrent
-	return querier_worker.NewQuerierWorker(t.Cfg.Worker, httpgrpc_server.NewServer(internalQuerierRouter), util_log.Logger, prometheus.DefaultRegisterer)
-}
-
-func (t *Cortex) initStoreQueryables() (services.Service, error) {
-	var servs []services.Service
-
-	//nolint:golint // I prefer this form over removing 'else', because it allows q to have smaller scope.
-	if q, err := initQueryableForEngine(t.Cfg.Storage.Engine, t.Cfg, t.Store, t.Overrides, prometheus.DefaultRegisterer); err != nil {
-		return nil, fmt.Errorf("failed to initialize querier for engine '%s': %v", t.Cfg.Storage.Engine, err)
-	} else {
-		t.StoreQueryables = append(t.StoreQueryables, querier.UseAlwaysQueryable(q))
-		if s, ok := q.(services.Service); ok {
-			servs = append(servs, s)
-		}
-	}
-
-	if t.Cfg.Querier.SecondStoreEngine != "" {
-		if t.Cfg.Querier.SecondStoreEngine == t.Cfg.Storage.Engine {
-			return nil, fmt.Errorf("second store engine used by querier '%s' must be different than primary engine '%s'", t.Cfg.Querier.SecondStoreEngine, t.Cfg.Storage.Engine)
-		}
-
-		sq, err := initQueryableForEngine(t.Cfg.Querier.SecondStoreEngine, t.Cfg, t.Store, t.Overrides, prometheus.DefaultRegisterer)
-		if err != nil {
-			return nil, fmt.Errorf("failed to initialize querier for engine '%s': %v", t.Cfg.Querier.SecondStoreEngine, err)
-		}
-
-		t.StoreQueryables = append(t.StoreQueryables, querier.UseBeforeTimestampQueryable(sq, time.Time(t.Cfg.Querier.UseSecondStoreBeforeTime)))
-
-		if s, ok := sq.(services.Service); ok {
-			servs = append(servs, s)
-		}
-	}
-
-	// Return service, if any.
-	switch len(servs) {
-	case 0:
-		return nil, nil
-	case 1:
-		return servs[0], nil
-	default:
-		// No need to support this case yet, since the chunk store is not a service.
-		// When we get there, we will need a wrapper service that starts all subservices and also monitors them for failures.
-		// Not difficult, but also not necessary right now.
-		return nil, fmt.Errorf("too many services")
-	}
-}
-
-func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, limits *validation.Overrides, reg prometheus.Registerer) (prom_storage.Queryable, error) {
-	switch engine {
-	case storage.StorageEngineChunks:
-		if chunkStore == nil {
-			return nil, fmt.Errorf("chunk store not initialized")
-		}
-		return querier.NewChunkStoreQueryable(cfg.Querier, chunkStore), nil
-
-	case storage.StorageEngineBlocks:
-		// When running in single binary, if the blocks sharding is disabled and no custom
-		// store-gateway address has been configured, we can set it to the running process.
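The UseBeforeTimestampQueryable wiring above means the secondary engine is consulted only for queries whose range starts before the configured cutoff, while the primary engine always participates. A tiny illustration of that routing decision; function and store names are made up:

```go
package main

import (
	"fmt"
	"time"
)

// pickStores sketches the second-store split in initStoreQueryables: the
// primary engine always serves queries, while the secondary engine is only
// consulted for time ranges starting before the configured cutoff.
func pickStores(queryStart, cutoff time.Time) []string {
	stores := []string{"primary"}
	if queryStart.Before(cutoff) {
		stores = append(stores, "secondary")
	}
	return stores
}

func main() {
	cutoff := time.Date(2021, 6, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(pickStores(cutoff.AddDate(0, -1, 0), cutoff)) // [primary secondary]
	fmt.Println(pickStores(cutoff.AddDate(0, 1, 0), cutoff))  // [primary]
}
```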
- if cfg.isModuleEnabled(All) && !cfg.StoreGateway.ShardingEnabled && cfg.Querier.StoreGatewayAddresses == "" { - cfg.Querier.StoreGatewayAddresses = fmt.Sprintf("127.0.0.1:%d", cfg.Server.GRPCListenPort) - } - - return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.BlocksStorage, limits, util_log.Logger, reg) - - default: - return nil, fmt.Errorf("unknown storage engine '%s'", engine) - } -} - -func (t *Cortex) tsdbIngesterConfig() { - t.Cfg.Ingester.BlocksStorageEnabled = t.Cfg.Storage.Engine == storage.StorageEngineBlocks - t.Cfg.Ingester.BlocksStorageConfig = t.Cfg.BlocksStorage -} - -func (t *Cortex) initIngesterService() (serv services.Service, err error) { - t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.RuntimeConfig) - t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort - t.Cfg.Ingester.DistributorShardingStrategy = t.Cfg.Distributor.ShardingStrategy - t.Cfg.Ingester.DistributorShardByAllLabels = t.Cfg.Distributor.ShardByAllLabels - t.Cfg.Ingester.StreamTypeFn = ingesterChunkStreaming(t.RuntimeConfig) - t.Cfg.Ingester.InstanceLimitsFn = ingesterInstanceLimits(t.RuntimeConfig) - t.tsdbIngesterConfig() - - t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Overrides, t.Store, prometheus.DefaultRegisterer, util_log.Logger) - if err != nil { - return - } - - return t.Ingester, nil -} - -func (t *Cortex) initIngester() (serv services.Service, err error) { - t.API.RegisterIngester(t.Ingester, t.Cfg.Distributor) - - return nil, nil -} - -func (t *Cortex) initFlusher() (serv services.Service, err error) { - t.tsdbIngesterConfig() - - t.Flusher, err = flusher.New( - t.Cfg.Flusher, - t.Cfg.Ingester, - t.Store, - t.Overrides, - prometheus.DefaultRegisterer, - util_log.Logger, - ) - if err != nil { - return - } - - return t.Flusher, nil -} - -func (t *Cortex) initChunkStore() (serv services.Service, err error) { - if t.Cfg.Storage.Engine != storage.StorageEngineChunks && t.Cfg.Querier.SecondStoreEngine != storage.StorageEngineChunks { - return nil, nil - } - err = t.Cfg.Schema.Load() - if err != nil { - return - } - - t.Store, err = storage.NewStore(t.Cfg.Storage, t.Cfg.ChunkStore, t.Cfg.Schema, t.Overrides, prometheus.DefaultRegisterer, t.TombstonesLoader, util_log.Logger) - if err != nil { - return - } - - return services.NewIdleService(nil, func(_ error) error { - t.Store.Stop() - return nil - }), nil -} - -func (t *Cortex) initDeleteRequestsStore() (serv services.Service, err error) { - if t.Cfg.Storage.Engine != storage.StorageEngineChunks || !t.Cfg.PurgerConfig.Enable { - // until we need to explicitly enable delete series support we need to do create TombstonesLoader without DeleteStore which acts as noop - t.TombstonesLoader = purger.NewTombstonesLoader(nil, nil) - - return - } - - var indexClient chunk.IndexClient - reg := prometheus.WrapRegistererWith( - prometheus.Labels{"component": DeleteRequestsStore}, prometheus.DefaultRegisterer) - indexClient, err = storage.NewIndexClient(t.Cfg.Storage.DeleteStoreConfig.Store, t.Cfg.Storage, t.Cfg.Schema, reg) - if err != nil { - return - } - - t.DeletesStore, err = purger.NewDeleteStore(t.Cfg.Storage.DeleteStoreConfig, indexClient) - if err != nil { - return - } - - t.TombstonesLoader = purger.NewTombstonesLoader(t.DeletesStore, prometheus.DefaultRegisterer) - - return -} - -// initQueryFrontendTripperware instantiates the tripperware used by the query frontend -// to optimize Prometheus query requests. 
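A query-frontend tripperware, instantiated below, is just a decorator over http.RoundTripper: NewTripperware returns a function that wraps the downstream round tripper with splitting, caching and similar layers. A minimal sketch of that shape, with a trivial logging layer standing in for the real middlewares:

```go
package main

import (
	"fmt"
	"net/http"
)

// Tripperware is the decorator shape used by the query frontend.
type Tripperware func(http.RoundTripper) http.RoundTripper

type roundTripFunc func(*http.Request) (*http.Response, error)

func (f roundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }

// logQueries is a toy layer; a real one would split or cache the query.
func logQueries(next http.RoundTripper) http.RoundTripper {
	return roundTripFunc(func(r *http.Request) (*http.Response, error) {
		fmt.Println("query:", r.URL.RawQuery)
		return next.RoundTrip(r)
	})
}

func main() {
	var tw Tripperware = logQueries
	client := &http.Client{Transport: tw(http.DefaultTransport)}
	_ = client // client.Get("http://frontend/api/v1/query_range?...")
}
```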
-func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err error) { - // Load the schema only if sharded queries is set. - if t.Cfg.QueryRange.ShardedQueries { - err := t.Cfg.Schema.Load() - if err != nil { - return nil, err - } - } - - tripperware, cache, err := queryrange.NewTripperware( - t.Cfg.QueryRange, - util_log.Logger, - t.Overrides, - queryrange.PrometheusCodec, - queryrange.PrometheusResponseExtractor{}, - t.Cfg.Schema, - promql.EngineOpts{ - Logger: util_log.Logger, - Reg: prometheus.DefaultRegisterer, - MaxSamples: t.Cfg.Querier.MaxSamples, - Timeout: t.Cfg.Querier.Timeout, - EnableAtModifier: t.Cfg.Querier.AtModifierEnabled, - EnablePerStepStats: t.Cfg.Querier.EnablePerStepStats, - NoStepSubqueryIntervalFn: func(int64) int64 { - return t.Cfg.Querier.DefaultEvaluationInterval.Milliseconds() - }, - }, - t.Cfg.Querier.QueryIngestersWithin, - prometheus.DefaultRegisterer, - t.TombstonesLoader, - ) - - if err != nil { - return nil, err - } - - t.QueryFrontendTripperware = tripperware - - return services.NewIdleService(nil, func(_ error) error { - if cache != nil { - cache.Stop() - cache = nil - } - return nil - }), nil -} - -func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { - roundTripper, frontendV1, frontendV2, err := frontend.InitFrontend(t.Cfg.Frontend, t.Overrides, t.Cfg.Server.GRPCListenPort, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { - return nil, err - } - - // Wrap roundtripper into Tripperware. - roundTripper = t.QueryFrontendTripperware(roundTripper) - - handler := transport.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util_log.Logger, prometheus.DefaultRegisterer) - t.API.RegisterQueryFrontendHandler(handler) - - if frontendV1 != nil { - t.API.RegisterQueryFrontend1(frontendV1) - t.Frontend = frontendV1 - - return frontendV1, nil - } else if frontendV2 != nil { - t.API.RegisterQueryFrontend2(frontendV2) - - return frontendV2, nil - } - - return nil, nil -} - -func (t *Cortex) initTableManager() (services.Service, error) { - if t.Cfg.Storage.Engine == storage.StorageEngineBlocks { - return nil, nil // table manager isn't used in v2 - } - - err := t.Cfg.Schema.Load() - if err != nil { - return nil, err - } - - // Assume the newest config is the one to use - lastConfig := &t.Cfg.Schema.Configs[len(t.Cfg.Schema.Configs)-1] - - if (t.Cfg.TableManager.ChunkTables.WriteScale.Enabled || - t.Cfg.TableManager.IndexTables.WriteScale.Enabled || - t.Cfg.TableManager.ChunkTables.InactiveWriteScale.Enabled || - t.Cfg.TableManager.IndexTables.InactiveWriteScale.Enabled || - t.Cfg.TableManager.ChunkTables.ReadScale.Enabled || - t.Cfg.TableManager.IndexTables.ReadScale.Enabled || - t.Cfg.TableManager.ChunkTables.InactiveReadScale.Enabled || - t.Cfg.TableManager.IndexTables.InactiveReadScale.Enabled) && - t.Cfg.Storage.AWSStorageConfig.Metrics.URL == "" { - level.Error(util_log.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided") - os.Exit(1) - } - - reg := prometheus.WrapRegistererWith( - prometheus.Labels{"component": "table-manager-store"}, prometheus.DefaultRegisterer) - - tableClient, err := storage.NewTableClient(lastConfig.IndexType, t.Cfg.Storage, reg) - if err != nil { - return nil, err - } - - bucketClient, err := storage.NewBucketClient(t.Cfg.Storage) - util_log.CheckFatal("initializing bucket client", err) - - var extraTables []chunk.ExtraTables - if t.Cfg.PurgerConfig.Enable { - reg := prometheus.WrapRegistererWith( - prometheus.Labels{"component": "table-manager-" + 
DeleteRequestsStore}, prometheus.DefaultRegisterer) - - deleteStoreTableClient, err := storage.NewTableClient(t.Cfg.Storage.DeleteStoreConfig.Store, t.Cfg.Storage, reg) - if err != nil { - return nil, err - } - - extraTables = append(extraTables, chunk.ExtraTables{TableClient: deleteStoreTableClient, Tables: t.Cfg.Storage.DeleteStoreConfig.GetTables()}) - } - - t.TableManager, err = chunk.NewTableManager(t.Cfg.TableManager, t.Cfg.Schema, t.Cfg.Ingester.MaxChunkAge, tableClient, - bucketClient, extraTables, prometheus.DefaultRegisterer) - return t.TableManager, err -} - -func (t *Cortex) initRulerStorage() (serv services.Service, err error) { - // if the ruler is not configured and we're in single binary then let's just log an error and continue. - // unfortunately there is no way to generate a "default" config and compare default against actual - // to determine if it's unconfigured. the following check, however, correctly tests this. - // Single binary integration tests will break if this ever drifts - if t.Cfg.isModuleEnabled(All) && t.Cfg.Ruler.StoreConfig.IsDefaults() && t.Cfg.RulerStorage.IsDefaults() { - level.Info(util_log.Logger).Log("msg", "Ruler storage is not configured in single binary mode and will not be started.") - return - } - - if !t.Cfg.Ruler.StoreConfig.IsDefaults() { - t.RulerStorage, err = ruler.NewLegacyRuleStore(t.Cfg.Ruler.StoreConfig, rules.FileLoader{}, util_log.Logger) - } else { - t.RulerStorage, err = ruler.NewRuleStore(context.Background(), t.Cfg.RulerStorage, t.Overrides, rules.FileLoader{}, util_log.Logger, prometheus.DefaultRegisterer) - } - return -} - -func (t *Cortex) initRuler() (serv services.Service, err error) { - if t.RulerStorage == nil { - level.Info(util_log.Logger).Log("msg", "RulerStorage is nil. Not starting the ruler.") - return nil, nil - } - - t.Cfg.Ruler.Ring.ListenPort = t.Cfg.Server.GRPCListenPort - rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer) - // TODO: Consider wrapping logger to differentiate from querier module logger - queryable, _, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, rulerRegisterer, util_log.Logger) - - managerFactory := ruler.DefaultTenantManagerFactory(t.Cfg.Ruler, t.Distributor, queryable, engine, t.Overrides, prometheus.DefaultRegisterer) - manager, err := ruler.NewDefaultMultiTenantManager(t.Cfg.Ruler, managerFactory, prometheus.DefaultRegisterer, util_log.Logger) - if err != nil { - return nil, err - } - - t.Ruler, err = ruler.NewRuler( - t.Cfg.Ruler, - manager, - prometheus.DefaultRegisterer, - util_log.Logger, - t.RulerStorage, - t.Overrides, - ) - if err != nil { - return - } - - // Expose HTTP/GRPC endpoints for the Ruler service - t.API.RegisterRuler(t.Ruler) - - // If the API is enabled, register the Ruler API - if t.Cfg.Ruler.EnableAPI { - t.API.RegisterRulerAPI(ruler.NewAPI(t.Ruler, t.RulerStorage, util_log.Logger)) - } - - return t.Ruler, nil -} - -func (t *Cortex) initConfig() (serv services.Service, err error) { - t.ConfigDB, err = db.New(t.Cfg.Configs.DB) - if err != nil { - return - } - - t.ConfigAPI = configAPI.New(t.ConfigDB, t.Cfg.Configs.API) - t.ConfigAPI.RegisterRoutes(t.Server.HTTP) - return services.NewIdleService(nil, func(_ error) error { - t.ConfigDB.Close() - return nil - }), nil -} - -func (t *Cortex) initAlertManager() (serv services.Service, err error) { - t.Cfg.Alertmanager.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort - - // Initialise the store. 
- var store alertstore.AlertStore - if !t.Cfg.Alertmanager.Store.IsDefaults() { - store, err = alertstore.NewLegacyAlertStore(t.Cfg.Alertmanager.Store, util_log.Logger) - } else { - store, err = alertstore.NewAlertStore(context.Background(), t.Cfg.AlertmanagerStorage, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer) - } - if err != nil { - return - } - - t.Alertmanager, err = alertmanager.NewMultitenantAlertmanager(&t.Cfg.Alertmanager, store, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { - return - } - - t.API.RegisterAlertmanager(t.Alertmanager, t.Cfg.isModuleEnabled(AlertManager), t.Cfg.Alertmanager.EnableAPI) - return t.Alertmanager, nil -} - -func (t *Cortex) initCompactor() (serv services.Service, err error) { - t.Cfg.Compactor.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort - - t.Compactor, err = compactor.NewCompactor(t.Cfg.Compactor, t.Cfg.BlocksStorage, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer, t.Overrides) - if err != nil { - return - } - - // Expose HTTP endpoints. - t.API.RegisterCompactor(t.Compactor) - return t.Compactor, nil -} - -func (t *Cortex) initStoreGateway() (serv services.Service, err error) { - if t.Cfg.Storage.Engine != storage.StorageEngineBlocks { - if !t.Cfg.isModuleEnabled(All) { - return nil, fmt.Errorf("storage engine must be set to blocks to enable the store-gateway") - } - return nil, nil - } - - t.Cfg.StoreGateway.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort - - t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.BlocksStorage, t.Overrides, t.Cfg.Server.LogLevel, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { - return nil, err - } - - // Expose HTTP endpoints. - t.API.RegisterStoreGateway(t.StoreGateway) - - return t.StoreGateway, nil -} - -func (t *Cortex) initMemberlistKV() (services.Service, error) { - reg := prometheus.DefaultRegisterer - t.Cfg.MemberlistKV.MetricsRegisterer = reg - t.Cfg.MemberlistKV.Codecs = []codec.Codec{ - ring.GetCodec(), - } - dnsProviderReg := prometheus.WrapRegistererWithPrefix( - "cortex_", - prometheus.WrapRegistererWith( - prometheus.Labels{"name": "memberlist"}, - reg, - ), - ) - dnsProvider := dns.NewProvider(util_log.Logger, dnsProviderReg, dns.GolangResolverType) - t.MemberlistKV = memberlist.NewKVInitService(&t.Cfg.MemberlistKV, util_log.Logger, dnsProvider, reg) - t.API.RegisterMemberlistKV(t.MemberlistKV) - - // Update the config. 
- t.Cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.StoreGateway.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Cfg.Alertmanager.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - - return t.MemberlistKV, nil -} - -func (t *Cortex) initChunksPurger() (services.Service, error) { - if t.Cfg.Storage.Engine != storage.StorageEngineChunks || !t.Cfg.PurgerConfig.Enable { - return nil, nil - } - - storageClient, err := storage.NewObjectClient(t.Cfg.PurgerConfig.ObjectStoreType, t.Cfg.Storage) - if err != nil { - return nil, err - } - - t.Purger, err = purger.NewPurger(t.Cfg.PurgerConfig, t.DeletesStore, t.Store, storageClient, prometheus.DefaultRegisterer) - if err != nil { - return nil, err - } - - t.API.RegisterChunksPurger(t.DeletesStore, t.Cfg.PurgerConfig.DeleteRequestCancelPeriod) - - return t.Purger, nil -} - -func (t *Cortex) initTenantDeletionAPI() (services.Service, error) { - if t.Cfg.Storage.Engine != storage.StorageEngineBlocks { - return nil, nil - } - - // t.RulerStorage can be nil when running in single-binary mode, and rule storage is not configured. - tenantDeletionAPI, err := purger.NewTenantDeletionAPI(t.Cfg.BlocksStorage, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { - return nil, err - } - - t.API.RegisterTenantDeletion(tenantDeletionAPI) - return nil, nil -} - -func (t *Cortex) initQueryScheduler() (services.Service, error) { - s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer) - if err != nil { - return nil, errors.Wrap(err, "query-scheduler init") - } - - t.API.RegisterQueryScheduler(s) - return s, nil -} - -func (t *Cortex) setupModuleManager() error { - mm := modules.NewManager(util_log.Logger) - - // Register all modules here. 
- // RegisterModule(name string, initFn func()(services.Service, error)) - mm.RegisterModule(Server, t.initServer, modules.UserInvisibleModule) - mm.RegisterModule(API, t.initAPI, modules.UserInvisibleModule) - mm.RegisterModule(RuntimeConfig, t.initRuntimeConfig, modules.UserInvisibleModule) - mm.RegisterModule(MemberlistKV, t.initMemberlistKV, modules.UserInvisibleModule) - mm.RegisterModule(Ring, t.initRing, modules.UserInvisibleModule) - mm.RegisterModule(Overrides, t.initOverrides, modules.UserInvisibleModule) - mm.RegisterModule(OverridesExporter, t.initOverridesExporter) - mm.RegisterModule(Distributor, t.initDistributor) - mm.RegisterModule(DistributorService, t.initDistributorService, modules.UserInvisibleModule) - mm.RegisterModule(Store, t.initChunkStore, modules.UserInvisibleModule) - mm.RegisterModule(DeleteRequestsStore, t.initDeleteRequestsStore, modules.UserInvisibleModule) - mm.RegisterModule(Ingester, t.initIngester) - mm.RegisterModule(IngesterService, t.initIngesterService, modules.UserInvisibleModule) - mm.RegisterModule(Flusher, t.initFlusher) - mm.RegisterModule(Queryable, t.initQueryable, modules.UserInvisibleModule) - mm.RegisterModule(Querier, t.initQuerier) - mm.RegisterModule(StoreQueryable, t.initStoreQueryables, modules.UserInvisibleModule) - mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendTripperware, modules.UserInvisibleModule) - mm.RegisterModule(QueryFrontend, t.initQueryFrontend) - mm.RegisterModule(TableManager, t.initTableManager) - mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule) - mm.RegisterModule(Ruler, t.initRuler) - mm.RegisterModule(Configs, t.initConfig) - mm.RegisterModule(AlertManager, t.initAlertManager) - mm.RegisterModule(Compactor, t.initCompactor) - mm.RegisterModule(StoreGateway, t.initStoreGateway) - mm.RegisterModule(ChunksPurger, t.initChunksPurger, modules.UserInvisibleModule) - mm.RegisterModule(TenantDeletion, t.initTenantDeletionAPI, modules.UserInvisibleModule) - mm.RegisterModule(Purger, nil) - mm.RegisterModule(QueryScheduler, t.initQueryScheduler) - mm.RegisterModule(TenantFederation, t.initTenantFederation, modules.UserInvisibleModule) - mm.RegisterModule(All, nil) - - // Add dependencies - deps := map[string][]string{ - API: {Server}, - MemberlistKV: {API}, - RuntimeConfig: {API}, - Ring: {API, RuntimeConfig, MemberlistKV}, - Overrides: {RuntimeConfig}, - OverridesExporter: {RuntimeConfig}, - Distributor: {DistributorService, API}, - DistributorService: {Ring, Overrides}, - Store: {Overrides, DeleteRequestsStore}, - Ingester: {IngesterService, API}, - IngesterService: {Overrides, Store, RuntimeConfig, MemberlistKV}, - Flusher: {Store, API}, - Queryable: {Overrides, DistributorService, Store, Ring, API, StoreQueryable, MemberlistKV}, - Querier: {TenantFederation}, - StoreQueryable: {Overrides, Store, MemberlistKV}, - QueryFrontendTripperware: {API, Overrides, DeleteRequestsStore}, - QueryFrontend: {QueryFrontendTripperware}, - QueryScheduler: {API, Overrides}, - TableManager: {API}, - Ruler: {DistributorService, Store, StoreQueryable, RulerStorage}, - RulerStorage: {Overrides}, - Configs: {API}, - AlertManager: {API, MemberlistKV, Overrides}, - Compactor: {API, MemberlistKV, Overrides}, - StoreGateway: {API, Overrides, MemberlistKV}, - ChunksPurger: {Store, DeleteRequestsStore, API}, - TenantDeletion: {Store, API, Overrides}, - Purger: {ChunksPurger, TenantDeletion}, - TenantFederation: {Queryable}, - All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, 
StoreGateway, Ruler}, - } - for mod, targets := range deps { - if err := mm.AddDependency(mod, targets...); err != nil { - return err - } - } - - t.ModuleManager = mm - - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go deleted file mode 100644 index 150c25727..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go +++ /dev/null @@ -1,187 +0,0 @@ -package cortex - -import ( - "errors" - "io" - "net/http" - - "gopkg.in/yaml.v2" - - "github.com/cortexproject/cortex/pkg/ingester" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/runtimeconfig" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -var ( - errMultipleDocuments = errors.New("the provided runtime configuration contains multiple documents") -) - -// runtimeConfigValues are values that can be reloaded from configuration file while Cortex is running. -// Reloading is done by runtime_config.Manager, which also keeps the currently loaded config. -// These values are then pushed to the components that are interested in them. -type runtimeConfigValues struct { - TenantLimits map[string]*validation.Limits `yaml:"overrides"` - - Multi kv.MultiRuntimeConfig `yaml:"multi_kv_config"` - - IngesterChunkStreaming *bool `yaml:"ingester_stream_chunks_when_using_blocks"` - - IngesterLimits *ingester.InstanceLimits `yaml:"ingester_limits"` -} - -// runtimeConfigTenantLimits provides per-tenant limit overrides based on a runtimeconfig.Manager -// that reads limits from a configuration file on disk and periodically reloads them. -type runtimeConfigTenantLimits struct { - manager *runtimeconfig.Manager -} - -// newTenantLimits creates a new validation.TenantLimits that loads per-tenant limit overrides from -// a runtimeconfig.Manager -func newTenantLimits(manager *runtimeconfig.Manager) validation.TenantLimits { - return &runtimeConfigTenantLimits{ - manager: manager, - } -} - -func (l *runtimeConfigTenantLimits) ByUserID(userID string) *validation.Limits { - return l.AllByUserID()[userID] -} - -func (l *runtimeConfigTenantLimits) AllByUserID() map[string]*validation.Limits { - cfg, ok := l.manager.GetConfig().(*runtimeConfigValues) - if cfg != nil && ok { - return cfg.TenantLimits - } - - return nil -} - -func loadRuntimeConfig(r io.Reader) (interface{}, error) { - var overrides = &runtimeConfigValues{} - - decoder := yaml.NewDecoder(r) - decoder.SetStrict(true) - - // Decode the first document. An empty document (EOF) is OK. 
- if err := decoder.Decode(&overrides); err != nil && !errors.Is(err, io.EOF) { - return nil, err - } - - // Ensure the provided YAML config is not composed of multiple documents, - if err := decoder.Decode(&runtimeConfigValues{}); !errors.Is(err, io.EOF) { - return nil, errMultipleDocuments - } - - return overrides, nil -} - -func multiClientRuntimeConfigChannel(manager *runtimeconfig.Manager) func() <-chan kv.MultiRuntimeConfig { - if manager == nil { - return nil - } - // returns function that can be used in MultiConfig.ConfigProvider - return func() <-chan kv.MultiRuntimeConfig { - outCh := make(chan kv.MultiRuntimeConfig, 1) - - // push initial config to the channel - val := manager.GetConfig() - if cfg, ok := val.(*runtimeConfigValues); ok && cfg != nil { - outCh <- cfg.Multi - } - - ch := manager.CreateListenerChannel(1) - go func() { - for val := range ch { - if cfg, ok := val.(*runtimeConfigValues); ok && cfg != nil { - outCh <- cfg.Multi - } - } - }() - - return outCh - } -} - -func ingesterChunkStreaming(manager *runtimeconfig.Manager) func() ingester.QueryStreamType { - if manager == nil { - return nil - } - - return func() ingester.QueryStreamType { - val := manager.GetConfig() - if cfg, ok := val.(*runtimeConfigValues); ok && cfg != nil { - if cfg.IngesterChunkStreaming == nil { - return ingester.QueryStreamDefault - } - - if *cfg.IngesterChunkStreaming { - return ingester.QueryStreamChunks - } - return ingester.QueryStreamSamples - } - - return ingester.QueryStreamDefault - } -} - -func ingesterInstanceLimits(manager *runtimeconfig.Manager) func() *ingester.InstanceLimits { - if manager == nil { - return nil - } - - return func() *ingester.InstanceLimits { - val := manager.GetConfig() - if cfg, ok := val.(*runtimeConfigValues); ok && cfg != nil { - return cfg.IngesterLimits - } - return nil - } -} - -func runtimeConfigHandler(runtimeCfgManager *runtimeconfig.Manager, defaultLimits validation.Limits) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - cfg, ok := runtimeCfgManager.GetConfig().(*runtimeConfigValues) - if !ok || cfg == nil { - util.WriteTextResponse(w, "runtime config file doesn't exist") - return - } - - var output interface{} - switch r.URL.Query().Get("mode") { - case "diff": - // Default runtime config is just empty struct, but to make diff work, - // we set defaultLimits for every tenant that exists in runtime config. 
- defaultCfg := runtimeConfigValues{} - defaultCfg.TenantLimits = map[string]*validation.Limits{} - for k, v := range cfg.TenantLimits { - if v != nil { - defaultCfg.TenantLimits[k] = &defaultLimits - } - } - - cfgYaml, err := util.YAMLMarshalUnmarshal(cfg) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - defaultCfgYaml, err := util.YAMLMarshalUnmarshal(defaultCfg) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - output, err = util.DiffConfig(defaultCfgYaml, cfgYaml) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - default: - output = cfg - } - util.WriteYAMLResponse(w, output) - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go deleted file mode 100644 index dacced9ee..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go +++ /dev/null @@ -1,70 +0,0 @@ -package cortex - -import ( - "context" - "fmt" - - "github.com/go-kit/log/level" - "github.com/weaveworks/common/server" - - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// NewServerService constructs service from Server component. -// servicesToWaitFor is called when server is stopping, and should return all -// services that need to terminate before server actually stops. -// N.B.: this function is NOT Cortex specific, please let's keep it that way. -// Passed server should not react on signals. Early return from Run function is considered to be an error. -func NewServerService(serv *server.Server, servicesToWaitFor func() []services.Service) services.Service { - serverDone := make(chan error, 1) - - runFn := func(ctx context.Context) error { - go func() { - defer close(serverDone) - serverDone <- serv.Run() - }() - - select { - case <-ctx.Done(): - return nil - case err := <-serverDone: - if err != nil { - return err - } - return fmt.Errorf("server stopped unexpectedly") - } - } - - stoppingFn := func(_ error) error { - // wait until all modules are done, and then shutdown server. - for _, s := range servicesToWaitFor() { - _ = s.AwaitTerminated(context.Background()) - } - - // shutdown HTTP and gRPC servers (this also unblocks Run) - serv.Shutdown() - - // if not closed yet, wait until server stops. - <-serverDone - level.Info(util_log.Logger).Log("msg", "server stopped") - return nil - } - - return services.NewBasicService(nil, runFn, stoppingFn) -} - -// DisableSignalHandling puts a dummy signal handler -func DisableSignalHandling(config *server.Config) { - config.SignalHandler = make(ignoreSignalHandler) -} - -type ignoreSignalHandler chan struct{} - -func (dh ignoreSignalHandler) Loop() { - <-dh -} - -func (dh ignoreSignalHandler) Stop() { - close(dh) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/status.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/status.go deleted file mode 100644 index 9b5fdba32..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/status.go +++ /dev/null @@ -1,72 +0,0 @@ -package cortex - -import ( - "html/template" - "net/http" - "sort" - "time" - - "github.com/cortexproject/cortex/pkg/util" -) - -const tpl = ` - - - - - Cortex Services Status - - -
- 		<h1>Cortex Services Status</h1>
- 		<p>Current time: {{ .Now }}</p>
- 		<table border="1">
- 			<thead>
- 				<tr>
- 					<th>Service</th>
- 					<th>Status</th>
- 				</tr>
- 			</thead>
- 			<tbody>
- 				{{ range .Services }}
- 				<tr>
- 					<td>{{ .Name }}</td>
- 					<td>{{ .Status }}</td>
- 				</tr>
- 				{{ end }}
- 			</tbody>
- 		</table>
- -` - -var tmpl *template.Template - -type renderService struct { - Name string `json:"name"` - Status string `json:"status"` -} - -func init() { - tmpl = template.Must(template.New("webpage").Parse(tpl)) -} - -func (t *Cortex) servicesHandler(w http.ResponseWriter, r *http.Request) { - svcs := make([]renderService, 0) - for mod, s := range t.ServiceMap { - svcs = append(svcs, renderService{ - Name: mod, - Status: s.State().String(), - }) - } - sort.Slice(svcs, func(i, j int) bool { - return svcs[i].Name < svcs[j].Name - }) - - // TODO: this could be extended to also print sub-services, if given service has any - util.RenderHTTPResponse(w, struct { - Now time.Time `json:"now"` - Services []renderService `json:"services"` - }{ - Now: time.Now(), - Services: svcs, - }, tmpl, r) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go deleted file mode 100644 index daa05e154..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go +++ /dev/null @@ -1,33 +0,0 @@ -package cortex - -import ( - "context" - - "github.com/opentracing/opentracing-go" - "github.com/thanos-io/thanos/pkg/tracing" - "google.golang.org/grpc" -) - -// ThanosTracerUnaryInterceptor injects the opentracing global tracer into the context -// in order to get it picked up by Thanos components. -func ThanosTracerUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - return handler(tracing.ContextWithTracer(ctx, opentracing.GlobalTracer()), req) -} - -// ThanosTracerStreamInterceptor injects the opentracing global tracer into the context -// in order to get it picked up by Thanos components. -func ThanosTracerStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - return handler(srv, wrappedServerStream{ - ctx: tracing.ContextWithTracer(ss.Context(), opentracing.GlobalTracer()), - ServerStream: ss, - }) -} - -type wrappedServerStream struct { - ctx context.Context - grpc.ServerStream -} - -func (ss wrappedServerStream) Context() context.Context { - return ss.ctx -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go deleted file mode 100644 index 2093ed4c7..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ /dev/null @@ -1,1259 +0,0 @@ -package distributor - -import ( - "context" - "flag" - "fmt" - io "io" - "net/http" - "sort" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/relabel" - "github.com/prometheus/prometheus/scrape" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/instrument" - "github.com/weaveworks/common/user" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/cortexpb" - ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/prom1/storage/metric" - "github.com/cortexproject/cortex/pkg/ring" - ring_client "github.com/cortexproject/cortex/pkg/ring/client" - 
"github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/extract" - "github.com/cortexproject/cortex/pkg/util/limiter" - util_log "github.com/cortexproject/cortex/pkg/util/log" - util_math "github.com/cortexproject/cortex/pkg/util/math" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -var ( - emptyPreallocSeries = cortexpb.PreallocTimeseries{} - - supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle} - - // Validation errors. - errInvalidShardingStrategy = errors.New("invalid sharding strategy") - errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than 0") - - // Distributor instance limits errors. - errTooManyInflightPushRequests = errors.New("too many inflight push requests in distributor") - errMaxSamplesPushRateLimitReached = errors.New("distributor's samples push rate limit reached") -) - -const ( - // ringKey is the key under which we store the distributors ring in the KVStore. - ringKey = "distributor" - - typeSamples = "samples" - typeMetadata = "metadata" - - instanceIngestionRateTickInterval = time.Second -) - -// Distributor is a storage.SampleAppender and a client.Querier which -// forwards appends and queries to individual ingesters. -type Distributor struct { - services.Service - - cfg Config - log log.Logger - ingestersRing ring.ReadRing - ingesterPool *ring_client.Pool - limits *validation.Overrides - - // The global rate limiter requires a distributors ring to count - // the number of healthy instances - distributorsLifeCycler *ring.Lifecycler - distributorsRing *ring.Ring - - // For handling HA replicas. - HATracker *haTracker - - // Per-user rate limiter. 
- ingestionRateLimiter *limiter.RateLimiter - - // Manager for subservices (HA Tracker, distributor ring and client pool) - subservices *services.Manager - subservicesWatcher *services.FailureWatcher - - activeUsers *util.ActiveUsersCleanupService - - ingestionRate *util_math.EwmaRate - inflightPushRequests atomic.Int64 - - // Metrics - queryDuration *instrument.HistogramCollector - receivedSamples *prometheus.CounterVec - receivedExemplars *prometheus.CounterVec - receivedMetadata *prometheus.CounterVec - incomingSamples *prometheus.CounterVec - incomingExemplars *prometheus.CounterVec - incomingMetadata *prometheus.CounterVec - nonHASamples *prometheus.CounterVec - dedupedSamples *prometheus.CounterVec - labelsHistogram prometheus.Histogram - ingesterAppends *prometheus.CounterVec - ingesterAppendFailures *prometheus.CounterVec - ingesterQueries *prometheus.CounterVec - ingesterQueryFailures *prometheus.CounterVec - replicationFactor prometheus.Gauge - latestSeenSampleTimestampPerUser *prometheus.GaugeVec -} - -// Config contains the configuration required to -// create a Distributor -type Config struct { - PoolConfig PoolConfig `yaml:"pool"` - - HATrackerConfig HATrackerConfig `yaml:"ha_tracker"` - - MaxRecvMsgSize int `yaml:"max_recv_msg_size"` - RemoteTimeout time.Duration `yaml:"remote_timeout"` - ExtraQueryDelay time.Duration `yaml:"extra_queue_delay"` - - ShardingStrategy string `yaml:"sharding_strategy"` - ShardByAllLabels bool `yaml:"shard_by_all_labels"` - ExtendWrites bool `yaml:"extend_writes"` - - // Distributors ring - DistributorRing RingConfig `yaml:"ring"` - - // for testing and for extending the ingester by adding calls to the client - IngesterClientFactory ring_client.PoolFactory `yaml:"-"` - - // when true the distributor does not validate the label name, Cortex doesn't directly use - // this (and should never use it) but this feature is used by other projects built on top of it - SkipLabelNameValidation bool `yaml:"-"` - - // This config is dynamically injected because defined in the querier config. - ShuffleShardingLookbackPeriod time.Duration `yaml:"-"` - - // Limits for distributor - InstanceLimits InstanceLimits `yaml:"instance_limits"` -} - -type InstanceLimits struct { - MaxIngestionRate float64 `yaml:"max_ingestion_rate"` - MaxInflightPushRequests int `yaml:"max_inflight_push_requests"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.PoolConfig.RegisterFlags(f) - cfg.HATrackerConfig.RegisterFlags(f) - cfg.DistributorRing.RegisterFlags(f) - - f.IntVar(&cfg.MaxRecvMsgSize, "distributor.max-recv-msg-size", 100<<20, "remote_write API max receive message size (bytes).") - f.DurationVar(&cfg.RemoteTimeout, "distributor.remote-timeout", 2*time.Second, "Timeout for downstream ingesters.") - f.DurationVar(&cfg.ExtraQueryDelay, "distributor.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.") - f.BoolVar(&cfg.ShardByAllLabels, "distributor.shard-by-all-labels", false, "Distribute samples based on all labels, as opposed to solely by user and metric name.") - f.StringVar(&cfg.ShardingStrategy, "distributor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) - f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. 
It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.") - - f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.") - f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.") -} - -// Validate config and returns error on failure -func (cfg *Config) Validate(limits validation.Limits) error { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { - return errInvalidShardingStrategy - } - - if cfg.ShardingStrategy == util.ShardingStrategyShuffle && limits.IngestionTenantShardSize <= 0 { - return errInvalidTenantShardSize - } - - return cfg.HATrackerConfig.Validate() -} - -const ( - instanceLimitsMetric = "cortex_distributor_instance_limits" - instanceLimitsMetricHelp = "Instance limits used by this distributor." // Must be same for all registrations. - limitLabel = "limit" -) - -// New constructs a new Distributor -func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Overrides, ingestersRing ring.ReadRing, canJoinDistributorsRing bool, reg prometheus.Registerer, log log.Logger) (*Distributor, error) { - if cfg.IngesterClientFactory == nil { - cfg.IngesterClientFactory = func(addr string) (ring_client.PoolClient, error) { - return ingester_client.MakeIngesterClient(addr, clientConfig) - } - } - - cfg.PoolConfig.RemoteTimeout = cfg.RemoteTimeout - - haTracker, err := newHATracker(cfg.HATrackerConfig, limits, reg, log) - if err != nil { - return nil, err - } - - subservices := []services.Service(nil) - subservices = append(subservices, haTracker) - - // Create the configured ingestion rate limit strategy (local or global). In case - // it's an internal dependency and can't join the distributors ring, we skip rate - // limiting. 
- var ingestionRateStrategy limiter.RateLimiterStrategy - var distributorsLifeCycler *ring.Lifecycler - var distributorsRing *ring.Ring - - if !canJoinDistributorsRing { - ingestionRateStrategy = newInfiniteIngestionRateStrategy() - } else if limits.IngestionRateStrategy() == validation.GlobalIngestionRateStrategy { - distributorsLifeCycler, err = ring.NewLifecycler(cfg.DistributorRing.ToLifecyclerConfig(), nil, "distributor", ringKey, true, log, prometheus.WrapRegistererWithPrefix("cortex_", reg)) - if err != nil { - return nil, err - } - - distributorsRing, err = ring.New(cfg.DistributorRing.ToRingConfig(), "distributor", ringKey, log, prometheus.WrapRegistererWithPrefix("cortex_", reg)) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize distributors' ring client") - } - subservices = append(subservices, distributorsLifeCycler, distributorsRing) - - ingestionRateStrategy = newGlobalIngestionRateStrategy(limits, distributorsLifeCycler) - } else { - ingestionRateStrategy = newLocalIngestionRateStrategy(limits) - } - - d := &Distributor{ - cfg: cfg, - log: log, - ingestersRing: ingestersRing, - ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, log), - distributorsLifeCycler: distributorsLifeCycler, - distributorsRing: distributorsRing, - limits: limits, - ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), - HATracker: haTracker, - ingestionRate: util_math.NewEWMARate(0.2, instanceIngestionRateTickInterval), - - queryDuration: instrument.NewHistogramCollector(promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "distributor_query_duration_seconds", - Help: "Time spent executing expression and exemplar queries.", - Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30}, - }, []string{"method", "status_code"})), - receivedSamples: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_received_samples_total", - Help: "The total number of received samples, excluding rejected and deduped samples.", - }, []string{"user"}), - receivedExemplars: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_received_exemplars_total", - Help: "The total number of received exemplars, excluding rejected and deduped exemplars.", - }, []string{"user"}), - receivedMetadata: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_received_metadata_total", - Help: "The total number of received metadata, excluding rejected.", - }, []string{"user"}), - incomingSamples: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_samples_in_total", - Help: "The total number of samples that have come in to the distributor, including rejected or deduped samples.", - }, []string{"user"}), - incomingExemplars: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_exemplars_in_total", - Help: "The total number of exemplars that have come in to the distributor, including rejected or deduped exemplars.", - }, []string{"user"}), - incomingMetadata: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_metadata_in_total", - Help: "The total number of metadata the have come in to the distributor, including rejected.", - }, []string{"user"}), - nonHASamples: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - 
Namespace: "cortex", - Name: "distributor_non_ha_samples_received_total", - Help: "The total number of received samples for a user that has HA tracking turned on, but the sample didn't contain both HA labels.", - }, []string{"user"}), - dedupedSamples: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_deduped_samples_total", - Help: "The total number of deduplicated samples.", - }, []string{"user", "cluster"}), - labelsHistogram: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "labels_per_sample", - Help: "Number of labels per sample.", - Buckets: []float64{5, 10, 15, 20, 25}, - }), - ingesterAppends: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_ingester_appends_total", - Help: "The total number of batch appends sent to ingesters.", - }, []string{"ingester", "type"}), - ingesterAppendFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_ingester_append_failures_total", - Help: "The total number of failed batch appends sent to ingesters.", - }, []string{"ingester", "type", "status"}), - ingesterQueries: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_ingester_queries_total", - Help: "The total number of queries sent to ingesters.", - }, []string{"ingester"}), - ingesterQueryFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "distributor_ingester_query_failures_total", - Help: "The total number of failed queries sent to ingesters.", - }, []string{"ingester"}), - replicationFactor: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "distributor_replication_factor", - Help: "The configured replication factor.", - }), - latestSeenSampleTimestampPerUser: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Name: "cortex_distributor_latest_seen_sample_timestamp_seconds", - Help: "Unix timestamp of latest received sample per user.", - }, []string{"user"}), - } - - promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: instanceLimitsMetric, - Help: instanceLimitsMetricHelp, - ConstLabels: map[string]string{limitLabel: "max_inflight_push_requests"}, - }).Set(float64(cfg.InstanceLimits.MaxInflightPushRequests)) - promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: instanceLimitsMetric, - Help: instanceLimitsMetricHelp, - ConstLabels: map[string]string{limitLabel: "max_ingestion_rate"}, - }).Set(cfg.InstanceLimits.MaxIngestionRate) - - promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_distributor_inflight_push_requests", - Help: "Current number of inflight push requests in distributor.", - }, func() float64 { - return float64(d.inflightPushRequests.Load()) - }) - promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_distributor_ingestion_rate_samples_per_second", - Help: "Current ingestion rate in samples/sec that distributor is using to limit access.", - }, func() float64 { - return d.ingestionRate.Rate() - }) - - d.replicationFactor.Set(float64(ingestersRing.ReplicationFactor())) - d.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(d.cleanupInactiveUser) - - subservices = append(subservices, d.ingesterPool, d.activeUsers) - d.subservices, err = services.NewManager(subservices...) 
- if err != nil { - return nil, err - } - d.subservicesWatcher = services.NewFailureWatcher() - d.subservicesWatcher.WatchManager(d.subservices) - - d.Service = services.NewBasicService(d.starting, d.running, d.stopping) - return d, nil -} - -func (d *Distributor) starting(ctx context.Context) error { - if d.cfg.InstanceLimits != (InstanceLimits{}) { - util_log.WarnExperimentalUse("distributor instance limits") - } - - // Only report success if all sub-services start properly - return services.StartManagerAndAwaitHealthy(ctx, d.subservices) -} - -func (d *Distributor) running(ctx context.Context) error { - ingestionRateTicker := time.NewTicker(instanceIngestionRateTickInterval) - defer ingestionRateTicker.Stop() - - for { - select { - case <-ctx.Done(): - return nil - - case <-ingestionRateTicker.C: - d.ingestionRate.Tick() - - case err := <-d.subservicesWatcher.Chan(): - return errors.Wrap(err, "distributor subservice failed") - } - } -} - -func (d *Distributor) cleanupInactiveUser(userID string) { - d.ingestersRing.CleanupShuffleShardCache(userID) - - d.HATracker.cleanupHATrackerMetricsForUser(userID) - - d.receivedSamples.DeleteLabelValues(userID) - d.receivedExemplars.DeleteLabelValues(userID) - d.receivedMetadata.DeleteLabelValues(userID) - d.incomingSamples.DeleteLabelValues(userID) - d.incomingExemplars.DeleteLabelValues(userID) - d.incomingMetadata.DeleteLabelValues(userID) - d.nonHASamples.DeleteLabelValues(userID) - d.latestSeenSampleTimestampPerUser.DeleteLabelValues(userID) - - if err := util.DeleteMatchingLabels(d.dedupedSamples, map[string]string{"user": userID}); err != nil { - level.Warn(d.log).Log("msg", "failed to remove cortex_distributor_deduped_samples_total metric for user", "user", userID, "err", err) - } - - validation.DeletePerUserValidationMetrics(userID, d.log) -} - -// Called after distributor is asked to stop via StopAsync. -func (d *Distributor) stopping(_ error) error { - return services.StopManagerAndAwaitStopped(context.Background(), d.subservices) -} - -func (d *Distributor) tokenForLabels(userID string, labels []cortexpb.LabelAdapter) (uint32, error) { - if d.cfg.ShardByAllLabels { - return shardByAllLabels(userID, labels), nil - } - - unsafeMetricName, err := extract.UnsafeMetricNameFromLabelAdapters(labels) - if err != nil { - return 0, err - } - return shardByMetricName(userID, unsafeMetricName), nil -} - -func (d *Distributor) tokenForMetadata(userID string, metricName string) uint32 { - if d.cfg.ShardByAllLabels { - return shardByMetricName(userID, metricName) - } - - return shardByUser(userID) -} - -// shardByMetricName returns the token for the given metric. The provided metricName -// is guaranteed to not be retained. -func shardByMetricName(userID string, metricName string) uint32 { - h := shardByUser(userID) - h = ingester_client.HashAdd32(h, metricName) - return h -} - -func shardByUser(userID string) uint32 { - h := ingester_client.HashNew32() - h = ingester_client.HashAdd32(h, userID) - return h -} - -// This function generates different values for different order of same labels. -func shardByAllLabels(userID string, labels []cortexpb.LabelAdapter) uint32 { - h := shardByUser(userID) - for _, label := range labels { - h = ingester_client.HashAdd32(h, label.Name) - h = ingester_client.HashAdd32(h, label.Value) - } - return h -} - -// Remove the label labelname from a slice of LabelPairs if it exists. 
-func removeLabel(labelName string, labels *[]cortexpb.LabelAdapter) { - for i := 0; i < len(*labels); i++ { - pair := (*labels)[i] - if pair.Name == labelName { - *labels = append((*labels)[:i], (*labels)[i+1:]...) - return - } - } -} - -// Returns a boolean that indicates whether or not we want to remove the replica label going forward, -// and an error that indicates whether we want to accept samples based on the cluster/replica found in ts. -// nil for the error means accept the sample. -func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica string) (removeReplicaLabel bool, _ error) { - // If the sample doesn't have either HA label, accept it. - // At the moment we want to accept these samples by default. - if cluster == "" || replica == "" { - return false, nil - } - - // If replica label is too long, don't use it. We accept the sample here, but it will fail validation later anyway. - if len(replica) > d.limits.MaxLabelValueLength(userID) { - return false, nil - } - - // At this point we know we have both HA labels, we should lookup - // the cluster/instance here to see if we want to accept this sample. - err := d.HATracker.checkReplica(ctx, userID, cluster, replica, time.Now()) - // checkReplica should only have returned an error if there was a real error talking to Consul, or if the replica labels don't match. - if err != nil { // Don't accept the sample. - return false, err - } - return true, nil -} - -// Validates a single series from a write request. Will remove labels if -// any are configured to be dropped for the user ID. -// Returns the validated series with it's labels/samples, and any error. -// The returned error may retain the series labels. -func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID string, skipLabelNameValidation bool) (cortexpb.PreallocTimeseries, validation.ValidationError) { - d.labelsHistogram.Observe(float64(len(ts.Labels))) - if err := validation.ValidateLabels(d.limits, userID, ts.Labels, skipLabelNameValidation); err != nil { - return emptyPreallocSeries, err - } - - var samples []cortexpb.Sample - if len(ts.Samples) > 0 { - // Only alloc when data present - samples = make([]cortexpb.Sample, 0, len(ts.Samples)) - for _, s := range ts.Samples { - if err := validation.ValidateSample(d.limits, userID, ts.Labels, s); err != nil { - return emptyPreallocSeries, err - } - samples = append(samples, s) - } - } - - var exemplars []cortexpb.Exemplar - if len(ts.Exemplars) > 0 { - // Only alloc when data present - exemplars = make([]cortexpb.Exemplar, 0, len(ts.Exemplars)) - for _, e := range ts.Exemplars { - if err := validation.ValidateExemplar(userID, ts.Labels, e); err != nil { - // An exemplar validation error prevents ingesting samples - // in the same series object. However because the current Prometheus - // remote write implementation only populates one or the other, - // there never will be any. - return emptyPreallocSeries, err - } - exemplars = append(exemplars, e) - } - } - - return cortexpb.PreallocTimeseries{ - TimeSeries: &cortexpb.TimeSeries{ - Labels: ts.Labels, - Samples: samples, - Exemplars: exemplars, - }, - }, - nil -} - -// Push implements client.IngesterServer -func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - // We will report *this* request in the error too. 
- inflight := d.inflightPushRequests.Inc() - defer d.inflightPushRequests.Dec() - - if d.cfg.InstanceLimits.MaxInflightPushRequests > 0 && inflight > int64(d.cfg.InstanceLimits.MaxInflightPushRequests) { - return nil, errTooManyInflightPushRequests - } - - if d.cfg.InstanceLimits.MaxIngestionRate > 0 { - if rate := d.ingestionRate.Rate(); rate >= d.cfg.InstanceLimits.MaxIngestionRate { - return nil, errMaxSamplesPushRateLimitReached - } - } - - now := time.Now() - d.activeUsers.UpdateUserTimestamp(userID, now) - - source := util.GetSourceIPsFromOutgoingCtx(ctx) - - var firstPartialErr error - removeReplica := false - - numSamples := 0 - numExemplars := 0 - for _, ts := range req.Timeseries { - numSamples += len(ts.Samples) - numExemplars += len(ts.Exemplars) - } - // Count the total samples in, prior to validation or deduplication, for comparison with other metrics. - d.incomingSamples.WithLabelValues(userID).Add(float64(numSamples)) - d.incomingExemplars.WithLabelValues(userID).Add(float64(numExemplars)) - // Count the total number of metadata in. - d.incomingMetadata.WithLabelValues(userID).Add(float64(len(req.Metadata))) - - // A WriteRequest can only contain series or metadata but not both. This might change in the future. - // For each timeseries or samples, we compute a hash to distribute across ingesters; - // check each sample/metadata and discard if outside limits. - validatedTimeseries := make([]cortexpb.PreallocTimeseries, 0, len(req.Timeseries)) - validatedMetadata := make([]*cortexpb.MetricMetadata, 0, len(req.Metadata)) - metadataKeys := make([]uint32, 0, len(req.Metadata)) - seriesKeys := make([]uint32, 0, len(req.Timeseries)) - validatedSamples := 0 - validatedExemplars := 0 - - if d.limits.AcceptHASamples(userID) && len(req.Timeseries) > 0 { - cluster, replica := findHALabels(d.limits.HAReplicaLabel(userID), d.limits.HAClusterLabel(userID), req.Timeseries[0].Labels) - removeReplica, err = d.checkSample(ctx, userID, cluster, replica) - if err != nil { - // Ensure the request slice is reused if the series get deduped. - cortexpb.ReuseSlice(req.Timeseries) - - if errors.Is(err, replicasNotMatchError{}) { - // These samples have been deduped. - d.dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numSamples)) - return nil, httpgrpc.Errorf(http.StatusAccepted, err.Error()) - } - - if errors.Is(err, tooManyClustersError{}) { - validation.DiscardedSamples.WithLabelValues(validation.TooManyHAClusters, userID).Add(float64(numSamples)) - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - return nil, err - } - // If there wasn't an error but removeReplica is false that means we didn't find both HA labels. - if !removeReplica { - d.nonHASamples.WithLabelValues(userID).Add(float64(numSamples)) - } - } - - latestSampleTimestampMs := int64(0) - defer func() { - // Update this metric even in case of errors. - if latestSampleTimestampMs > 0 { - d.latestSeenSampleTimestampPerUser.WithLabelValues(userID).Set(float64(latestSampleTimestampMs) / 1000) - } - }() - - // For each timeseries, compute a hash to distribute across ingesters; - // check each sample and discard if outside limits. - for _, ts := range req.Timeseries { - // Use timestamp of latest sample in the series. If samples for series are not ordered, metric for user may be wrong. 
- if len(ts.Samples) > 0 { - latestSampleTimestampMs = util_math.Max64(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs) - } - - if mrc := d.limits.MetricRelabelConfigs(userID); len(mrc) > 0 { - l := relabel.Process(cortexpb.FromLabelAdaptersToLabels(ts.Labels), mrc...) - if len(l) == 0 { - // all labels are gone, samples will be discarded - validation.DiscardedSamples.WithLabelValues( - validation.DroppedByRelabelConfiguration, - userID, - ).Add(float64(len(ts.Samples))) - continue - } - ts.Labels = cortexpb.FromLabelsToLabelAdapters(l) - } - - // If we found both the cluster and replica labels, we only want to include the cluster label when - // storing series in Cortex. If we kept the replica label we would end up with another series for the same - // series we're trying to dedupe when HA tracking moves over to a different replica. - if removeReplica { - removeLabel(d.limits.HAReplicaLabel(userID), &ts.Labels) - } - - for _, labelName := range d.limits.DropLabels(userID) { - removeLabel(labelName, &ts.Labels) - } - - if len(ts.Labels) == 0 { - validation.DiscardedExemplars.WithLabelValues( - validation.DroppedByUserConfigurationOverride, - userID, - ).Add(float64(len(ts.Samples))) - - continue - } - - // We rely on sorted labels in different places: - // 1) When computing token for labels, and sharding by all labels. Here different order of labels returns - // different tokens, which is bad. - // 2) In validation code, when checking for duplicate label names. As duplicate label names are rejected - // later in the validation phase, we ignore them here. - sortLabelsIfNeeded(ts.Labels) - - // Generate the sharding token based on the series labels without the HA replica - // label and dropped labels (if any) - key, err := d.tokenForLabels(userID, ts.Labels) - if err != nil { - return nil, err - } - - skipLabelNameValidation := d.cfg.SkipLabelNameValidation || req.GetSkipLabelNameValidation() - validatedSeries, validationErr := d.validateSeries(ts, userID, skipLabelNameValidation) - - // Errors in validation are considered non-fatal, as one series in a request may contain - // invalid data but all the remaining series could be perfectly valid. - if validationErr != nil && firstPartialErr == nil { - // The series labels may be retained by validationErr but that's not a problem for this - // use case because we format it calling Error() and then we discard it. - firstPartialErr = httpgrpc.Errorf(http.StatusBadRequest, validationErr.Error()) - } - - // validateSeries would have returned an emptyPreallocSeries if there were no valid samples. 
- if validatedSeries == emptyPreallocSeries { - continue - } - - seriesKeys = append(seriesKeys, key) - validatedTimeseries = append(validatedTimeseries, validatedSeries) - validatedSamples += len(ts.Samples) - validatedExemplars += len(ts.Exemplars) - } - - for _, m := range req.Metadata { - err := validation.ValidateMetadata(d.limits, userID, m) - - if err != nil { - if firstPartialErr == nil { - firstPartialErr = err - } - - continue - } - - metadataKeys = append(metadataKeys, d.tokenForMetadata(userID, m.MetricFamilyName)) - validatedMetadata = append(validatedMetadata, m) - } - - d.receivedSamples.WithLabelValues(userID).Add(float64(validatedSamples)) - d.receivedExemplars.WithLabelValues(userID).Add((float64(validatedExemplars))) - d.receivedMetadata.WithLabelValues(userID).Add(float64(len(validatedMetadata))) - - if len(seriesKeys) == 0 && len(metadataKeys) == 0 { - // Ensure the request slice is reused if there's no series or metadata passing the validation. - cortexpb.ReuseSlice(req.Timeseries) - - return &cortexpb.WriteResponse{}, firstPartialErr - } - - totalN := validatedSamples + validatedExemplars + len(validatedMetadata) - if !d.ingestionRateLimiter.AllowN(now, userID, totalN) { - // Ensure the request slice is reused if the request is rate limited. - cortexpb.ReuseSlice(req.Timeseries) - - validation.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamples)) - validation.DiscardedExemplars.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedExemplars)) - validation.DiscardedMetadata.WithLabelValues(validation.RateLimited, userID).Add(float64(len(validatedMetadata))) - // Return a 429 here to tell the client it is going too fast. - // Client may discard the data or slow down and re-send. - // Prometheus v2.26 added a remote-write option 'retry_on_http_429'. - return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), validatedSamples, len(validatedMetadata)) - } - - // totalN included samples and metadata. Ingester follows this pattern when computing its ingestion rate. - d.ingestionRate.Add(int64(totalN)) - - subRing := d.ingestersRing - - // Obtain a subring if required. - if d.cfg.ShardingStrategy == util.ShardingStrategyShuffle { - subRing = d.ingestersRing.ShuffleShard(userID, d.limits.IngestionTenantShardSize(userID)) - } - - keys := append(seriesKeys, metadataKeys...) 
- initialMetadataIndex := len(seriesKeys) - - op := ring.WriteNoExtend - if d.cfg.ExtendWrites { - op = ring.Write - } - - err = ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error { - timeseries := make([]cortexpb.PreallocTimeseries, 0, len(indexes)) - var metadata []*cortexpb.MetricMetadata - - for _, i := range indexes { - if i >= initialMetadataIndex { - metadata = append(metadata, validatedMetadata[i-initialMetadataIndex]) - } else { - timeseries = append(timeseries, validatedTimeseries[i]) - } - } - - // Use a background context to make sure all ingesters get samples even if we return early - localCtx, cancel := context.WithTimeout(context.Background(), d.cfg.RemoteTimeout) - defer cancel() - localCtx = user.InjectOrgID(localCtx, userID) - if sp := opentracing.SpanFromContext(ctx); sp != nil { - localCtx = opentracing.ContextWithSpan(localCtx, sp) - } - - // Get clientIP(s) from Context and add it to localCtx - localCtx = util.AddSourceIPsToOutgoingContext(localCtx, source) - - return d.send(localCtx, ingester, timeseries, metadata, req.Source) - }, func() { cortexpb.ReuseSlice(req.Timeseries) }) - if err != nil { - return nil, err - } - return &cortexpb.WriteResponse{}, firstPartialErr -} - -func sortLabelsIfNeeded(labels []cortexpb.LabelAdapter) { - // no need to run sort.Slice, if labels are already sorted, which is most of the time. - // we can avoid extra memory allocations (mostly interface-related) this way. - sorted := true - last := "" - for _, l := range labels { - if strings.Compare(last, l.Name) > 0 { - sorted = false - break - } - last = l.Name - } - - if sorted { - return - } - - sort.Slice(labels, func(i, j int) bool { - return strings.Compare(labels[i].Name, labels[j].Name) < 0 - }) -} - -func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.WriteRequest_SourceEnum) error { - h, err := d.ingesterPool.GetClientFor(ingester.Addr) - if err != nil { - return err - } - c := h.(ingester_client.IngesterClient) - - req := cortexpb.WriteRequest{ - Timeseries: timeseries, - Metadata: metadata, - Source: source, - } - _, err = c.Push(ctx, &req) - - if len(metadata) > 0 { - d.ingesterAppends.WithLabelValues(ingester.Addr, typeMetadata).Inc() - if err != nil { - d.ingesterAppendFailures.WithLabelValues(ingester.Addr, typeMetadata, getErrorStatus(err)).Inc() - } - } - if len(timeseries) > 0 { - d.ingesterAppends.WithLabelValues(ingester.Addr, typeSamples).Inc() - if err != nil { - d.ingesterAppendFailures.WithLabelValues(ingester.Addr, typeSamples, getErrorStatus(err)).Inc() - } - } - - return err -} - -func getErrorStatus(err error) string { - status := "5xx" - httpResp, ok := httpgrpc.HTTPResponseFromError(err) - if ok && httpResp.Code/100 == 4 { - status = "4xx" - } - - return status -} - -// ForReplicationSet runs f, in parallel, for all ingesters in the input replication set. 
-func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, f func(context.Context, ingester_client.IngesterClient) (interface{}, error)) ([]interface{}, error) { - return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { - client, err := d.ingesterPool.GetClientFor(ing.Addr) - if err != nil { - return nil, err - } - - return f(ctx, client.(ingester_client.IngesterClient)) - }) -} - -func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, to model.Time, labelName model.LabelName, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error), matchers ...*labels.Matcher) ([]string, error) { - replicationSet, err := d.GetIngestersForMetadata(ctx) - if err != nil { - return nil, err - } - - req, err := ingester_client.ToLabelValuesRequest(labelName, from, to, matchers) - if err != nil { - return nil, err - } - - resps, err := f(ctx, replicationSet, req) - if err != nil { - return nil, err - } - - valueSet := map[string]struct{}{} - for _, resp := range resps { - for _, v := range resp.([]string) { - valueSet[v] = struct{}{} - } - } - - values := make([]string, 0, len(valueSet)) - for v := range valueSet { - values = append(values, v) - } - - // We need the values returned to be sorted. - sort.Strings(values) - - return values, nil -} - -// LabelValuesForLabelName returns all of the label values that are associated with a given label name. -func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, labelName model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { - resp, err := client.LabelValues(ctx, req) - if err != nil { - return nil, err - } - return resp.LabelValues, nil - }) - }, matchers...) -} - -// LabelValuesForLabelName returns all of the label values that are associated with a given label name. -func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, labelName model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { - stream, err := client.LabelValuesStream(ctx, req) - if err != nil { - return nil, err - } - defer stream.CloseSend() //nolint:errcheck - allLabelValues := []string{} - for { - resp, err := stream.Recv() - - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - allLabelValues = append(allLabelValues, resp.LabelValues...) - } - - return allLabelValues, nil - }) - }, matchers...) 
-} - -func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error)) ([]string, error) { - replicationSet, err := d.GetIngestersForMetadata(ctx) - if err != nil { - return nil, err - } - - req := &ingester_client.LabelNamesRequest{ - StartTimestampMs: int64(from), - EndTimestampMs: int64(to), - } - resps, err := f(ctx, replicationSet, req) - if err != nil { - return nil, err - } - - valueSet := map[string]struct{}{} - for _, resp := range resps { - for _, v := range resp.([]string) { - valueSet[v] = struct{}{} - } - } - - values := make([]string, 0, len(valueSet)) - for v := range valueSet { - values = append(values, v) - } - - sort.Strings(values) - - return values, nil -} - -func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { - stream, err := client.LabelNamesStream(ctx, req) - if err != nil { - return nil, err - } - defer stream.CloseSend() //nolint:errcheck - allLabelNames := []string{} - for { - resp, err := stream.Recv() - - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - allLabelNames = append(allLabelNames, resp.LabelNames...) - } - - return allLabelNames, nil - }) - }) -} - -// LabelNames returns all of the label names. -func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { - resp, err := client.LabelNames(ctx, req) - if err != nil { - return nil, err - } - return resp.LabelNames, nil - }) - }) -} - -// MetricsForLabelMatchers gets the metrics that match said matchers -func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) { - return d.metricsForLabelMatchersCommon(ctx, from, through, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]model.Metric, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { - _, err := d.ForReplicationSet(ctx, rs, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { - resp, err := client.MetricsForLabelMatchers(ctx, req) - if err != nil { - return nil, err - } - ms := ingester_client.FromMetricsForLabelMatchersResponse(resp) - for _, m := range ms { - if err := queryLimiter.AddSeries(cortexpb.FromMetricsToLabelAdapters(m)); err != nil { - return nil, err - } - fingerprint := m.Fingerprint() - mutex.Lock() - (*metrics)[fingerprint] = m - mutex.Unlock() - } - - return nil, nil - }) - - return err - }, matchers...) 
-} - -func (d *Distributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) { - return d.metricsForLabelMatchersCommon(ctx, from, through, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]model.Metric, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { - _, err := d.ForReplicationSet(ctx, rs, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { - stream, err := client.MetricsForLabelMatchersStream(ctx, req) - if err != nil { - return nil, err - } - defer stream.CloseSend() //nolint:errcheck - for { - resp, err := stream.Recv() - - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - for _, metric := range resp.Metric { - m := cortexpb.FromLabelAdaptersToMetricWithCopy(metric.Labels) - - if err := queryLimiter.AddSeries(metric.Labels); err != nil { - return nil, err - } - - fingerprint := m.Fingerprint() - mutex.Lock() - (*metrics)[fingerprint] = m - mutex.Unlock() - } - } - - return nil, nil - }) - - return err - }, matchers...) -} - -func (d *Distributor) metricsForLabelMatchersCommon(ctx context.Context, from, through model.Time, f func(context.Context, ring.ReplicationSet, *ingester_client.MetricsForLabelMatchersRequest, *map[model.Fingerprint]model.Metric, *sync.Mutex, *limiter.QueryLimiter) error, matchers ...*labels.Matcher) ([]metric.Metric, error) { - replicationSet, err := d.GetIngestersForMetadata(ctx) - queryLimiter := limiter.QueryLimiterFromContextWithFallback(ctx) - if err != nil { - return nil, err - } - - req, err := ingester_client.ToMetricsForLabelMatchersRequest(from, through, matchers) - if err != nil { - return nil, err - } - mutex := sync.Mutex{} - metrics := map[model.Fingerprint]model.Metric{} - - err = f(ctx, replicationSet, req, &metrics, &mutex, queryLimiter) - - if err != nil { - return nil, err - } - - mutex.Lock() - result := make([]metric.Metric, 0, len(metrics)) - for _, m := range metrics { - result = append(result, metric.Metric{ - Metric: m, - }) - } - mutex.Unlock() - return result, nil -} - -// MetricsMetadata returns all metric metadata of a user. -func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { - replicationSet, err := d.GetIngestersForMetadata(ctx) - if err != nil { - return nil, err - } - - req := &ingester_client.MetricsMetadataRequest{} - // TODO(gotjosh): We only need to look in all the ingesters if shardByAllLabels is enabled. - resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { - return client.MetricsMetadata(ctx, req) - }) - if err != nil { - return nil, err - } - - result := []scrape.MetricMetadata{} - dedupTracker := map[cortexpb.MetricMetadata]struct{}{} - for _, resp := range resps { - r := resp.(*ingester_client.MetricsMetadataResponse) - for _, m := range r.Metadata { - // Given we look across all ingesters - dedup the metadata. - _, ok := dedupTracker[*m] - if ok { - continue - } - dedupTracker[*m] = struct{}{} - - result = append(result, scrape.MetricMetadata{ - Metric: m.MetricFamilyName, - Help: m.Help, - Unit: m.Unit, - Type: cortexpb.MetricMetadataMetricTypeToMetricType(m.GetType()), - }) - } - } - - return result, nil -} - -// UserStats returns statistics about the current user. 
-func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) {
-	replicationSet, err := d.GetIngestersForMetadata(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	// Make sure we get a successful response from all of them.
-	replicationSet.MaxErrors = 0
-
-	req := &ingester_client.UserStatsRequest{}
-	resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) {
-		return client.UserStats(ctx, req)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	totalStats := &UserStats{}
-	for _, resp := range resps {
-		r := resp.(*ingester_client.UserStatsResponse)
-		totalStats.IngestionRate += r.IngestionRate
-		totalStats.APIIngestionRate += r.ApiIngestionRate
-		totalStats.RuleIngestionRate += r.RuleIngestionRate
-		totalStats.NumSeries += r.NumSeries
-	}
-
-	totalStats.IngestionRate /= float64(d.ingestersRing.ReplicationFactor())
-	totalStats.NumSeries /= uint64(d.ingestersRing.ReplicationFactor())
-
-	return totalStats, nil
-}
-
-// UserIDStats models ingestion statistics for one user, including the user ID
-type UserIDStats struct {
-	UserID string `json:"userID"`
-	UserStats
-}
-
-// AllUserStats returns statistics about all users.
-// Note it does not divide by the ReplicationFactor like UserStats()
-func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) {
-	// Add up by user, across all responses from ingesters
-	perUserTotals := make(map[string]UserStats)
-
-	req := &ingester_client.UserStatsRequest{}
-	ctx = user.InjectOrgID(ctx, "1") // fake: ingester insists on having an org ID
-	// Not using d.ForReplicationSet(), so we can fail after first error.
-	replicationSet, err := d.ingestersRing.GetAllHealthy(ring.Read)
-	if err != nil {
-		return nil, err
-	}
-	for _, ingester := range replicationSet.Instances {
-		client, err := d.ingesterPool.GetClientFor(ingester.Addr)
-		if err != nil {
-			return nil, err
-		}
-		resp, err := client.(ingester_client.IngesterClient).AllUserStats(ctx, req)
-		if err != nil {
-			return nil, err
-		}
-		for _, u := range resp.Stats {
-			s := perUserTotals[u.UserId]
-			s.IngestionRate += u.Data.IngestionRate
-			s.APIIngestionRate += u.Data.ApiIngestionRate
-			s.RuleIngestionRate += u.Data.RuleIngestionRate
-			s.NumSeries += u.Data.NumSeries
-			perUserTotals[u.UserId] = s
-		}
-	}
-
-	// Turn aggregated map into a slice for return
-	response := make([]UserIDStats, 0, len(perUserTotals))
-	for id, stats := range perUserTotals {
-		response = append(response, UserIDStats{
-			UserID: id,
-			UserStats: UserStats{
-				IngestionRate:     stats.IngestionRate,
-				APIIngestionRate:  stats.APIIngestionRate,
-				RuleIngestionRate: stats.RuleIngestionRate,
-				NumSeries:         stats.NumSeries,
-			},
-		})
-	}
-
-	return response, nil
-}
-
-func (d *Distributor) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	if d.distributorsRing != nil {
-		d.distributorsRing.ServeHTTP(w, req)
-	} else {
-		var ringNotEnabledPage = `
-			<!DOCTYPE html>
-			<html>
-				<head>
-					<meta charset="UTF-8">
-					<title>Cortex Distributor Status</title>
-				</head>
-				<body>
-					<h1>Cortex Distributor Status</h1>
-					<p>Distributor is not running with global limits enabled</p>
-				</body>
-			</html>`
-		util.WriteHTMLResponse(w, ringNotEnabledPage)
-	}
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go
deleted file mode 100644
index f1b0fa2fb..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package distributor
-
-import (
-	"flag"
-	"os"
-	"time"
-
-	"github.com/go-kit/log/level"
-
-	"github.com/cortexproject/cortex/pkg/ring"
-	"github.com/cortexproject/cortex/pkg/ring/kv"
-	"github.com/cortexproject/cortex/pkg/util/flagext"
-	util_log "github.com/cortexproject/cortex/pkg/util/log"
-)
-
-// RingConfig masks the ring lifecycler config, which contains
-// many options not really required by the distributor's ring. This config
-// is used to strip down the config to the minimum, and avoid confusion
-// to the user.
-type RingConfig struct {
-	KVStore          kv.Config     `yaml:"kvstore"`
-	HeartbeatPeriod  time.Duration `yaml:"heartbeat_period"`
-	HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"`
-
-	// Instance details
-	InstanceID             string   `yaml:"instance_id" doc:"hidden"`
-	InstanceInterfaceNames []string `yaml:"instance_interface_names"`
-	InstancePort           int      `yaml:"instance_port" doc:"hidden"`
-	InstanceAddr           string   `yaml:"instance_addr" doc:"hidden"`
-
-	// Injected internally
-	ListenPort int `yaml:"-"`
-}
-
-// RegisterFlags adds the flags required to configure this to the given FlagSet.
-func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) {
-	hostname, err := os.Hostname()
-	if err != nil {
-		level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err)
-		os.Exit(1)
-	}
-
-	// Ring flags
-	cfg.KVStore.RegisterFlagsWithPrefix("distributor.ring.", "collectors/", f)
-	f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.")
-	f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring. 0 = never (timeout disabled).")
-
-	// Instance flags
-	cfg.InstanceInterfaceNames = []string{"eth0", "en0"}
-	f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), "distributor.ring.instance-interface-names", "Name of network interface to read address from.")
-	f.StringVar(&cfg.InstanceAddr, "distributor.ring.instance-addr", "", "IP address to advertise in the ring.")
-	f.IntVar(&cfg.InstancePort, "distributor.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).")
-	f.StringVar(&cfg.InstanceID, "distributor.ring.instance-id", hostname, "Instance ID to register in the ring.")
-}
-
-// ToLifecyclerConfig returns a LifecyclerConfig based on the distributor
-// ring config.
-func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { - // We have to make sure that the ring.LifecyclerConfig and ring.Config - // defaults are preserved - lc := ring.LifecyclerConfig{} - rc := ring.Config{} - - flagext.DefaultValues(&lc) - flagext.DefaultValues(&rc) - - // Configure ring - rc.KVStore = cfg.KVStore - rc.HeartbeatTimeout = cfg.HeartbeatTimeout - rc.ReplicationFactor = 1 - - // Configure lifecycler - lc.RingConfig = rc - lc.ListenPort = cfg.ListenPort - lc.Addr = cfg.InstanceAddr - lc.Port = cfg.InstancePort - lc.ID = cfg.InstanceID - lc.InfNames = cfg.InstanceInterfaceNames - lc.UnregisterOnShutdown = true - lc.HeartbeatPeriod = cfg.HeartbeatPeriod - lc.ObservePeriod = 0 - lc.NumTokens = 1 - lc.JoinAfter = 0 - lc.MinReadyDuration = 0 - lc.FinalSleep = 0 - - return lc -} - -func (cfg *RingConfig) ToRingConfig() ring.Config { - rc := ring.Config{} - flagext.DefaultValues(&rc) - - rc.KVStore = cfg.KVStore - rc.HeartbeatTimeout = cfg.HeartbeatTimeout - rc.ReplicationFactor = 1 - - return rc -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.pb.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.pb.go deleted file mode 100644 index 8fe9bcf02..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.pb.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: distributor.proto - -package distributorpb - -import ( - context "context" - fmt "fmt" - cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { proto.RegisterFile("distributor.proto", fileDescriptor_c518e33639ca565d) } - -var fileDescriptor_c518e33639ca565d = []byte{ - // 221 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0xc9, 0x2c, 0x2e, - 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x46, - 0x12, 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, - 0x4f, 0xcf, 0xd7, 0x07, 0xab, 0x49, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x57, - 0xca, 0x12, 0x49, 0x79, 0x72, 0x7e, 0x51, 0x49, 0x6a, 0x45, 0x41, 0x51, 0x7e, 0x56, 0x6a, 0x72, - 0x09, 0x94, 0xa7, 0x5f, 0x90, 0x9d, 0x0e, 0x93, 0x48, 0x82, 0x32, 0x20, 0x5a, 0x8d, 0x3c, 0xb8, - 0xb8, 0x5d, 0x10, 0x16, 0x0b, 0x59, 0x72, 0xb1, 0x04, 0x94, 0x16, 0x67, 0x08, 0x89, 0xe9, 0xc1, - 0x94, 0xeb, 0x85, 0x17, 0x65, 0x96, 0xa4, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x48, 0x89, - 0x63, 0x88, 0x17, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x2a, 0x31, 0x38, 0x39, 0x5f, 0x78, 0x28, 0xc7, - 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, - 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, - 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, - 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x45, 0xf2, 0x76, 0x41, 0x52, 0x12, 0x1b, 0xd8, 0x55, 0xc6, 0x80, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0xcd, 0x1b, 0x85, 0x21, 0x01, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// DistributorClient is the client API for Distributor service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type DistributorClient interface { - Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) -} - -type distributorClient struct { - cc *grpc.ClientConn -} - -func NewDistributorClient(cc *grpc.ClientConn) DistributorClient { - return &distributorClient{cc} -} - -func (c *distributorClient) Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) { - out := new(cortexpb.WriteResponse) - err := c.cc.Invoke(ctx, "/distributor.Distributor/Push", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DistributorServer is the server API for Distributor service. -type DistributorServer interface { - Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) -} - -// UnimplementedDistributorServer can be embedded to have forward compatible implementations. 
-type UnimplementedDistributorServer struct { -} - -func (*UnimplementedDistributorServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") -} - -func RegisterDistributorServer(s *grpc.Server, srv DistributorServer) { - s.RegisterService(&_Distributor_serviceDesc, srv) -} - -func _Distributor_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(cortexpb.WriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributorServer).Push(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distributor.Distributor/Push", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributorServer).Push(ctx, req.(*cortexpb.WriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Distributor_serviceDesc = grpc.ServiceDesc{ - ServiceName: "distributor.Distributor", - HandlerType: (*DistributorServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Push", - Handler: _Distributor_Push_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "distributor.proto", -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.proto b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.proto deleted file mode 100644 index d473c38be..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributorpb/distributor.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package distributor; - -option go_package = "distributorpb"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service Distributor { - rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {}; -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go deleted file mode 100644 index dd0f74d9c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go +++ /dev/null @@ -1,495 +0,0 @@ -package distributor - -import ( - "context" - "errors" - "flag" - "fmt" - "math/rand" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/model/timestamp" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/ring/kv/codec" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/services" -) - -var ( - errNegativeUpdateTimeoutJitterMax = errors.New("HA tracker max update timeout jitter shouldn't be negative") - errInvalidFailoverTimeout = "HA Tracker failover timeout (%v) must be at least 1s greater than update timeout - max jitter (%v)" -) - -type haTrackerLimits interface { - // MaxHAClusters returns max number of clusters that HA tracker should track for a user. - // Samples from additional clusters are rejected. 
-	MaxHAClusters(user string) int
-}
-
-// ProtoReplicaDescFactory makes new InstanceDescs
-func ProtoReplicaDescFactory() proto.Message {
-	return NewReplicaDesc()
-}
-
-// NewReplicaDesc returns an empty *distributor.ReplicaDesc.
-func NewReplicaDesc() *ReplicaDesc {
-	return &ReplicaDesc{}
-}
-
-// HATrackerConfig contains the configuration required to
-// create an HA Tracker.
-type HATrackerConfig struct {
-	EnableHATracker bool `yaml:"enable_ha_tracker"`
-	// We should only update the timestamp if the difference
-	// between the stored timestamp and the time we received a sample at
-	// is more than this duration.
-	UpdateTimeout          time.Duration `yaml:"ha_tracker_update_timeout"`
-	UpdateTimeoutJitterMax time.Duration `yaml:"ha_tracker_update_timeout_jitter_max"`
-	// We should only failover to accepting samples from a replica
-	// other than the replica written in the KVStore if the difference
-	// between the stored timestamp and the time we received a sample is
-	// more than this duration.
-	FailoverTimeout time.Duration `yaml:"ha_tracker_failover_timeout"`
-
-	KVStore kv.Config `yaml:"kvstore" doc:"description=Backend storage to use for the ring. Please be aware that memberlist is not supported by the HA tracker since gossip propagation is too slow for HA purposes."`
-}
-
-// RegisterFlags adds the flags required to configure this to the given FlagSet.
-func (cfg *HATrackerConfig) RegisterFlags(f *flag.FlagSet) {
-	f.BoolVar(&cfg.EnableHATracker, "distributor.ha-tracker.enable", false, "Enable the distributor's HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).")
-	f.DurationVar(&cfg.UpdateTimeout, "distributor.ha-tracker.update-timeout", 15*time.Second, "Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.")
-	f.DurationVar(&cfg.UpdateTimeoutJitterMax, "distributor.ha-tracker.update-timeout-jitter-max", 5*time.Second, "Maximum jitter applied to the update timeout, in order to spread the HA heartbeats over time.")
-	f.DurationVar(&cfg.FailoverTimeout, "distributor.ha-tracker.failover-timeout", 30*time.Second, "If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout.")
-
-	// We want the ability to use different Consul instances for the ring and
-	// for HA cluster tracking. We also customize the default keys prefix, in
-	// order to not clash with the ring key if they both share the same KVStore
-	// backend (i.e. run on the same Consul cluster).
-	cfg.KVStore.RegisterFlagsWithPrefix("distributor.ha-tracker.", "ha-tracker/", f)
-}
-
-// Validate checks the config and returns an error on failure.
-func (cfg *HATrackerConfig) Validate() error {
-	if cfg.UpdateTimeoutJitterMax < 0 {
-		return errNegativeUpdateTimeoutJitterMax
-	}
-
-	minFailureTimeout := cfg.UpdateTimeout + cfg.UpdateTimeoutJitterMax + time.Second
-	if cfg.FailoverTimeout < minFailureTimeout {
-		return fmt.Errorf(errInvalidFailoverTimeout, cfg.FailoverTimeout, minFailureTimeout)
-	}
-
-	return nil
-}
-
-func GetReplicaDescCodec() codec.Proto {
-	return codec.NewProtoCodec("replicaDesc", ProtoReplicaDescFactory)
-}
-
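The timeout relationship enforced by Validate (failover must exceed update timeout plus max jitter by at least 1s) is easy to get wrong when tuning these flags. Below is a minimal, self-contained sketch of the same rule; validateTimeouts is a hypothetical helper written for illustration, not part of the removed package.

package main

import (
	"errors"
	"fmt"
	"time"
)

// validateTimeouts mirrors HATrackerConfig.Validate above: the failover
// timeout must be at least 1s greater than the update timeout plus the
// maximum update-timeout jitter.
func validateTimeouts(update, jitterMax, failover time.Duration) error {
	if jitterMax < 0 {
		return errors.New("max update timeout jitter shouldn't be negative")
	}
	if minFailover := update + jitterMax + time.Second; failover < minFailover {
		return fmt.Errorf("failover timeout (%v) must be at least 1s greater than update timeout - max jitter (%v)", failover, minFailover)
	}
	return nil
}

func main() {
	// Defaults from RegisterFlags above: 15s update, 5s jitter, 30s failover.
	fmt.Println(validateTimeouts(15*time.Second, 5*time.Second, 30*time.Second)) // <nil>
	fmt.Println(validateTimeouts(15*time.Second, 5*time.Second, 20*time.Second)) // error: 20s < 21s
}

-// Track the replica we're accepting samples from
-// for each HA cluster we know about.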
-type haTracker struct {
-	services.Service
-
-	logger              log.Logger
-	cfg                 HATrackerConfig
-	client              kv.Client
-	updateTimeoutJitter time.Duration
-	limits              haTrackerLimits
-
-	electedLock sync.RWMutex
-	elected     map[string]ReplicaDesc         // Replicas we are accepting samples from. Key = "user/cluster".
-	clusters    map[string]map[string]struct{} // Known clusters with elected replicas per user. First key = user, second key = cluster name.
-
-	electedReplicaChanges         *prometheus.CounterVec
-	electedReplicaTimestamp       *prometheus.GaugeVec
-	electedReplicaPropagationTime prometheus.Histogram
-	kvCASCalls                    *prometheus.CounterVec
-
-	cleanupRuns               prometheus.Counter
-	replicasMarkedForDeletion prometheus.Counter
-	deletedReplicas           prometheus.Counter
-	markingForDeletionsFailed prometheus.Counter
-}
-
-// newHATracker returns a new HA cluster tracker using either Consul
-// or an in-memory KV store. The tracker must be started via StartAsync().
-func newHATracker(cfg HATrackerConfig, limits haTrackerLimits, reg prometheus.Registerer, logger log.Logger) (*haTracker, error) {
-	var jitter time.Duration
-	if cfg.UpdateTimeoutJitterMax > 0 {
-		// Pick a jitter uniformly distributed in [-UpdateTimeoutJitterMax, +UpdateTimeoutJitterMax].
-		jitter = time.Duration(rand.Int63n(int64(2*cfg.UpdateTimeoutJitterMax))) - cfg.UpdateTimeoutJitterMax
-	}
-
-	t := &haTracker{
-		logger:              logger,
-		cfg:                 cfg,
-		updateTimeoutJitter: jitter,
-		limits:              limits,
-		elected:             map[string]ReplicaDesc{},
-		clusters:            map[string]map[string]struct{}{},
-
-		electedReplicaChanges: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
-			Name: "cortex_ha_tracker_elected_replica_changes_total",
-			Help: "The total number of times the elected replica has changed for a user ID/cluster.",
-		}, []string{"user", "cluster"}),
-		electedReplicaTimestamp: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
-			Name: "cortex_ha_tracker_elected_replica_timestamp_seconds",
-			Help: "The timestamp stored for the currently elected replica, from the KVStore.",
-		}, []string{"user", "cluster"}),
-		electedReplicaPropagationTime: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
-			Name:    "cortex_ha_tracker_elected_replica_change_propagation_time_seconds",
-			Help:    "The time it takes for the distributor to update the replica change.",
-			Buckets: prometheus.DefBuckets,
-		}),
-		kvCASCalls: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
-			Name: "cortex_ha_tracker_kv_store_cas_total",
-			Help: "The total number of CAS calls to the KV store for a user ID/cluster.",
-		}, []string{"user", "cluster"}),
-
-		cleanupRuns: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_ha_tracker_replicas_cleanup_started_total",
-			Help: "Number of elected replicas cleanup loops started.",
-		}),
-		replicasMarkedForDeletion: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_ha_tracker_replicas_cleanup_marked_for_deletion_total",
-			Help: "Number of elected replicas marked for deletion.",
-		}),
-		deletedReplicas: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_ha_tracker_replicas_cleanup_deleted_total",
-			Help: "Number of elected replicas deleted from KV store.",
-		}),
-		markingForDeletionsFailed: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_ha_tracker_replicas_cleanup_delete_failed_total",
-			Help: "Number of elected replicas that failed to be marked for deletion, or deleted.",
-		}),
-	}
-
-	if cfg.EnableHATracker {
-		client, err := kv.NewClient(
-			cfg.KVStore,
-			GetReplicaDescCodec(),
-			kv.RegistererWithKVName(prometheus.WrapRegistererWithPrefix("cortex_", reg), "distributor-hatracker"),
-			logger,
-		)
-
if err != nil { - return nil, err - } - t.client = client - } - - t.Service = services.NewBasicService(nil, t.loop, nil) - return t, nil -} - -// Follows pattern used by ring for WatchKey. -func (c *haTracker) loop(ctx context.Context) error { - if !c.cfg.EnableHATracker { - // don't do anything, but wait until asked to stop. - <-ctx.Done() - return nil - } - - // Start cleanup loop. It will stop when context is done. - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - c.cleanupOldReplicasLoop(ctx) - }() - - // The KVStore config we gave when creating c should have contained a prefix, - // which would have given us a prefixed KVStore client. So, we can pass empty string here. - c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool { - replica := value.(*ReplicaDesc) - segments := strings.SplitN(key, "/", 2) - - // Valid key would look like cluster/replica, and a key without a / such as `ring` would be invalid. - if len(segments) != 2 { - return true - } - - user := segments[0] - cluster := segments[1] - - c.electedLock.Lock() - defer c.electedLock.Unlock() - - if replica.DeletedAt > 0 { - delete(c.elected, key) - c.electedReplicaChanges.DeleteLabelValues(user, cluster) - c.electedReplicaTimestamp.DeleteLabelValues(user, cluster) - - userClusters := c.clusters[user] - if userClusters != nil { - delete(userClusters, cluster) - if len(userClusters) == 0 { - delete(c.clusters, user) - } - } - return true - } - - elected, exists := c.elected[key] - if replica.Replica != elected.Replica { - c.electedReplicaChanges.WithLabelValues(user, cluster).Inc() - } - if !exists { - if c.clusters[user] == nil { - c.clusters[user] = map[string]struct{}{} - } - c.clusters[user][cluster] = struct{}{} - } - c.elected[key] = *replica - c.electedReplicaTimestamp.WithLabelValues(user, cluster).Set(float64(replica.ReceivedAt / 1000)) - c.electedReplicaPropagationTime.Observe(time.Since(timestamp.Time(replica.ReceivedAt)).Seconds()) - return true - }) - - wg.Wait() - return nil -} - -const ( - cleanupCyclePeriod = 30 * time.Minute - cleanupCycleJitterVariance = 0.2 // for 30 minutes, this is ±6 min - - // If we have received last sample for given cluster before this timeout, we will mark selected replica for deletion. - // If selected replica is marked for deletion for this time, it is deleted completely. - deletionTimeout = 30 * time.Minute -) - -func (c *haTracker) cleanupOldReplicasLoop(ctx context.Context) { - tick := time.NewTicker(util.DurationWithJitter(cleanupCyclePeriod, cleanupCycleJitterVariance)) - defer tick.Stop() - - for { - select { - case <-ctx.Done(): - return - case t := <-tick.C: - c.cleanupRuns.Inc() - c.cleanupOldReplicas(ctx, t.Add(-deletionTimeout)) - } - } -} - -// Replicas marked for deletion before deadline will be deleted. -// Replicas with last-received timestamp before deadline will be marked for deletion. 
-func (c *haTracker) cleanupOldReplicas(ctx context.Context, deadline time.Time) {
-	keys, err := c.client.List(ctx, "")
-	if err != nil {
-		level.Warn(c.logger).Log("msg", "cleanup: failed to list replica keys", "err", err)
-		return
-	}
-
-	for _, key := range keys {
-		if ctx.Err() != nil {
-			return
-		}
-
-		val, err := c.client.Get(ctx, key)
-		if err != nil {
-			level.Warn(c.logger).Log("msg", "cleanup: failed to get replica value", "key", key, "err", err)
-			continue
-		}
-
-		desc, ok := val.(*ReplicaDesc)
-		if !ok {
-			level.Error(c.logger).Log("msg", "cleanup: got invalid replica descriptor", "key", key)
-			continue
-		}
-
-		if desc.DeletedAt > 0 {
-			if timestamp.Time(desc.DeletedAt).After(deadline) {
-				continue
-			}
-
-			// We're blindly deleting a key here. It may happen that the value was updated since we read it a few
-			// lines above, in which case distributors will have the updated value in memory, but Delete will remove
-			// it from the KV store anyway. That's not great, but should not be a problem. If the KV store sends a
-			// Watch notification for Delete, distributors will delete it from memory, and recreate it on the next
-			// sample with a matching replica.
-			//
-			// If the KV store doesn't send a Watch notification for Delete, distributors *with* the replica in memory
-			// will keep using it, while distributors *without* the replica in memory will try to write it to the KV
-			// store -- which will update *all* watching distributors.
-			err = c.client.Delete(ctx, key)
-			if err != nil {
-				level.Error(c.logger).Log("msg", "cleanup: failed to delete old replica", "key", key, "err", err)
-				c.markingForDeletionsFailed.Inc()
-			} else {
-				level.Info(c.logger).Log("msg", "cleanup: deleted old replica", "key", key)
-				c.deletedReplicas.Inc()
-			}
-			continue
-		}
-
-		// Not marked as deleted yet.
-		if desc.DeletedAt == 0 && timestamp.Time(desc.ReceivedAt).Before(deadline) {
-			err := c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
-				d, ok := in.(*ReplicaDesc)
-				if !ok || d == nil || d.DeletedAt > 0 || !timestamp.Time(desc.ReceivedAt).Before(deadline) {
-					return nil, false, nil
-				}
-
-				d.DeletedAt = timestamp.FromTime(time.Now())
-				return d, true, nil
-			})
-
-			if err != nil {
-				c.markingForDeletionsFailed.Inc()
-				level.Error(c.logger).Log("msg", "cleanup: failed to mark replica as deleted", "key", key, "err", err)
-			} else {
-				c.replicasMarkedForDeletion.Inc()
-				level.Info(c.logger).Log("msg", "cleanup: marked replica as deleted", "key", key)
-			}
-		}
-	}
-}
-
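The cleanup loop above is a two-phase tombstone scheme: replicas that stopped receiving samples are first marked (DeletedAt set via CAS) so that watchers can drop them from memory, and only deleted from the KV store once the tombstone itself has aged past the deadline. A self-contained sketch of that per-key decision; cleanupAction is a hypothetical pure function, with timestamps in Unix milliseconds as in ReplicaDesc:

package main

import (
	"fmt"
	"time"
)

// cleanupAction reduces the decision made by cleanupOldReplicas above to a
// pure function over one KV entry.
func cleanupAction(receivedAt, deletedAt int64, deadline time.Time) string {
	if deletedAt > 0 {
		if time.UnixMilli(deletedAt).After(deadline) {
			return "keep" // marked recently; wait until the tombstone ages out
		}
		return "delete" // tombstone is old enough to remove from the KV store
	}
	if time.UnixMilli(receivedAt).Before(deadline) {
		return "mark" // no samples for a while; CAS a DeletedAt tombstone
	}
	return "keep"
}

func main() {
	deadline := time.Now().Add(-30 * time.Minute) // mirrors deletionTimeout above
	quiet := time.Now().Add(-2 * time.Hour).UnixMilli()
	fmt.Println(cleanupAction(quiet, 0, deadline)) // mark
}

-// checkReplica checks the cluster and replica against the backing KVStore and local cache in the
-// tracker c to see if we should accept the incoming sample. It will return an error if the sample
-// should not be accepted. Note that internally this function does checks against the stored values
-// and may modify the stored data, for example to failover between replicas after a certain period of time.
-// replicasNotMatchError is returned (from checkKVStore) if we shouldn't store this sample but are
-// accepting samples from another replica for the cluster, so that there isn't a bunch of errors returned
-// to customers' clients.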
-func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica string, now time.Time) error {
-	// If HA tracking isn't enabled, then accept the sample.
-	if !c.cfg.EnableHATracker {
-		return nil
-	}
-	key := fmt.Sprintf("%s/%s", userID, cluster)
-
-	c.electedLock.RLock()
-	entry, ok := c.elected[key]
-	clusters := len(c.clusters[userID])
-	c.electedLock.RUnlock()
-
-	if ok && now.Sub(timestamp.Time(entry.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
-		if entry.Replica != replica {
-			return replicasNotMatchError{replica: replica, elected: entry.Replica}
-		}
-		return nil
-	}
-
-	if !ok {
-		// If we don't know about this cluster yet and we have reached the limit for number of clusters, we error out now.
-		if limit := c.limits.MaxHAClusters(userID); limit > 0 && clusters+1 > limit {
-			return tooManyClustersError{limit: limit}
-		}
-	}
-
-	err := c.checkKVStore(ctx, key, replica, now)
-	c.kvCASCalls.WithLabelValues(userID, cluster).Inc()
-	if err != nil {
-		// The callback within checkKVStore will return a replicasNotMatchError if the sample is being deduped,
-		// otherwise there may have been an actual error CAS'ing that we should log.
-		if !errors.Is(err, replicasNotMatchError{}) {
-			level.Error(c.logger).Log("msg", "rejecting sample", "err", err)
-		}
-	}
-	return err
-}
-
-func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error {
-	return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
-		if desc, ok := in.(*ReplicaDesc); ok && desc.DeletedAt == 0 {
-			// We don't need to CAS and update the timestamp in the KV store if the timestamp we've received
-			// this sample at is less than updateTimeout amount of time since the timestamp in the KV store.
-			if desc.Replica == replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
-				return nil, false, nil
-			}
-
-			// We shouldn't failover to accepting a new replica if the timestamp we've received this sample at
-			// is less than failover timeout amount of time since the timestamp in the KV store.
-			if desc.Replica != replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.FailoverTimeout {
-				return nil, false, replicasNotMatchError{replica: replica, elected: desc.Replica}
-			}
-		}
-
-		// There was either invalid or no data for the key, so we now accept samples
-		// from this replica. Invalid could mean that the timestamp in the KV store was
-		// out of date based on the update and failover timeouts when compared to now.
-		return &ReplicaDesc{
-			Replica:    replica,
-			ReceivedAt: timestamp.FromTime(now),
-			DeletedAt:  0,
-		}, true, nil
-	})
-}
-
-type replicasNotMatchError struct {
-	replica, elected string
-}
-
-func (e replicasNotMatchError) Error() string {
-	return fmt.Sprintf("replicas did not match, rejecting sample: replica=%s, elected=%s", e.replica, e.elected)
-}
-
-// Needed for errors.Is to work properly.
-func (e replicasNotMatchError) Is(err error) bool {
-	_, ok1 := err.(replicasNotMatchError)
-	_, ok2 := err.(*replicasNotMatchError)
-	return ok1 || ok2
-}
-
-// IsOperationAborted returns whether the error has been caused by an operation intentionally aborted.
-func (e replicasNotMatchError) IsOperationAborted() bool {
-	return true
-}
-
-type tooManyClustersError struct {
-	limit int
-}
-
-func (e tooManyClustersError) Error() string {
-	return fmt.Sprintf("too many HA clusters (limit: %d)", e.limit)
-}
-
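Taken together, checkReplica and checkKVStore implement a small per user/cluster election with two timeouts. A condensed, self-contained sketch of the decision; electionDecision is a hypothetical pure function (the real code folds the "refresh" and "failover" outcomes into a single CAS write of a new ReplicaDesc):

package main

import (
	"fmt"
	"time"
)

// electionDecision classifies an incoming sample against the stored election.
// sinceUpdate is now minus the stored ReceivedAt timestamp.
func electionDecision(elected, incoming string, sinceUpdate, updateTimeout, failoverTimeout time.Duration) string {
	if incoming == elected {
		if sinceUpdate < updateTimeout {
			return "no-op" // entry is fresh; skip the CAS write
		}
		return "refresh" // same replica; bump ReceivedAt in the KV store
	}
	if sinceUpdate < failoverTimeout {
		return "reject" // another replica is elected and still sending samples
	}
	return "failover" // elected replica went quiet; accept the new one
}

func main() {
	fmt.Println(electionDecision("a", "a", 5*time.Second, 15*time.Second, 30*time.Second))  // no-op
	fmt.Println(electionDecision("a", "b", 10*time.Second, 15*time.Second, 30*time.Second)) // reject
	fmt.Println(electionDecision("a", "b", time.Minute, 15*time.Second, 30*time.Second))    // failover
}

-// Needed for errors.Is to work properly.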
-func (e tooManyClustersError) Is(err error) bool {
-	_, ok1 := err.(tooManyClustersError)
-	_, ok2 := err.(*tooManyClustersError)
-	return ok1 || ok2
-}
-
-func findHALabels(replicaLabel, clusterLabel string, labels []cortexpb.LabelAdapter) (string, string) {
-	var cluster, replica string
-	var pair cortexpb.LabelAdapter
-
-	for _, pair = range labels {
-		if pair.Name == replicaLabel {
-			replica = pair.Value
-		}
-		if pair.Name == clusterLabel {
-			// The cluster label is unmarshalled into a yoloString, which retains the original remote write request body in memory.
-			// Hence, we clone the yoloString to allow the request body to be garbage collected.
-			cluster = util.StringsClone(pair.Value)
-		}
-	}
-
-	return cluster, replica
-}
-
-func (c *haTracker) cleanupHATrackerMetricsForUser(userID string) {
-	filter := map[string]string{"user": userID}
-
-	if err := util.DeleteMatchingLabels(c.electedReplicaChanges, filter); err != nil {
-		level.Warn(c.logger).Log("msg", "failed to remove cortex_ha_tracker_elected_replica_changes_total metric for user", "user", userID, "err", err)
-	}
-	if err := util.DeleteMatchingLabels(c.electedReplicaTimestamp, filter); err != nil {
-		level.Warn(c.logger).Log("msg", "failed to remove cortex_ha_tracker_elected_replica_timestamp_seconds metric for user", "user", userID, "err", err)
-	}
-	if err := util.DeleteMatchingLabels(c.kvCASCalls, filter); err != nil {
-		level.Warn(c.logger).Log("msg", "failed to remove cortex_ha_tracker_kv_store_cas_total metric for user", "user", userID, "err", err)
-	}
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go
deleted file mode 100644
index 0e9d494eb..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go
+++ /dev/null
@@ -1,494 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ha_tracker.proto
-
-package distributor
-
-import (
-	fmt "fmt"
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/gogo/protobuf/proto"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-	reflect "reflect"
-	strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ReplicaDesc struct {
-	Replica    string `protobuf:"bytes,1,opt,name=replica,proto3" json:"replica,omitempty"`
-	ReceivedAt int64  `protobuf:"varint,2,opt,name=received_at,json=receivedAt,proto3" json:"received_at,omitempty"`
-	// Unix timestamp in milliseconds when this entry was marked for deletion.
-	// The reason for marking first and deleting later is to make sure that distributors
-	// watching the prefix will receive a notification on "marking" -- at which point they can
-	// already remove the entry from memory. Actual deletion from the KV store does *not* trigger
-	// a "watch" notification with a key for all KV stores.
- DeletedAt int64 `protobuf:"varint,3,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"` -} - -func (m *ReplicaDesc) Reset() { *m = ReplicaDesc{} } -func (*ReplicaDesc) ProtoMessage() {} -func (*ReplicaDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_86f0e7bcf71d860b, []int{0} -} -func (m *ReplicaDesc) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReplicaDesc.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReplicaDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaDesc.Merge(m, src) -} -func (m *ReplicaDesc) XXX_Size() int { - return m.Size() -} -func (m *ReplicaDesc) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaDesc proto.InternalMessageInfo - -func (m *ReplicaDesc) GetReplica() string { - if m != nil { - return m.Replica - } - return "" -} - -func (m *ReplicaDesc) GetReceivedAt() int64 { - if m != nil { - return m.ReceivedAt - } - return 0 -} - -func (m *ReplicaDesc) GetDeletedAt() int64 { - if m != nil { - return m.DeletedAt - } - return 0 -} - -func init() { - proto.RegisterType((*ReplicaDesc)(nil), "distributor.ReplicaDesc") -} - -func init() { proto.RegisterFile("ha_tracker.proto", fileDescriptor_86f0e7bcf71d860b) } - -var fileDescriptor_86f0e7bcf71d860b = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0x8c, 0x2f, - 0x29, 0x4a, 0x4c, 0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0xc9, - 0x2c, 0x2e, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, - 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0xab, 0x49, 0x2a, 0x4d, - 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x57, 0x29, 0x9d, 0x8b, 0x3b, 0x28, 0xb5, 0x20, 0x27, - 0x33, 0x39, 0xd1, 0x25, 0xb5, 0x38, 0x59, 0x48, 0x82, 0x8b, 0xbd, 0x08, 0xc2, 0x95, 0x60, 0x54, - 0x60, 0xd4, 0xe0, 0x0c, 0x82, 0x71, 0x85, 0xe4, 0xb9, 0xb8, 0x8b, 0x52, 0x93, 0x53, 0x33, 0xcb, - 0x52, 0x53, 0xe2, 0x13, 0x4b, 0x24, 0x98, 0x14, 0x18, 0x35, 0x98, 0x83, 0xb8, 0x60, 0x42, 0x8e, - 0x25, 0x42, 0xb2, 0x5c, 0x5c, 0x29, 0xa9, 0x39, 0xa9, 0x25, 0x10, 0x79, 0x66, 0xb0, 0x3c, 0x27, - 0x54, 0xc4, 0xb1, 0xc4, 0xc9, 0xe4, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, - 0x94, 0x63, 0x6c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, - 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, 0x87, 0x47, 0x72, 0x8c, - 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0xd8, - 0x95, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xd1, 0xdd, 0x8d, 0xf5, 0x00, 0x00, 0x00, -} - -func (this *ReplicaDesc) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReplicaDesc) - if !ok { - that2, ok := that.(ReplicaDesc) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Replica != that1.Replica { - return false - } - if this.ReceivedAt != that1.ReceivedAt { - return false - } - if this.DeletedAt != that1.DeletedAt { - return false - } - return true -} -func (this *ReplicaDesc) GoString() string { - 
if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&distributor.ReplicaDesc{") - s = append(s, "Replica: "+fmt.Sprintf("%#v", this.Replica)+",\n") - s = append(s, "ReceivedAt: "+fmt.Sprintf("%#v", this.ReceivedAt)+",\n") - s = append(s, "DeletedAt: "+fmt.Sprintf("%#v", this.DeletedAt)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringHaTracker(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *ReplicaDesc) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaDesc) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DeletedAt != 0 { - i = encodeVarintHaTracker(dAtA, i, uint64(m.DeletedAt)) - i-- - dAtA[i] = 0x18 - } - if m.ReceivedAt != 0 { - i = encodeVarintHaTracker(dAtA, i, uint64(m.ReceivedAt)) - i-- - dAtA[i] = 0x10 - } - if len(m.Replica) > 0 { - i -= len(m.Replica) - copy(dAtA[i:], m.Replica) - i = encodeVarintHaTracker(dAtA, i, uint64(len(m.Replica))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintHaTracker(dAtA []byte, offset int, v uint64) int { - offset -= sovHaTracker(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ReplicaDesc) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Replica) - if l > 0 { - n += 1 + l + sovHaTracker(uint64(l)) - } - if m.ReceivedAt != 0 { - n += 1 + sovHaTracker(uint64(m.ReceivedAt)) - } - if m.DeletedAt != 0 { - n += 1 + sovHaTracker(uint64(m.DeletedAt)) - } - return n -} - -func sovHaTracker(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozHaTracker(x uint64) (n int) { - return sovHaTracker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ReplicaDesc) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaDesc{`, - `Replica:` + fmt.Sprintf("%v", this.Replica) + `,`, - `ReceivedAt:` + fmt.Sprintf("%v", this.ReceivedAt) + `,`, - `DeletedAt:` + fmt.Sprintf("%v", this.DeletedAt) + `,`, - `}`, - }, "") - return s -} -func valueToStringHaTracker(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ReplicaDesc) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHaTracker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaDesc: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaDesc: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field Replica", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHaTracker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHaTracker - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHaTracker - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Replica = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReceivedAt", wireType) - } - m.ReceivedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHaTracker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReceivedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletedAt", wireType) - } - m.DeletedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHaTracker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DeletedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipHaTracker(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthHaTracker - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthHaTracker - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipHaTracker(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHaTracker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHaTracker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHaTracker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthHaTracker - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthHaTracker - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHaTracker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipHaTracker(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthHaTracker - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, 
nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthHaTracker = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowHaTracker   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto
deleted file mode 100644
index f58758ee5..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto
+++ /dev/null
@@ -1,20 +0,0 @@
-syntax = "proto3";
-
-package distributor;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-message ReplicaDesc {
-  string replica = 1;
-  int64 received_at = 2;
-
-  // Unix timestamp in milliseconds when this entry was marked for deletion.
-  // The reason for marking first and deleting later is to make sure that distributors
-  // watching the prefix will receive a notification on "marking" -- at which point they can
-  // already remove the entry from memory. Actual deletion from the KV store does *not* trigger
-  // a "watch" notification with a key for all KV stores.
-  int64 deleted_at = 3;
-}
\ No newline at end of file
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go
deleted file mode 100644
index 8eb430f01..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package distributor
-
-import (
-	"html/template"
-	"net/http"
-	"sort"
-	"strings"
-	"time"
-
-	"github.com/prometheus/prometheus/model/timestamp"
-
-	"github.com/cortexproject/cortex/pkg/util"
-)
-
-const trackerTpl = `
-<!DOCTYPE html>
-<html>
-	<head>
-		<meta charset="UTF-8">
-		<title>Cortex HA Tracker Status</title>
-	</head>
-	<body>
-		<h1>Cortex HA Tracker Status</h1>
-		<p>Current time: {{ .Now }}</p>
-		<table width="100%" border="1">
-			<thead>
-				<tr>
-					<th>User ID</th>
-					<th>Cluster</th>
-					<th>Replica</th>
-					<th>Elected Time</th>
-					<th>Time Until Update</th>
-					<th>Time Until Failover</th>
-				</tr>
-			</thead>
-			<tbody>
-				{{ range .Elected }}
-				<tr>
-					<td>{{ .UserID }}</td>
-					<td>{{ .Cluster }}</td>
-					<td>{{ .Replica }}</td>
-					<td>{{ .ElectedAt }}</td>
-					<td>{{ .UpdateTime }}</td>
-					<td>{{ .FailoverTime }}</td>
-				</tr>
-				{{ end }}
-			</tbody>
-		</table>
-	</body>
-</html>`
-
-var trackerTmpl *template.Template
-
-func init() {
-	trackerTmpl = template.Must(template.New("ha-tracker").Parse(trackerTpl))
-}
-
-func (h *haTracker) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	h.electedLock.RLock()
-	type replica struct {
-		UserID       string        `json:"userID"`
-		Cluster      string        `json:"cluster"`
-		Replica      string        `json:"replica"`
-		ElectedAt    time.Time     `json:"electedAt"`
-		UpdateTime   time.Duration `json:"updateDuration"`
-		FailoverTime time.Duration `json:"failoverDuration"`
-	}
-
-	electedReplicas := []replica{}
-	for key, desc := range h.elected {
-		chunks := strings.SplitN(key, "/", 2)
-
-		electedReplicas = append(electedReplicas, replica{
-			UserID:       chunks[0],
-			Cluster:      chunks[1],
-			Replica:      desc.Replica,
-			ElectedAt:    timestamp.Time(desc.ReceivedAt),
-			UpdateTime:   time.Until(timestamp.Time(desc.ReceivedAt).Add(h.cfg.UpdateTimeout)),
-			FailoverTime: time.Until(timestamp.Time(desc.ReceivedAt).Add(h.cfg.FailoverTimeout)),
-		})
-	}
-	h.electedLock.RUnlock()
-
-	sort.Slice(electedReplicas, func(i, j int) bool {
-		first := electedReplicas[i]
-		second := electedReplicas[j]
-
-		if first.UserID != second.UserID {
-			return first.UserID < second.UserID
-		}
-		return first.Cluster < second.Cluster
-	})
-
-	util.RenderHTTPResponse(w, struct {
-		Elected []replica `json:"elected"`
-		Now     time.Time `json:"now"`
-	}{
-		Elected: electedReplicas,
-		Now:     time.Now(),
-	}, trackerTmpl, req)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go
deleted file mode 100644
index ed6c12cb8..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package distributor
-
-import (
-	"encoding/json"
-	"fmt"
-	"html/template"
-	"net/http"
-	"sort"
-	"strings"
-	"time"
-
-	"github.com/cortexproject/cortex/pkg/util"
-)
-
-const tpl = `
-<!DOCTYPE html>
-<html>
-	<head>
-		<meta charset="UTF-8">
-		<title>Cortex Ingester Stats</title>
-	</head>
-	<body>
-		<h1>Cortex Ingester Stats</h1>
-		<p>Current time: {{ .Now }}</p>
-		<p><b>NB stats do not account for replication factor, which is currently set to {{ .ReplicationFactor }}</b></p>
-		<table border="1">
-			<thead>
-				<tr>
-					<th>User</th>
-					<th># Series</th>
-					<th>Total Ingest Rate</th>
-					<th>API Ingest Rate</th>
-					<th>Rule Ingest Rate</th>
-				</tr>
-			</thead>
-			<tbody>
-				{{ range .Stats }}
-				<tr>
-					<td>{{ .UserID }}</td>
-					<td align='right'>{{ .UserStats.NumSeries }}</td>
-					<td align='right'>{{ printf "%.2f" .UserStats.IngestionRate }}</td>
-					<td align='right'>{{ printf "%.2f" .UserStats.APIIngestionRate }}</td>
-					<td align='right'>{{ printf "%.2f" .UserStats.RuleIngestionRate }}</td>
-				</tr>
-				{{ end }}
-			</tbody>
-		</table>
-	</body>
-</html>`
-
-var tmpl *template.Template
-
-func init() {
-	tmpl = template.Must(template.New("webpage").Parse(tpl))
-}
-
-type userStatsByTimeseries []UserIDStats
-
-func (s userStatsByTimeseries) Len() int      { return len(s) }
-func (s userStatsByTimeseries) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s userStatsByTimeseries) Less(i, j int) bool {
-	return s[i].NumSeries > s[j].NumSeries ||
-		(s[i].NumSeries == s[j].NumSeries && s[i].UserID < s[j].UserID)
-}
-
-// AllUserStatsHandler shows stats for all users.
-func (d *Distributor) AllUserStatsHandler(w http.ResponseWriter, r *http.Request) {
-	stats, err := d.AllUserStats(r.Context())
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	sort.Sort(userStatsByTimeseries(stats))
-
-	if encodings, found := r.Header["Accept"]; found &&
-		len(encodings) > 0 && strings.Contains(encodings[0], "json") {
-		if err := json.NewEncoder(w).Encode(stats); err != nil {
-			http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError)
-		}
-		return
-	}
-
-	util.RenderHTTPResponse(w, struct {
-		Now               time.Time     `json:"now"`
-		Stats             []UserIDStats `json:"stats"`
-		ReplicationFactor int           `json:"replicationFactor"`
-	}{
-		Now:               time.Now(),
-		Stats:             stats,
-		ReplicationFactor: d.ingestersRing.ReplicationFactor(),
-	}, tmpl, r)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go
deleted file mode 100644
index ba0fb5913..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package distributor
-
-import (
-	"net/http"
-
-	"github.com/cortexproject/cortex/pkg/util"
-)
-
-// UserStats models ingestion statistics for one user.
-type UserStats struct {
-	IngestionRate     float64 `json:"ingestionRate"`
-	NumSeries         uint64  `json:"numSeries"`
-	APIIngestionRate  float64 `json:"APIIngestionRate"`
-	RuleIngestionRate float64 `json:"RuleIngestionRate"`
-}
-
-// UserStatsHandler serves ingestion statistics for the current user.
-func (d *Distributor) UserStatsHandler(w http.ResponseWriter, r *http.Request) {
-	stats, err := d.UserStats(r.Context())
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	util.WriteJSONResponse(w, stats)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ingester_client_pool.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ingester_client_pool.go
deleted file mode 100644
index 007d8872e..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ingester_client_pool.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package distributor
-
-import (
-	"flag"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-
-	"github.com/cortexproject/cortex/pkg/ring"
-	ring_client "github.com/cortexproject/cortex/pkg/ring/client"
-)
-
-var clients = promauto.NewGauge(prometheus.GaugeOpts{
-	Namespace: "cortex",
-	Name:      "distributor_ingester_clients",
-	Help:      "The current number of ingester clients.",
-})
-
-// PoolConfig is config for creating a Pool.
-type PoolConfig struct {
-	ClientCleanupPeriod  time.Duration `yaml:"client_cleanup_period"`
-	HealthCheckIngesters bool          `yaml:"health_check_ingesters"`
-	RemoteTimeout        time.Duration `yaml:"-"`
-}
-
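The userStatsByTimeseries ordering above (series count descending, user ID ascending as the tie-breaker) can equally be written with sort.Slice; a self-contained sketch, where row is a hypothetical stand-in for UserIDStats:

package main

import (
	"fmt"
	"sort"
)

type row struct {
	UserID    string
	NumSeries uint64
}

func main() {
	rows := []row{{"b", 10}, {"a", 10}, {"c", 99}}
	// Same ordering as userStatsByTimeseries: most series first, then by user ID.
	sort.Slice(rows, func(i, j int) bool {
		if rows[i].NumSeries != rows[j].NumSeries {
			return rows[i].NumSeries > rows[j].NumSeries
		}
		return rows[i].UserID < rows[j].UserID
	})
	fmt.Println(rows) // [{c 99} {a 10} {b 10}]
}

-// RegisterFlags adds the flags required to configure this to the given FlagSet.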
-func (cfg *PoolConfig) RegisterFlags(f *flag.FlagSet) { - f.DurationVar(&cfg.ClientCleanupPeriod, "distributor.client-cleanup-period", 15*time.Second, "How frequently to clean up clients for ingesters that have gone away.") - f.BoolVar(&cfg.HealthCheckIngesters, "distributor.health-check-ingesters", true, "Run a health check on each ingester client during periodic cleanup.") -} - -func NewPool(cfg PoolConfig, ring ring.ReadRing, factory ring_client.PoolFactory, logger log.Logger) *ring_client.Pool { - poolCfg := ring_client.PoolConfig{ - CheckInterval: cfg.ClientCleanupPeriod, - HealthCheckEnabled: cfg.HealthCheckIngesters, - HealthCheckTimeout: cfg.RemoteTimeout, - } - - return ring_client.NewPool("ingester", poolCfg, ring_client.NewRingServiceDiscovery(ring), factory, clients, logger) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go deleted file mode 100644 index cc3e5dd24..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go +++ /dev/null @@ -1,74 +0,0 @@ -package distributor - -import ( - "golang.org/x/time/rate" - - "github.com/cortexproject/cortex/pkg/util/limiter" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// ReadLifecycler represents the read interface to the lifecycler. -type ReadLifecycler interface { - HealthyInstancesCount() int -} - -type localStrategy struct { - limits *validation.Overrides -} - -func newLocalIngestionRateStrategy(limits *validation.Overrides) limiter.RateLimiterStrategy { - return &localStrategy{ - limits: limits, - } -} - -func (s *localStrategy) Limit(tenantID string) float64 { - return s.limits.IngestionRate(tenantID) -} - -func (s *localStrategy) Burst(tenantID string) int { - return s.limits.IngestionBurstSize(tenantID) -} - -type globalStrategy struct { - limits *validation.Overrides - ring ReadLifecycler -} - -func newGlobalIngestionRateStrategy(limits *validation.Overrides, ring ReadLifecycler) limiter.RateLimiterStrategy { - return &globalStrategy{ - limits: limits, - ring: ring, - } -} - -func (s *globalStrategy) Limit(tenantID string) float64 { - numDistributors := s.ring.HealthyInstancesCount() - - if numDistributors == 0 { - return s.limits.IngestionRate(tenantID) - } - - return s.limits.IngestionRate(tenantID) / float64(numDistributors) -} - -func (s *globalStrategy) Burst(tenantID string) int { - // The meaning of burst doesn't change for the global strategy, in order - // to keep it easier to understand for users / operators. 
-	return s.limits.IngestionBurstSize(tenantID)
-}
-
-type infiniteStrategy struct{}
-
-func newInfiniteIngestionRateStrategy() limiter.RateLimiterStrategy {
-	return &infiniteStrategy{}
-}
-
-func (s *infiniteStrategy) Limit(tenantID string) float64 {
-	return float64(rate.Inf)
-}
-
-func (s *infiniteStrategy) Burst(tenantID string) int {
-	// Burst is ignored when limit = rate.Inf
-	return 0
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
deleted file mode 100644
index 0c2145cef..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package distributor
-
-import (
-	"context"
-	"io"
-	"sort"
-	"time"
-
-	"github.com/opentracing/opentracing-go"
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/model/labels"
-	"github.com/weaveworks/common/instrument"
-
-	"github.com/cortexproject/cortex/pkg/cortexpb"
-	ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
-	"github.com/cortexproject/cortex/pkg/querier/stats"
-	"github.com/cortexproject/cortex/pkg/ring"
-	"github.com/cortexproject/cortex/pkg/tenant"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/cortexproject/cortex/pkg/util/extract"
-	"github.com/cortexproject/cortex/pkg/util/grpcutil"
-	"github.com/cortexproject/cortex/pkg/util/limiter"
-	"github.com/cortexproject/cortex/pkg/util/validation"
-)
-
-// Query queries multiple ingesters and returns a Matrix of samples.
-func (d *Distributor) Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) {
-	var matrix model.Matrix
-	err := instrument.CollectedRequest(ctx, "Distributor.Query", d.queryDuration, instrument.ErrorCode, func(ctx context.Context) error {
-		req, err := ingester_client.ToQueryRequest(from, to, matchers)
-		if err != nil {
-			return err
-		}
-
-		replicationSet, err := d.GetIngestersForQuery(ctx, matchers...)
-		if err != nil {
-			return err
-		}
-
-		matrix, err = d.queryIngesters(ctx, replicationSet, req)
-		if err != nil {
-			return err
-		}
-
-		if s := opentracing.SpanFromContext(ctx); s != nil {
-			s.LogKV("series", len(matrix))
-		}
-		return nil
-	})
-	return matrix, err
-}
-
-func (d *Distributor) QueryExemplars(ctx context.Context, from, to model.Time, matchers ...[]*labels.Matcher) (*ingester_client.ExemplarQueryResponse, error) {
-	var result *ingester_client.ExemplarQueryResponse
-	err := instrument.CollectedRequest(ctx, "Distributor.QueryExemplars", d.queryDuration, instrument.ErrorCode, func(ctx context.Context) error {
-		req, err := ingester_client.ToExemplarQueryRequest(from, to, matchers...)
-		if err != nil {
-			return err
-		}
-
-		// We ask for all ingesters without passing matchers because exemplar queries take in an array of arrays of label matchers.
-		replicationSet, err := d.GetIngestersForQuery(ctx)
-		if err != nil {
-			return err
-		}
-
-		result, err = d.queryIngestersExemplars(ctx, replicationSet, req)
-		if err != nil {
-			return err
-		}
-
-		if s := opentracing.SpanFromContext(ctx); s != nil {
-			s.LogKV("series", len(result.Timeseries))
-		}
-		return nil
-	})
-	return result, err
-}
-
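The three rate-limiter strategies above differ only in how a tenant's configured ingestion rate becomes a per-distributor limit: local applies it as-is, global divides it by the number of healthy distributors, and infinite disables it. A self-contained sketch of the global case; perDistributorLimit is a hypothetical helper mirroring globalStrategy.Limit:

package main

import "fmt"

// perDistributorLimit gives each distributor an equal share of the tenant's
// global ingestion-rate limit, falling back to the full limit when the
// healthy-instance count is zero, as globalStrategy does.
func perDistributorLimit(globalLimit float64, healthyDistributors int) float64 {
	if healthyDistributors == 0 {
		return globalLimit
	}
	return globalLimit / float64(healthyDistributors)
}

func main() {
	fmt.Println(perDistributorLimit(100000, 4)) // 25000
}

-// QueryStream queries multiple ingesters via the streaming interface and returns a big set of chunks.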
-func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (*ingester_client.QueryStreamResponse, error) { - var result *ingester_client.QueryStreamResponse - err := instrument.CollectedRequest(ctx, "Distributor.QueryStream", d.queryDuration, instrument.ErrorCode, func(ctx context.Context) error { - req, err := ingester_client.ToQueryRequest(from, to, matchers) - if err != nil { - return err - } - - replicationSet, err := d.GetIngestersForQuery(ctx, matchers...) - if err != nil { - return err - } - - result, err = d.queryIngesterStream(ctx, replicationSet, req) - if err != nil { - return err - } - - if s := opentracing.SpanFromContext(ctx); s != nil { - s.LogKV("chunk-series", len(result.GetChunkseries()), "time-series", len(result.GetTimeseries())) - } - return nil - }) - return result, err -} - -// GetIngestersForQuery returns a replication set including all ingesters that should be queried -// to fetch series matching input label matchers. -func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*labels.Matcher) (ring.ReplicationSet, error) { - userID, err := tenant.TenantID(ctx) - if err != nil { - return ring.ReplicationSet{}, err - } - - // If shuffle sharding is enabled we should only query ingesters which are - // part of the tenant's subring. - if d.cfg.ShardingStrategy == util.ShardingStrategyShuffle { - shardSize := d.limits.IngestionTenantShardSize(userID) - lookbackPeriod := d.cfg.ShuffleShardingLookbackPeriod - - if shardSize > 0 && lookbackPeriod > 0 { - return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetReplicationSetForOperation(ring.Read) - } - } - - // If "shard by all labels" is disabled, we can get ingesters by metricName if exists. - if !d.cfg.ShardByAllLabels && len(matchers) > 0 { - metricNameMatcher, _, ok := extract.MetricNameMatcherFromMatchers(matchers) - - if ok && metricNameMatcher.Type == labels.MatchEqual { - return d.ingestersRing.Get(shardByMetricName(userID, metricNameMatcher.Value), ring.Read, nil, nil, nil) - } - } - - return d.ingestersRing.GetReplicationSetForOperation(ring.Read) -} - -// GetIngestersForMetadata returns a replication set including all ingesters that should be queried -// to fetch metadata (eg. label names/values or series). -func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.ReplicationSet, error) { - userID, err := tenant.TenantID(ctx) - if err != nil { - return ring.ReplicationSet{}, err - } - - // If shuffle sharding is enabled we should only query ingesters which are - // part of the tenant's subring. - if d.cfg.ShardingStrategy == util.ShardingStrategyShuffle { - shardSize := d.limits.IngestionTenantShardSize(userID) - lookbackPeriod := d.cfg.ShuffleShardingLookbackPeriod - - if shardSize > 0 && lookbackPeriod > 0 { - return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetReplicationSetForOperation(ring.Read) - } - } - - return d.ingestersRing.GetReplicationSetForOperation(ring.Read) -} - -// queryIngesters queries the ingesters via the older, sample-based API. -func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (model.Matrix, error) { - // Fetch samples from multiple ingesters in parallel, using the replicationSet - // to deal with consistency. 
- results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { - client, err := d.ingesterPool.GetClientFor(ing.Addr) - if err != nil { - return nil, err - } - - resp, err := client.(ingester_client.IngesterClient).Query(ctx, req) - d.ingesterQueries.WithLabelValues(ing.Addr).Inc() - if err != nil { - d.ingesterQueryFailures.WithLabelValues(ing.Addr).Inc() - return nil, err - } - - return ingester_client.FromQueryResponse(resp), nil - }) - if err != nil { - return nil, err - } - - // Merge the results into a single matrix. - fpToSampleStream := map[model.Fingerprint]*model.SampleStream{} - for _, result := range results { - for _, ss := range result.(model.Matrix) { - fp := ss.Metric.Fingerprint() - mss, ok := fpToSampleStream[fp] - if !ok { - mss = &model.SampleStream{ - Metric: ss.Metric, - } - fpToSampleStream[fp] = mss - } - mss.Values = util.MergeSampleSets(mss.Values, ss.Values) - } - } - result := model.Matrix{} - for _, ss := range fpToSampleStream { - result = append(result, ss) - } - - return result, nil -} - -// mergeExemplarSets merges and dedupes two sets of already sorted exemplar pairs. -// Both a and b should be lists of exemplars from the same series. -// Defined here instead of pkg/util to avoid a import cycle. -func mergeExemplarSets(a, b []cortexpb.Exemplar) []cortexpb.Exemplar { - result := make([]cortexpb.Exemplar, 0, len(a)+len(b)) - i, j := 0, 0 - for i < len(a) && j < len(b) { - if a[i].TimestampMs < b[j].TimestampMs { - result = append(result, a[i]) - i++ - } else if a[i].TimestampMs > b[j].TimestampMs { - result = append(result, b[j]) - j++ - } else { - result = append(result, a[i]) - i++ - j++ - } - } - // Add the rest of a or b. One of them is empty now. - result = append(result, a[i:]...) - result = append(result, b[j:]...) - return result -} - -// queryIngestersExemplars queries the ingesters for exemplars. -func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.ExemplarQueryRequest) (*ingester_client.ExemplarQueryResponse, error) { - // Fetch exemplars from multiple ingesters in parallel, using the replicationSet - // to deal with consistency. - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { - client, err := d.ingesterPool.GetClientFor(ing.Addr) - if err != nil { - return nil, err - } - - resp, err := client.(ingester_client.IngesterClient).QueryExemplars(ctx, req) - d.ingesterQueries.WithLabelValues(ing.Addr).Inc() - if err != nil { - d.ingesterQueryFailures.WithLabelValues(ing.Addr).Inc() - return nil, err - } - - return resp, nil - }) - if err != nil { - return nil, err - } - - return mergeExemplarQueryResponses(results), nil -} - -func mergeExemplarQueryResponses(results []interface{}) *ingester_client.ExemplarQueryResponse { - var keys []string - exemplarResults := make(map[string]cortexpb.TimeSeries) - for _, result := range results { - r := result.(*ingester_client.ExemplarQueryResponse) - for _, ts := range r.Timeseries { - lbls := cortexpb.FromLabelAdaptersToLabels(ts.Labels).String() - e, ok := exemplarResults[lbls] - if !ok { - exemplarResults[lbls] = ts - keys = append(keys, lbls) - } else { - // Merge in any missing values from another ingesters exemplars for this series. 
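mergeExemplarSets above is a textbook two-pointer merge of two ascending lists that keeps a single copy of entries sharing a timestamp; the loop below applies it whenever two ingesters return exemplars for the same series. The same algorithm on bare timestamps, runnable in isolation:

package main

import "fmt"

// mergeSorted merges two ascending timestamp slices, emitting one copy of
// any timestamp present in both, exactly the scheme mergeExemplarSets uses.
func mergeSorted(a, b []int64) []int64 {
	out := make([]int64, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			out = append(out, a[i])
			i++
		case a[i] > b[j]:
			out = append(out, b[j])
			j++
		default: // equal timestamps: keep one copy
			out = append(out, a[i])
			i++
			j++
		}
	}
	out = append(out, a[i:]...) // one of the two tails is empty
	return append(out, b[j:]...)
}

func main() {
	fmt.Println(mergeSorted([]int64{1, 3, 5}, []int64{3, 4})) // [1 3 4 5]
}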
- e.Exemplars = mergeExemplarSets(e.Exemplars, ts.Exemplars) - exemplarResults[lbls] = e - } - } - } - - // Query results from each ingester were sorted, but are not necessarily still sorted after merging. - sort.Strings(keys) - - result := make([]cortexpb.TimeSeries, len(exemplarResults)) - for i, k := range keys { - result[i] = exemplarResults[k] - } - - return &ingester_client.ExemplarQueryResponse{Timeseries: result} -} - -// queryIngesterStream queries the ingesters using the new streaming API. -func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) { - var ( - queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) - reqStats = stats.FromContext(ctx) - ) - - // Fetch samples from multiple ingesters - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { - client, err := d.ingesterPool.GetClientFor(ing.Addr) - if err != nil { - return nil, err - } - d.ingesterQueries.WithLabelValues(ing.Addr).Inc() - - stream, err := client.(ingester_client.IngesterClient).QueryStream(ctx, req) - if err != nil { - d.ingesterQueryFailures.WithLabelValues(ing.Addr).Inc() - return nil, err - } - defer stream.CloseSend() //nolint:errcheck - - result := &ingester_client.QueryStreamResponse{} - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } else if err != nil { - // Do not track a failure if the context was canceled. - if !grpcutil.IsGRPCContextCanceled(err) { - d.ingesterQueryFailures.WithLabelValues(ing.Addr).Inc() - } - - return nil, err - } - - // Enforce the max chunks limits. - if chunkLimitErr := queryLimiter.AddChunks(resp.ChunksCount()); chunkLimitErr != nil { - return nil, validation.LimitError(chunkLimitErr.Error()) - } - - for _, series := range resp.Chunkseries { - if limitErr := queryLimiter.AddSeries(series.Labels); limitErr != nil { - return nil, validation.LimitError(limitErr.Error()) - } - } - - if chunkBytesLimitErr := queryLimiter.AddChunkBytes(resp.ChunksSize()); chunkBytesLimitErr != nil { - return nil, validation.LimitError(chunkBytesLimitErr.Error()) - } - - for _, series := range resp.Timeseries { - if limitErr := queryLimiter.AddSeries(series.Labels); limitErr != nil { - return nil, validation.LimitError(limitErr.Error()) - } - } - - result.Chunkseries = append(result.Chunkseries, resp.Chunkseries...) - result.Timeseries = append(result.Timeseries, resp.Timeseries...) - } - return result, nil - }) - if err != nil { - return nil, err - } - - hashToChunkseries := map[string]ingester_client.TimeSeriesChunk{} - hashToTimeSeries := map[string]cortexpb.TimeSeries{} - - for _, result := range results { - response := result.(*ingester_client.QueryStreamResponse) - - // Parse any chunk series - for _, series := range response.Chunkseries { - key := ingester_client.LabelsToKeyString(cortexpb.FromLabelAdaptersToLabels(series.Labels)) - existing := hashToChunkseries[key] - existing.Labels = series.Labels - existing.Chunks = append(existing.Chunks, series.Chunks...) 
- hashToChunkseries[key] = existing - } - - // Parse any time series - for _, series := range response.Timeseries { - key := ingester_client.LabelsToKeyString(cortexpb.FromLabelAdaptersToLabels(series.Labels)) - existing := hashToTimeSeries[key] - existing.Labels = series.Labels - if existing.Samples == nil { - existing.Samples = series.Samples - } else { - existing.Samples = mergeSamples(existing.Samples, series.Samples) - } - hashToTimeSeries[key] = existing - } - } - - resp := &ingester_client.QueryStreamResponse{ - Chunkseries: make([]ingester_client.TimeSeriesChunk, 0, len(hashToChunkseries)), - Timeseries: make([]cortexpb.TimeSeries, 0, len(hashToTimeSeries)), - } - for _, series := range hashToChunkseries { - resp.Chunkseries = append(resp.Chunkseries, series) - } - for _, series := range hashToTimeSeries { - resp.Timeseries = append(resp.Timeseries, series) - } - - reqStats.AddFetchedSeries(uint64(len(resp.Chunkseries) + len(resp.Timeseries))) - reqStats.AddFetchedChunkBytes(uint64(resp.ChunksSize())) - - return resp, nil -} - -// Merges and dedupes two sorted slices with samples together. -func mergeSamples(a, b []cortexpb.Sample) []cortexpb.Sample { - if sameSamples(a, b) { - return a - } - - result := make([]cortexpb.Sample, 0, len(a)+len(b)) - i, j := 0, 0 - for i < len(a) && j < len(b) { - if a[i].TimestampMs < b[j].TimestampMs { - result = append(result, a[i]) - i++ - } else if a[i].TimestampMs > b[j].TimestampMs { - result = append(result, b[j]) - j++ - } else { - result = append(result, a[i]) - i++ - j++ - } - } - // Add the rest of a or b. One of them is empty now. - result = append(result, a[i:]...) - result = append(result, b[j:]...) - return result -} - -func sameSamples(a, b []cortexpb.Sample) bool { - if len(a) != len(b) { - return false - } - - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go deleted file mode 100644 index ee0992a3a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go +++ /dev/null @@ -1,107 +0,0 @@ -package flusher - -import ( - "context" - "flag" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/ingester" - "github.com/cortexproject/cortex/pkg/util/modules" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// Config for an Ingester. -type Config struct { - WALDir string `yaml:"wal_dir"` - ConcurrentFlushes int `yaml:"concurrent_flushes"` - FlushOpTimeout time.Duration `yaml:"flush_op_timeout"` - ExitAfterFlush bool `yaml:"exit_after_flush"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.WALDir, "flusher.wal-dir", "wal", "Directory to read WAL from (chunks storage engine only).") - f.IntVar(&cfg.ConcurrentFlushes, "flusher.concurrent-flushes", 50, "Number of concurrent goroutines flushing to storage (chunks storage engine only).") - f.DurationVar(&cfg.FlushOpTimeout, "flusher.flush-op-timeout", 2*time.Minute, "Timeout for individual flush operations (chunks storage engine only).") - f.BoolVar(&cfg.ExitAfterFlush, "flusher.exit-after-flush", true, "Stop Cortex after flush has finished. 
If false, Cortex process will keep running, doing nothing.") -} - -// Flusher is designed to be used as a job to flush the data from the WAL on disk. -// Flusher works with both chunks-based and blocks-based ingesters. -type Flusher struct { - services.Service - - cfg Config - ingesterConfig ingester.Config - chunkStore ingester.ChunkStore - limits *validation.Overrides - registerer prometheus.Registerer - logger log.Logger -} - -const ( - postFlushSleepTime = 1 * time.Minute -) - -// New constructs a new Flusher and flushes the data from the WAL. -// The returned Flusher has no other operations. -func New( - cfg Config, - ingesterConfig ingester.Config, - chunkStore ingester.ChunkStore, - limits *validation.Overrides, - registerer prometheus.Registerer, - logger log.Logger, -) (*Flusher, error) { - - // These are ignored by blocks-ingester, but that's fine. - ingesterConfig.WALConfig.Dir = cfg.WALDir - ingesterConfig.ConcurrentFlushes = cfg.ConcurrentFlushes - ingesterConfig.FlushOpTimeout = cfg.FlushOpTimeout - - f := &Flusher{ - cfg: cfg, - ingesterConfig: ingesterConfig, - chunkStore: chunkStore, - limits: limits, - registerer: registerer, - logger: logger, - } - f.Service = services.NewBasicService(nil, f.running, nil) - return f, nil -} - -func (f *Flusher) running(ctx context.Context) error { - ing, err := ingester.NewForFlusher(f.ingesterConfig, f.chunkStore, f.limits, f.registerer, f.logger) - if err != nil { - return errors.Wrap(err, "create ingester") - } - - if err := services.StartAndAwaitRunning(ctx, ing); err != nil { - return errors.Wrap(err, "start and await running ingester") - } - - ing.Flush() - - // Sleeping to give a chance to Prometheus - // to collect the metrics. - level.Info(f.logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String()) - time.Sleep(postFlushSleepTime) - - if err := services.StopAndAwaitTerminated(ctx, ing); err != nil { - return errors.Wrap(err, "stop and await terminated ingester") - } - - if f.cfg.ExitAfterFlush { - return modules.ErrStopProcess - } - - // Return normally -- this keep Cortex running. - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/config.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/config.go deleted file mode 100644 index feba79549..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/config.go +++ /dev/null @@ -1,73 +0,0 @@ -package frontend - -import ( - "flag" - "net/http" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/frontend/transport" - v1 "github.com/cortexproject/cortex/pkg/frontend/v1" - v2 "github.com/cortexproject/cortex/pkg/frontend/v2" - "github.com/cortexproject/cortex/pkg/util" -) - -// This struct combines several configuration options together to preserve backwards compatibility. 
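The flusher Config above and the frontend config below follow the same convention: every configuration struct registers its own flags, names and defaults included, into a caller-supplied FlagSet. A small sketch of the pattern with illustrative flag names (not real Cortex flags):

package main

import (
	"flag"
	"fmt"
	"time"
)

// Config owns its flag names and defaults, mirroring the RegisterFlags
// convention used throughout these packages.
type Config struct {
	Dir     string
	Timeout time.Duration
}

func (c *Config) RegisterFlags(f *flag.FlagSet) {
	f.StringVar(&c.Dir, "job.dir", "wal", "Directory to read from.")
	f.DurationVar(&c.Timeout, "job.timeout", 2*time.Minute, "Timeout for individual operations.")
}

func main() {
	var cfg Config
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	cfg.RegisterFlags(fs)
	_ = fs.Parse([]string{"-job.timeout=30s"})
	fmt.Println(cfg.Dir, cfg.Timeout) // wal 30s
}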
-type CombinedFrontendConfig struct { - Handler transport.HandlerConfig `yaml:",inline"` - FrontendV1 v1.Config `yaml:",inline"` - FrontendV2 v2.Config `yaml:",inline"` - - DownstreamURL string `yaml:"downstream_url"` -} - -func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { - cfg.Handler.RegisterFlags(f) - cfg.FrontendV1.RegisterFlags(f) - cfg.FrontendV2.RegisterFlags(f) - - f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") -} - -// InitFrontend initializes frontend (either V1 -- without scheduler, or V2 -- with scheduler) or no frontend at -// all if downstream Prometheus URL is used instead. -// -// Returned RoundTripper can be wrapped in more round-tripper middlewares, and then eventually registered -// into HTTP server using the Handler from this package. Returned RoundTripper is always non-nil -// (if there are no errors), and it uses the returned frontend (if any). -func InitFrontend(cfg CombinedFrontendConfig, limits v1.Limits, grpcListenPort int, log log.Logger, reg prometheus.Registerer) (http.RoundTripper, *v1.Frontend, *v2.Frontend, error) { - switch { - case cfg.DownstreamURL != "": - // If the user has specified a downstream Prometheus, then we should use that. - rt, err := NewDownstreamRoundTripper(cfg.DownstreamURL, http.DefaultTransport) - return rt, nil, nil, err - - case cfg.FrontendV2.SchedulerAddress != "": - // If query-scheduler address is configured, use Frontend. - if cfg.FrontendV2.Addr == "" { - addr, err := util.GetFirstAddressOf(cfg.FrontendV2.InfNames) - if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to get frontend address") - } - - cfg.FrontendV2.Addr = addr - } - - if cfg.FrontendV2.Port == 0 { - cfg.FrontendV2.Port = grpcListenPort - } - - fr, err := v2.NewFrontend(cfg.FrontendV2, log, reg) - return transport.AdaptGrpcRoundTripperToHTTPRoundTripper(fr), nil, fr, err - - default: - // No scheduler = use original frontend. - fr, err := v1.New(cfg.FrontendV1, limits, log, reg) - if err != nil { - return nil, nil, nil, err - } - return transport.AdaptGrpcRoundTripperToHTTPRoundTripper(fr), fr, nil, nil - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go deleted file mode 100644 index d52ced819..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go +++ /dev/null @@ -1,41 +0,0 @@ -package frontend - -import ( - "net/http" - "net/url" - "path" - - "github.com/opentracing/opentracing-go" -) - -// RoundTripper that forwards requests to downstream URL. 
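The type that follows implements this by rewriting the request URL in place and delegating to a wrapped transport. A self-contained version of the same idea against an httptest server (like the original, it mutates the incoming request, which is acceptable because the frontend owns it):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"path"
)

// rewriteTripper points every request at a fixed base URL before handing
// it to the next transport, the same shape as downstreamRoundTripper below.
type rewriteTripper struct {
	base *url.URL
	next http.RoundTripper
}

func (t rewriteTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	r.URL.Scheme = t.base.Scheme
	r.URL.Host = t.base.Host
	r.URL.Path = path.Join(t.base.Path, r.URL.Path)
	r.Host = "" // let the transport derive Host from the rewritten URL
	return t.next.RoundTrip(r)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.URL.Path) // echo the path the server actually saw
	}))
	defer srv.Close()

	base, _ := url.Parse(srv.URL + "/prom")
	client := &http.Client{Transport: rewriteTripper{base: base, next: http.DefaultTransport}}
	resp, err := client.Get("http://placeholder/api/v1/query")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // /prom/api/v1/query
}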
-type downstreamRoundTripper struct { - downstreamURL *url.URL - transport http.RoundTripper -} - -func NewDownstreamRoundTripper(downstreamURL string, transport http.RoundTripper) (http.RoundTripper, error) { - u, err := url.Parse(downstreamURL) - if err != nil { - return nil, err - } - - return &downstreamRoundTripper{downstreamURL: u, transport: transport}, nil -} - -func (d downstreamRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(r.Context()) - if tracer != nil && span != nil { - carrier := opentracing.HTTPHeadersCarrier(r.Header) - err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) - if err != nil { - return nil, err - } - } - - r.URL.Scheme = d.downstreamURL.Scheme - r.URL.Host = d.downstreamURL.Host - r.URL.Path = path.Join(d.downstreamURL.Path, r.URL.Path) - r.Host = "" - return d.transport.RoundTrip(r) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go deleted file mode 100644 index a3b3c9d20..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go +++ /dev/null @@ -1,259 +0,0 @@ -package transport - -import ( - "bytes" - "context" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "syscall" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/httpgrpc/server" - - querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -const ( - // StatusClientClosedRequest is the status code for when a client request cancellation of an http request - StatusClientClosedRequest = 499 - ServiceTimingHeaderName = "Server-Timing" -) - -var ( - errCanceled = httpgrpc.Errorf(StatusClientClosedRequest, context.Canceled.Error()) - errDeadlineExceeded = httpgrpc.Errorf(http.StatusGatewayTimeout, context.DeadlineExceeded.Error()) - errRequestEntityTooLarge = httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "http: request body too large") -) - -// Config for a Handler. -type HandlerConfig struct { - LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"` - MaxBodySize int64 `yaml:"max_body_size"` - QueryStatsEnabled bool `yaml:"query_stats_enabled"` -} - -func (cfg *HandlerConfig) RegisterFlags(f *flag.FlagSet) { - f.DurationVar(&cfg.LogQueriesLongerThan, "frontend.log-queries-longer-than", 0, "Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries.") - f.Int64Var(&cfg.MaxBodySize, "frontend.max-body-size", 10*1024*1024, "Max body size for downstream prometheus.") - f.BoolVar(&cfg.QueryStatsEnabled, "frontend.query-stats-enabled", false, "True to enable query statistics tracking. When enabled, a message with some statistics is logged for every query.") -} - -// Handler accepts queries and forwards them to RoundTripper. It can log slow queries, -// but all other logic is inside the RoundTripper. -type Handler struct { - cfg HandlerConfig - log log.Logger - roundTripper http.RoundTripper - - // Metrics. 
- querySeconds *prometheus.CounterVec - querySeries *prometheus.CounterVec - queryBytes *prometheus.CounterVec - activeUsers *util.ActiveUsersCleanupService -} - -// NewHandler creates a new frontend handler. -func NewHandler(cfg HandlerConfig, roundTripper http.RoundTripper, log log.Logger, reg prometheus.Registerer) http.Handler { - h := &Handler{ - cfg: cfg, - log: log, - roundTripper: roundTripper, - } - - if cfg.QueryStatsEnabled { - h.querySeconds = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_query_seconds_total", - Help: "Total amount of wall clock time spend processing queries.", - }, []string{"user"}) - - h.querySeries = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_query_fetched_series_total", - Help: "Number of series fetched to execute a query.", - }, []string{"user"}) - - h.queryBytes = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_query_fetched_chunks_bytes_total", - Help: "Size of all chunks fetched to execute a query in bytes.", - }, []string{"user"}) - - h.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(func(user string) { - h.querySeconds.DeleteLabelValues(user) - h.querySeries.DeleteLabelValues(user) - h.queryBytes.DeleteLabelValues(user) - }) - // If cleaner stops or fail, we will simply not clean the metrics for inactive users. - _ = h.activeUsers.StartAsync(context.Background()) - } - - return h -} - -func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var ( - stats *querier_stats.Stats - queryString url.Values - ) - - // Initialise the stats in the context and make sure it's propagated - // down the request chain. - if f.cfg.QueryStatsEnabled { - var ctx context.Context - stats, ctx = querier_stats.ContextWithEmptyStats(r.Context()) - r = r.WithContext(ctx) - } - - defer func() { - _ = r.Body.Close() - }() - - // Buffer the body for later use to track slow queries. - var buf bytes.Buffer - r.Body = http.MaxBytesReader(w, r.Body, f.cfg.MaxBodySize) - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &buf)) - - startTime := time.Now() - resp, err := f.roundTripper.RoundTrip(r) - queryResponseTime := time.Since(startTime) - - if err != nil { - writeError(w, err) - return - } - - hs := w.Header() - for h, vs := range resp.Header { - hs[h] = vs - } - - if f.cfg.QueryStatsEnabled { - writeServiceTimingHeader(queryResponseTime, hs, stats) - } - - w.WriteHeader(resp.StatusCode) - // log copy response body error so that we will know even though success response code returned - bytesCopied, err := io.Copy(w, resp.Body) - if err != nil && !errors.Is(err, syscall.EPIPE) { - level.Error(util_log.WithContext(r.Context(), f.log)).Log("msg", "write response body error", "bytesCopied", bytesCopied, "err", err) - } - - // Check whether we should parse the query string. - shouldReportSlowQuery := f.cfg.LogQueriesLongerThan != 0 && queryResponseTime > f.cfg.LogQueriesLongerThan - if shouldReportSlowQuery || f.cfg.QueryStatsEnabled { - queryString = f.parseRequestQueryString(r, buf) - } - - if shouldReportSlowQuery { - f.reportSlowQuery(r, queryString, queryResponseTime) - } - if f.cfg.QueryStatsEnabled { - f.reportQueryStats(r, queryString, queryResponseTime, stats) - } -} - -// reportSlowQuery reports slow queries. 
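ServeHTTP above can re-parse the form after the round trip only because it teed the request body into a buffer while the downstream transport consumed it. The trick in isolation:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http/httptest"
	"strings"
)

func main() {
	r := httptest.NewRequest("POST", "/api/v1/query", strings.NewReader("query=up"))
	r.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	// Tee the body: whatever the downstream reader consumes is copied
	// into buf as a side effect.
	var buf bytes.Buffer
	r.Body = io.NopCloser(io.TeeReader(r.Body, &buf))
	_, _ = io.ReadAll(r.Body) // stands in for the proxied round trip

	// Restore the body from the buffer and parse it a second time.
	r.Body = io.NopCloser(&buf)
	_ = r.ParseForm()
	fmt.Println(r.Form.Get("query")) // up
}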
-func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, queryResponseTime time.Duration) { - logMessage := append([]interface{}{ - "msg", "slow query detected", - "method", r.Method, - "host", r.Host, - "path", r.URL.Path, - "time_taken", queryResponseTime.String(), - }, formatQueryString(queryString)...) - - level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) -} - -func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, queryResponseTime time.Duration, stats *querier_stats.Stats) { - tenantIDs, err := tenant.TenantIDs(r.Context()) - if err != nil { - return - } - userID := tenant.JoinTenantIDs(tenantIDs) - wallTime := stats.LoadWallTime() - numSeries := stats.LoadFetchedSeries() - numBytes := stats.LoadFetchedChunkBytes() - - // Track stats. - f.querySeconds.WithLabelValues(userID).Add(wallTime.Seconds()) - f.querySeries.WithLabelValues(userID).Add(float64(numSeries)) - f.queryBytes.WithLabelValues(userID).Add(float64(numBytes)) - f.activeUsers.UpdateUserTimestamp(userID, time.Now()) - - // Log stats. - logMessage := append([]interface{}{ - "msg", "query stats", - "component", "query-frontend", - "method", r.Method, - "path", r.URL.Path, - "response_time", queryResponseTime, - "query_wall_time_seconds", wallTime.Seconds(), - "fetched_series_count", numSeries, - "fetched_chunks_bytes", numBytes, - }, formatQueryString(queryString)...) - - level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) -} - -func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) url.Values { - // Use previously buffered body. - r.Body = ioutil.NopCloser(&bodyBuf) - - // Ensure the form has been parsed so all the parameters are present - err := r.ParseForm() - if err != nil { - level.Warn(util_log.WithContext(r.Context(), f.log)).Log("msg", "unable to parse request form", "err", err) - return nil - } - - return r.Form -} - -func formatQueryString(queryString url.Values) (fields []interface{}) { - for k, v := range queryString { - fields = append(fields, fmt.Sprintf("param_%s", k), strings.Join(v, ",")) - } - return fields -} - -func writeError(w http.ResponseWriter, err error) { - switch err { - case context.Canceled: - err = errCanceled - case context.DeadlineExceeded: - err = errDeadlineExceeded - default: - if util.IsRequestBodyTooLarge(err) { - err = errRequestEntityTooLarge - } - } - server.WriteError(w, err) -} - -func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.Stats) { - if stats != nil { - parts := make([]string, 0) - parts = append(parts, statsValue("querier_wall_time", stats.LoadWallTime())) - parts = append(parts, statsValue("response_time", queryResponseTime)) - headers.Set(ServiceTimingHeaderName, strings.Join(parts, ", ")) - } -} - -func statsValue(name string, d time.Duration) string { - durationInMs := strconv.FormatFloat(float64(d)/float64(time.Millisecond), 'f', -1, 64) - return name + ";dur=" + durationInMs -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go deleted file mode 100644 index d9ba57ccb..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go +++ /dev/null @@ -1,58 +0,0 @@ -package transport - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "net/http" - - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/httpgrpc/server" -) - -// 
GrpcRoundTripper is similar to http.RoundTripper, but works with HTTP requests converted to protobuf messages. -type GrpcRoundTripper interface { - RoundTripGRPC(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) -} - -func AdaptGrpcRoundTripperToHTTPRoundTripper(r GrpcRoundTripper) http.RoundTripper { - return &grpcRoundTripperAdapter{roundTripper: r} -} - -// This adapter wraps GrpcRoundTripper and converted it into http.RoundTripper -type grpcRoundTripperAdapter struct { - roundTripper GrpcRoundTripper -} - -type buffer struct { - buff []byte - io.ReadCloser -} - -func (b *buffer) Bytes() []byte { - return b.buff -} - -func (a *grpcRoundTripperAdapter) RoundTrip(r *http.Request) (*http.Response, error) { - req, err := server.HTTPRequest(r) - if err != nil { - return nil, err - } - - resp, err := a.roundTripper.RoundTripGRPC(r.Context(), req) - if err != nil { - return nil, err - } - - httpResp := &http.Response{ - StatusCode: int(resp.Code), - Body: &buffer{buff: resp.Body, ReadCloser: ioutil.NopCloser(bytes.NewReader(resp.Body))}, - Header: http.Header{}, - ContentLength: int64(len(resp.Body)), - } - for _, h := range resp.Headers { - httpResp.Header[h.Key] = h.Values - } - return httpResp, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go deleted file mode 100644 index 93eaf4b73..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go +++ /dev/null @@ -1,353 +0,0 @@ -package v1 - -import ( - "context" - "flag" - "fmt" - "net/http" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - - "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" - "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/scheduler/queue" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -var ( - errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests") -) - -// Config for a Frontend. -type Config struct { - MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` - QuerierForgetDelay time.Duration `yaml:"querier_forget_delay"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxOutstandingPerTenant, "querier.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per frontend; requests beyond this error with HTTP 429.") - f.DurationVar(&cfg.QuerierForgetDelay, "query-frontend.querier-forget-delay", 0, "If a querier disconnects without sending notification about graceful shutdown, the query-frontend will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.") -} - -type Limits interface { - // Returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. 
- MaxQueriersPerUser(user string) int -} - -// Frontend queues HTTP requests, dispatches them to backends, and handles retries -// for requests which failed. -type Frontend struct { - services.Service - - cfg Config - log log.Logger - limits Limits - - requestQueue *queue.RequestQueue - activeUsers *util.ActiveUsersCleanupService - - // Subservices manager. - subservices *services.Manager - subservicesWatcher *services.FailureWatcher - - // Metrics. - queueLength *prometheus.GaugeVec - discardedRequests *prometheus.CounterVec - numClients prometheus.GaugeFunc - queueDuration prometheus.Histogram -} - -type request struct { - enqueueTime time.Time - queueSpan opentracing.Span - originalCtx context.Context - - request *httpgrpc.HTTPRequest - err chan error - response chan *httpgrpc.HTTPResponse -} - -// New creates a new frontend. Frontend implements service, and must be started and stopped. -func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer) (*Frontend, error) { - f := &Frontend{ - cfg: cfg, - log: log, - limits: limits, - queueLength: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ - Name: "cortex_query_frontend_queue_length", - Help: "Number of queries in the queue.", - }, []string{"user"}), - discardedRequests: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_query_frontend_discarded_requests_total", - Help: "Total number of query requests discarded.", - }, []string{"user"}), - queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_query_frontend_queue_duration_seconds", - Help: "Time spend by requests queued.", - Buckets: prometheus.DefBuckets, - }), - } - - f.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, f.queueLength, f.discardedRequests) - f.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(f.cleanupInactiveUserMetrics) - - var err error - f.subservices, err = services.NewManager(f.requestQueue, f.activeUsers) - if err != nil { - return nil, err - } - - f.numClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_query_frontend_connected_clients", - Help: "Number of worker clients currently connected to the frontend.", - }, f.requestQueue.GetConnectedQuerierWorkersMetric) - - f.Service = services.NewBasicService(f.starting, f.running, f.stopping) - return f, nil -} - -func (f *Frontend) starting(ctx context.Context) error { - f.subservicesWatcher.WatchManager(f.subservices) - - if err := services.StartManagerAndAwaitHealthy(ctx, f.subservices); err != nil { - return errors.Wrap(err, "unable to start frontend subservices") - } - - return nil -} - -func (f *Frontend) running(ctx context.Context) error { - for { - select { - case <-ctx.Done(): - return nil - case err := <-f.subservicesWatcher.Chan(): - return errors.Wrap(err, "frontend subservice failed") - } - } -} - -func (f *Frontend) stopping(_ error) error { - // This will also stop the requests queue, which stop accepting new requests and errors out any pending requests. - return services.StopManagerAndAwaitStopped(context.Background(), f.subservices) -} - -func (f *Frontend) cleanupInactiveUserMetrics(user string) { - f.queueLength.DeleteLabelValues(user) - f.discardedRequests.DeleteLabelValues(user) -} - -// RoundTripGRPC round trips a proto (instead of a HTTP request). 
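RoundTripGRPC below bridges a synchronous caller and the queue with two buffer-1 channels; the buffering means whoever processes the request can always deliver its result and move on, even if the caller already abandoned its context. The handshake reduced to a runnable sketch (names are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

type result struct {
	resp string
	err  error
}

// enqueue hands a response channel to a processor and waits for the
// result or caller cancellation. The buffer of 1 mirrors the request
// struct below: the processor's send never blocks, so it cannot leak a
// goroutine when the caller has gone away.
func enqueue(ctx context.Context, work chan<- chan result) (string, error) {
	done := make(chan result, 1)
	select {
	case work <- done:
	case <-ctx.Done():
		return "", ctx.Err()
	}
	select {
	case r := <-done:
		return r.resp, r.err
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	work := make(chan chan result)
	go func() {
		for done := range work {
			done <- result{resp: "ok"}
		}
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(enqueue(ctx, work))
}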
-func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - // Propagate trace context in gRPC too - this will be ignored if using HTTP. - tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) - if tracer != nil && span != nil { - carrier := (*httpgrpcutil.HttpgrpcHeadersCarrier)(req) - err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) - if err != nil { - return nil, err - } - } - - request := request{ - request: req, - originalCtx: ctx, - - // Buffer of 1 to ensure response can be written by the server side - // of the Process stream, even if this goroutine goes away due to - // client context cancellation. - err: make(chan error, 1), - response: make(chan *httpgrpc.HTTPResponse, 1), - } - - if err := f.queueRequest(ctx, &request); err != nil { - return nil, err - } - - select { - case <-ctx.Done(): - return nil, ctx.Err() - - case resp := <-request.response: - return resp, nil - - case err := <-request.err: - return nil, err - } -} - -// Process allows backends to pull requests from the frontend. -func (f *Frontend) Process(server frontendv1pb.Frontend_ProcessServer) error { - querierID, err := getQuerierID(server) - if err != nil { - return err - } - - f.requestQueue.RegisterQuerierConnection(querierID) - defer f.requestQueue.UnregisterQuerierConnection(querierID) - - // If the downstream request(from querier -> frontend) is cancelled, - // we need to ping the condition variable to unblock getNextRequestForQuerier. - // Ideally we'd have ctx aware condition variables... - go func() { - <-server.Context().Done() - f.requestQueue.QuerierDisconnecting() - }() - - lastUserIndex := queue.FirstUser() - - for { - reqWrapper, idx, err := f.requestQueue.GetNextRequestForQuerier(server.Context(), lastUserIndex, querierID) - if err != nil { - return err - } - lastUserIndex = idx - - req := reqWrapper.(*request) - - f.queueDuration.Observe(time.Since(req.enqueueTime).Seconds()) - req.queueSpan.Finish() - - /* - We want to dequeue the next unexpired request from the chosen tenant queue. - The chance of choosing a particular tenant for dequeueing is (1/active_tenants). - This is problematic under load, especially with other middleware enabled such as - querier.split-by-interval, where one request may fan out into many. - If expired requests aren't exhausted before checking another tenant, it would take - n_active_tenants * n_expired_requests_at_front_of_queue requests being processed - before an active request was handled for the tenant in question. - If this tenant meanwhile continued to queue requests, - it's possible that it's own queue would perpetually contain only expired requests. - */ - if req.originalCtx.Err() != nil { - lastUserIndex = lastUserIndex.ReuseLastUser() - continue - } - - // Handle the stream sending & receiving on a goroutine so we can - // monitoring the contexts in a select and cancel things appropriately. - resps := make(chan *frontendv1pb.ClientToFrontend, 1) - errs := make(chan error, 1) - go func() { - err = server.Send(&frontendv1pb.FrontendToClient{ - Type: frontendv1pb.HTTP_REQUEST, - HttpRequest: req.request, - StatsEnabled: stats.IsEnabled(req.originalCtx), - }) - if err != nil { - errs <- err - return - } - - resp, err := server.Recv() - if err != nil { - errs <- err - return - } - - resps <- resp - }() - - select { - // If the upstream request is cancelled, we need to cancel the - // downstream req. Only way we can do that is to close the stream. 
- // The worker client is expecting this semantics. - case <-req.originalCtx.Done(): - return req.originalCtx.Err() - - // Is there was an error handling this request due to network IO, - // then error out this upstream request _and_ stream. - case err := <-errs: - req.err <- err - return err - - // Happy path: merge the stats and propagate the response. - case resp := <-resps: - if stats.ShouldTrackHTTPGRPCResponse(resp.HttpResponse) { - stats := stats.FromContext(req.originalCtx) - stats.Merge(resp.Stats) // Safe if stats is nil. - } - - req.response <- resp.HttpResponse - } - } -} - -func (f *Frontend) NotifyClientShutdown(_ context.Context, req *frontendv1pb.NotifyClientShutdownRequest) (*frontendv1pb.NotifyClientShutdownResponse, error) { - level.Info(f.log).Log("msg", "received shutdown notification from querier", "querier", req.GetClientID()) - f.requestQueue.NotifyQuerierShutdown(req.GetClientID()) - - return &frontendv1pb.NotifyClientShutdownResponse{}, nil -} - -func getQuerierID(server frontendv1pb.Frontend_ProcessServer) (string, error) { - err := server.Send(&frontendv1pb.FrontendToClient{ - Type: frontendv1pb.GET_ID, - // Old queriers don't support GET_ID, and will try to use the request. - // To avoid confusing them, include dummy request. - HttpRequest: &httpgrpc.HTTPRequest{ - Method: "GET", - Url: "/invalid_request_sent_by_frontend", - }, - }) - - if err != nil { - return "", err - } - - resp, err := server.Recv() - - // Old queriers will return empty string, which is fine. All old queriers will be - // treated as single querier with lot of connections. - // (Note: if resp is nil, GetClientID() returns "") - return resp.GetClientID(), err -} - -func (f *Frontend) queueRequest(ctx context.Context, req *request) error { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return err - } - - now := time.Now() - req.enqueueTime = now - req.queueSpan, _ = opentracing.StartSpanFromContext(ctx, "queued") - - // aggregate the max queriers limit in the case of a multi tenant query - maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, f.limits.MaxQueriersPerUser) - - joinedTenantID := tenant.JoinTenantIDs(tenantIDs) - f.activeUsers.UpdateUserTimestamp(joinedTenantID, now) - - err = f.requestQueue.EnqueueRequest(joinedTenantID, req, maxQueriers, nil) - if err == queue.ErrTooManyRequests { - return errTooManyRequest - } - return err -} - -// CheckReady determines if the query frontend is ready. Function parameters/return -// chosen to match the same method in the ingester -func (f *Frontend) CheckReady(_ context.Context) error { - // if we have more than one querier connected we will consider ourselves ready - connectedClients := f.requestQueue.GetConnectedQuerierWorkersMetric() - if connectedClients > 0 { - return nil - } - - msg := fmt.Sprintf("not ready: number of queriers connected to query-frontend is %d", int64(connectedClients)) - level.Info(f.log).Log("msg", msg) - return errors.New(msg) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go deleted file mode 100644 index b8480261e..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go +++ /dev/null @@ -1,1446 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: frontend.proto - -// Protobuf package should not be changed when moving around go packages -// in order to not break backward compatibility. 
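queueRequest above folds the per-tenant max-queriers limits of a multi-tenant query into one number with SmallestPositiveNonZeroIntPerTenant. The rule, as a simplified reading of that helper (0 means unlimited, so the strictest finite limit wins and 0 survives only when every tenant is unlimited):

package main

import "fmt"

// smallestPositiveNonZero returns the smallest limit greater than zero,
// or 0 when every tenant reports 0 (unlimited).
func smallestPositiveNonZero(tenants []string, limit func(string) int) int {
	result := 0
	for _, t := range tenants {
		if v := limit(t); v > 0 && (result == 0 || v < result) {
			result = v
		}
	}
	return result
}

func main() {
	limits := map[string]int{"team-a": 10, "team-b": 4, "team-c": 0}
	tenants := []string{"team-a", "team-b", "team-c"}
	fmt.Println(smallestPositiveNonZero(tenants, func(t string) int { return limits[t] })) // 4
}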
- -package frontendv1pb - -import ( - context "context" - fmt "fmt" - stats "github.com/cortexproject/cortex/pkg/querier/stats" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - httpgrpc "github.com/weaveworks/common/httpgrpc" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Type int32 - -const ( - HTTP_REQUEST Type = 0 - GET_ID Type = 1 -) - -var Type_name = map[int32]string{ - 0: "HTTP_REQUEST", - 1: "GET_ID", -} - -var Type_value = map[string]int32{ - "HTTP_REQUEST": 0, - "GET_ID": 1, -} - -func (Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_eca3873955a29cfe, []int{0} -} - -type FrontendToClient struct { - HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,1,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` - Type Type `protobuf:"varint,2,opt,name=type,proto3,enum=frontend.Type" json:"type,omitempty"` - // Whether query statistics tracking should be enabled. The response will include - // statistics only when this option is enabled. - StatsEnabled bool `protobuf:"varint,3,opt,name=statsEnabled,proto3" json:"statsEnabled,omitempty"` -} - -func (m *FrontendToClient) Reset() { *m = FrontendToClient{} } -func (*FrontendToClient) ProtoMessage() {} -func (*FrontendToClient) Descriptor() ([]byte, []int) { - return fileDescriptor_eca3873955a29cfe, []int{0} -} -func (m *FrontendToClient) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FrontendToClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FrontendToClient.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FrontendToClient) XXX_Merge(src proto.Message) { - xxx_messageInfo_FrontendToClient.Merge(m, src) -} -func (m *FrontendToClient) XXX_Size() int { - return m.Size() -} -func (m *FrontendToClient) XXX_DiscardUnknown() { - xxx_messageInfo_FrontendToClient.DiscardUnknown(m) -} - -var xxx_messageInfo_FrontendToClient proto.InternalMessageInfo - -func (m *FrontendToClient) GetHttpRequest() *httpgrpc.HTTPRequest { - if m != nil { - return m.HttpRequest - } - return nil -} - -func (m *FrontendToClient) GetType() Type { - if m != nil { - return m.Type - } - return HTTP_REQUEST -} - -func (m *FrontendToClient) GetStatsEnabled() bool { - if m != nil { - return m.StatsEnabled - } - return false -} - -type ClientToFrontend struct { - HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,1,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` - ClientID string `protobuf:"bytes,2,opt,name=clientID,proto3" json:"clientID,omitempty"` - Stats *stats.Stats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *ClientToFrontend) Reset() { *m = ClientToFrontend{} } -func (*ClientToFrontend) ProtoMessage() {} -func 
(*ClientToFrontend) Descriptor() ([]byte, []int) { - return fileDescriptor_eca3873955a29cfe, []int{1} -} -func (m *ClientToFrontend) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientToFrontend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClientToFrontend.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientToFrontend) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientToFrontend.Merge(m, src) -} -func (m *ClientToFrontend) XXX_Size() int { - return m.Size() -} -func (m *ClientToFrontend) XXX_DiscardUnknown() { - xxx_messageInfo_ClientToFrontend.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientToFrontend proto.InternalMessageInfo - -func (m *ClientToFrontend) GetHttpResponse() *httpgrpc.HTTPResponse { - if m != nil { - return m.HttpResponse - } - return nil -} - -func (m *ClientToFrontend) GetClientID() string { - if m != nil { - return m.ClientID - } - return "" -} - -func (m *ClientToFrontend) GetStats() *stats.Stats { - if m != nil { - return m.Stats - } - return nil -} - -type NotifyClientShutdownRequest struct { - ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` -} - -func (m *NotifyClientShutdownRequest) Reset() { *m = NotifyClientShutdownRequest{} } -func (*NotifyClientShutdownRequest) ProtoMessage() {} -func (*NotifyClientShutdownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_eca3873955a29cfe, []int{2} -} -func (m *NotifyClientShutdownRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NotifyClientShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NotifyClientShutdownRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NotifyClientShutdownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NotifyClientShutdownRequest.Merge(m, src) -} -func (m *NotifyClientShutdownRequest) XXX_Size() int { - return m.Size() -} -func (m *NotifyClientShutdownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NotifyClientShutdownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NotifyClientShutdownRequest proto.InternalMessageInfo - -func (m *NotifyClientShutdownRequest) GetClientID() string { - if m != nil { - return m.ClientID - } - return "" -} - -type NotifyClientShutdownResponse struct { -} - -func (m *NotifyClientShutdownResponse) Reset() { *m = NotifyClientShutdownResponse{} } -func (*NotifyClientShutdownResponse) ProtoMessage() {} -func (*NotifyClientShutdownResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_eca3873955a29cfe, []int{3} -} -func (m *NotifyClientShutdownResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NotifyClientShutdownResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NotifyClientShutdownResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NotifyClientShutdownResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NotifyClientShutdownResponse.Merge(m, src) -} -func (m *NotifyClientShutdownResponse) XXX_Size() int { - return m.Size() -} -func (m 
*NotifyClientShutdownResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NotifyClientShutdownResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NotifyClientShutdownResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("frontend.Type", Type_name, Type_value) - proto.RegisterType((*FrontendToClient)(nil), "frontend.FrontendToClient") - proto.RegisterType((*ClientToFrontend)(nil), "frontend.ClientToFrontend") - proto.RegisterType((*NotifyClientShutdownRequest)(nil), "frontend.NotifyClientShutdownRequest") - proto.RegisterType((*NotifyClientShutdownResponse)(nil), "frontend.NotifyClientShutdownResponse") -} - -func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } - -var fileDescriptor_eca3873955a29cfe = []byte{ - // 496 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0x3d, 0x50, 0x4a, 0x98, 0x44, 0x91, 0xb5, 0x02, 0x14, 0x19, 0xb4, 0x8a, 0x2c, 0x40, - 0x11, 0x12, 0x36, 0x04, 0x24, 0x04, 0x12, 0x97, 0xd2, 0x50, 0x7a, 0x41, 0xc5, 0x31, 0x17, 0x2e, - 0x55, 0xec, 0x6c, 0x9c, 0xd0, 0xc6, 0xeb, 0xda, 0xeb, 0x86, 0xdc, 0x78, 0x02, 0x84, 0xc4, 0x4b, - 0xf0, 0x0c, 0x3c, 0x01, 0xc7, 0x1c, 0x7b, 0x24, 0xce, 0x85, 0x63, 0x1f, 0x01, 0x65, 0xd7, 0x71, - 0x9d, 0xa8, 0x82, 0xcb, 0x6a, 0xc7, 0x33, 0xff, 0xcc, 0x37, 0xbf, 0x17, 0xeb, 0x83, 0x98, 0x87, - 0x82, 0x85, 0x7d, 0x2b, 0x8a, 0xb9, 0xe0, 0xa4, 0xb2, 0x8a, 0x8d, 0x47, 0xc1, 0x48, 0x0c, 0x53, - 0xcf, 0xf2, 0xf9, 0xd8, 0x0e, 0x78, 0xc0, 0x6d, 0x59, 0xe0, 0xa5, 0x03, 0x19, 0xc9, 0x40, 0xde, - 0x94, 0xd0, 0x78, 0x56, 0x2a, 0x9f, 0xb0, 0xde, 0x29, 0x9b, 0xf0, 0xf8, 0x28, 0xb1, 0x7d, 0x3e, - 0x1e, 0xf3, 0xd0, 0x1e, 0x0a, 0x11, 0x05, 0x71, 0xe4, 0x17, 0x97, 0x5c, 0xf5, 0xaa, 0xa4, 0xf2, - 0x79, 0x2c, 0xd8, 0xe7, 0x28, 0xe6, 0x9f, 0x98, 0x2f, 0xf2, 0xc8, 0x8e, 0x8e, 0x02, 0xfb, 0x24, - 0x65, 0xf1, 0x88, 0xc5, 0x76, 0x22, 0x7a, 0x22, 0x51, 0xa7, 0x92, 0x9b, 0xdf, 0x01, 0xf5, 0x37, - 0x39, 0xb0, 0xcb, 0x5f, 0x1f, 0x8f, 0x58, 0x28, 0xc8, 0x73, 0xac, 0x2e, 0xa7, 0x38, 0xec, 0x24, - 0x65, 0x89, 0x68, 0x40, 0x13, 0x5a, 0xd5, 0xf6, 0x2d, 0xab, 0x98, 0xfc, 0xd6, 0x75, 0x0f, 0xf2, - 0xa4, 0x53, 0xae, 0x24, 0x26, 0x6e, 0x89, 0x69, 0xc4, 0x1a, 0x57, 0x9a, 0xd0, 0xaa, 0xb7, 0xeb, - 0x56, 0x61, 0x8d, 0x3b, 0x8d, 0x98, 0x23, 0x73, 0xc4, 0xc4, 0x9a, 0x04, 0xe8, 0x84, 0x3d, 0xef, - 0x98, 0xf5, 0x1b, 0x57, 0x9b, 0xd0, 0xaa, 0x38, 0x6b, 0xdf, 0xcc, 0xaf, 0x80, 0xba, 0x62, 0x71, - 0xf9, 0x8a, 0x8e, 0xbc, 0xc4, 0x9a, 0x9a, 0x95, 0x44, 0x3c, 0x4c, 0x58, 0x8e, 0x75, 0x7b, 0x13, - 0x4b, 0x65, 0x9d, 0xb5, 0x5a, 0x62, 0x60, 0xc5, 0x97, 0xfd, 0xf6, 0x77, 0x25, 0xdc, 0x0d, 0xa7, - 0x88, 0x89, 0x89, 0xd7, 0xe4, 0x70, 0x49, 0x52, 0x6d, 0xd7, 0x2c, 0xe5, 0x4f, 0x77, 0x79, 0x3a, - 0x2a, 0x65, 0xbe, 0xc0, 0x3b, 0xef, 0xb8, 0x18, 0x0d, 0xa6, 0x8a, 0xaa, 0x3b, 0x4c, 0x45, 0x9f, - 0x4f, 0xc2, 0xd5, 0xde, 0xe5, 0xf6, 0xb0, 0xde, 0xde, 0xa4, 0x78, 0xf7, 0x72, 0xa9, 0x42, 0x7b, - 0x78, 0x0f, 0xb7, 0x96, 0xee, 0x10, 0x1d, 0x6b, 0xcb, 0x05, 0x0e, 0x9d, 0xce, 0xfb, 0x0f, 0x9d, - 0xae, 0xab, 0x6b, 0x04, 0x71, 0x7b, 0xaf, 0xe3, 0x1e, 0xee, 0xef, 0xea, 0xd0, 0xfe, 0x09, 0x58, - 0x29, 0x9c, 0xd8, 0xc3, 0xeb, 0x07, 0x31, 0xf7, 0x59, 0x92, 0x10, 0xe3, 0xc2, 0xe3, 0x4d, 0xc3, - 0x8c, 0x52, 0x6e, 0xf3, 0x17, 0x9b, 0x5a, 0x0b, 0x1e, 0x03, 0x61, 0x78, 0xf3, 0x32, 0x36, 0x72, - 0xff, 0x42, 0xf9, 0x8f, 0xb5, 0x8d, 0x07, 0xff, 0x2b, 0x53, 0x2b, 0xee, 0xec, 0xcc, 0xe6, 0x54, - 0x3b, 0x9b, 0x53, 0xed, 0x7c, 0x4e, 0xe1, 0x4b, 0x46, 0xe1, 0x47, 0x46, 0xe1, 0x57, 0x46, 
0x61, - 0x96, 0x51, 0xf8, 0x9d, 0x51, 0xf8, 0x93, 0x51, 0xed, 0x3c, 0xa3, 0xf0, 0x6d, 0x41, 0xb5, 0xd9, - 0x82, 0x6a, 0x67, 0x0b, 0xaa, 0x7d, 0xac, 0xad, 0x9a, 0x9f, 0x3e, 0x89, 0x3c, 0x6f, 0x5b, 0xbe, - 0xd7, 0xa7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x1f, 0xe0, 0x35, 0xe6, 0x6f, 0x03, 0x00, 0x00, -} - -func (x Type) String() string { - s, ok := Type_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *FrontendToClient) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*FrontendToClient) - if !ok { - that2, ok := that.(FrontendToClient) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.HttpRequest.Equal(that1.HttpRequest) { - return false - } - if this.Type != that1.Type { - return false - } - if this.StatsEnabled != that1.StatsEnabled { - return false - } - return true -} -func (this *ClientToFrontend) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ClientToFrontend) - if !ok { - that2, ok := that.(ClientToFrontend) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.HttpResponse.Equal(that1.HttpResponse) { - return false - } - if this.ClientID != that1.ClientID { - return false - } - if !this.Stats.Equal(that1.Stats) { - return false - } - return true -} -func (this *NotifyClientShutdownRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*NotifyClientShutdownRequest) - if !ok { - that2, ok := that.(NotifyClientShutdownRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ClientID != that1.ClientID { - return false - } - return true -} -func (this *NotifyClientShutdownResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*NotifyClientShutdownResponse) - if !ok { - that2, ok := that.(NotifyClientShutdownResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *FrontendToClient) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&frontendv1pb.FrontendToClient{") - if this.HttpRequest != nil { - s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") - } - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "StatsEnabled: "+fmt.Sprintf("%#v", this.StatsEnabled)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ClientToFrontend) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&frontendv1pb.ClientToFrontend{") - if this.HttpResponse != nil { - s = append(s, "HttpResponse: "+fmt.Sprintf("%#v", this.HttpResponse)+",\n") - } - s = append(s, "ClientID: "+fmt.Sprintf("%#v", this.ClientID)+",\n") - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *NotifyClientShutdownRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&frontendv1pb.NotifyClientShutdownRequest{") - s = append(s, 
"ClientID: "+fmt.Sprintf("%#v", this.ClientID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *NotifyClientShutdownResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&frontendv1pb.NotifyClientShutdownResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringFrontend(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// FrontendClient is the client API for Frontend service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type FrontendClient interface { - // After calling this method, client enters a loop, in which it waits for - // a "FrontendToClient" message and replies with single "ClientToFrontend" message. - Process(ctx context.Context, opts ...grpc.CallOption) (Frontend_ProcessClient, error) - // The client notifies the query-frontend that it started a graceful shutdown. - NotifyClientShutdown(ctx context.Context, in *NotifyClientShutdownRequest, opts ...grpc.CallOption) (*NotifyClientShutdownResponse, error) -} - -type frontendClient struct { - cc *grpc.ClientConn -} - -func NewFrontendClient(cc *grpc.ClientConn) FrontendClient { - return &frontendClient{cc} -} - -func (c *frontendClient) Process(ctx context.Context, opts ...grpc.CallOption) (Frontend_ProcessClient, error) { - stream, err := c.cc.NewStream(ctx, &_Frontend_serviceDesc.Streams[0], "/frontend.Frontend/Process", opts...) - if err != nil { - return nil, err - } - x := &frontendProcessClient{stream} - return x, nil -} - -type Frontend_ProcessClient interface { - Send(*ClientToFrontend) error - Recv() (*FrontendToClient, error) - grpc.ClientStream -} - -type frontendProcessClient struct { - grpc.ClientStream -} - -func (x *frontendProcessClient) Send(m *ClientToFrontend) error { - return x.ClientStream.SendMsg(m) -} - -func (x *frontendProcessClient) Recv() (*FrontendToClient, error) { - m := new(FrontendToClient) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *frontendClient) NotifyClientShutdown(ctx context.Context, in *NotifyClientShutdownRequest, opts ...grpc.CallOption) (*NotifyClientShutdownResponse, error) { - out := new(NotifyClientShutdownResponse) - err := c.cc.Invoke(ctx, "/frontend.Frontend/NotifyClientShutdown", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// FrontendServer is the server API for Frontend service. -type FrontendServer interface { - // After calling this method, client enters a loop, in which it waits for - // a "FrontendToClient" message and replies with single "ClientToFrontend" message. - Process(Frontend_ProcessServer) error - // The client notifies the query-frontend that it started a graceful shutdown. - NotifyClientShutdown(context.Context, *NotifyClientShutdownRequest) (*NotifyClientShutdownResponse, error) -} - -// UnimplementedFrontendServer can be embedded to have forward compatible implementations. 
-type UnimplementedFrontendServer struct { -} - -func (*UnimplementedFrontendServer) Process(srv Frontend_ProcessServer) error { - return status.Errorf(codes.Unimplemented, "method Process not implemented") -} -func (*UnimplementedFrontendServer) NotifyClientShutdown(ctx context.Context, req *NotifyClientShutdownRequest) (*NotifyClientShutdownResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NotifyClientShutdown not implemented") -} - -func RegisterFrontendServer(s *grpc.Server, srv FrontendServer) { - s.RegisterService(&_Frontend_serviceDesc, srv) -} - -func _Frontend_Process_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(FrontendServer).Process(&frontendProcessServer{stream}) -} - -type Frontend_ProcessServer interface { - Send(*FrontendToClient) error - Recv() (*ClientToFrontend, error) - grpc.ServerStream -} - -type frontendProcessServer struct { - grpc.ServerStream -} - -func (x *frontendProcessServer) Send(m *FrontendToClient) error { - return x.ServerStream.SendMsg(m) -} - -func (x *frontendProcessServer) Recv() (*ClientToFrontend, error) { - m := new(ClientToFrontend) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Frontend_NotifyClientShutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NotifyClientShutdownRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FrontendServer).NotifyClientShutdown(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/frontend.Frontend/NotifyClientShutdown", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FrontendServer).NotifyClientShutdown(ctx, req.(*NotifyClientShutdownRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Frontend_serviceDesc = grpc.ServiceDesc{ - ServiceName: "frontend.Frontend", - HandlerType: (*FrontendServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NotifyClientShutdown", - Handler: _Frontend_NotifyClientShutdown_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Process", - Handler: _Frontend_Process_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "frontend.proto", -} - -func (m *FrontendToClient) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FrontendToClient) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FrontendToClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StatsEnabled { - i-- - if m.StatsEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.Type != 0 { - i = encodeVarintFrontend(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 - } - if m.HttpRequest != nil { - { - size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintFrontend(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ClientToFrontend) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *ClientToFrontend) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClientToFrontend) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Stats != nil { - { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintFrontend(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.ClientID) > 0 { - i -= len(m.ClientID) - copy(dAtA[i:], m.ClientID) - i = encodeVarintFrontend(dAtA, i, uint64(len(m.ClientID))) - i-- - dAtA[i] = 0x12 - } - if m.HttpResponse != nil { - { - size, err := m.HttpResponse.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintFrontend(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NotifyClientShutdownRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NotifyClientShutdownRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NotifyClientShutdownRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ClientID) > 0 { - i -= len(m.ClientID) - copy(dAtA[i:], m.ClientID) - i = encodeVarintFrontend(dAtA, i, uint64(len(m.ClientID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NotifyClientShutdownResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NotifyClientShutdownResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NotifyClientShutdownResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintFrontend(dAtA []byte, offset int, v uint64) int { - offset -= sovFrontend(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *FrontendToClient) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.HttpRequest != nil { - l = m.HttpRequest.Size() - n += 1 + l + sovFrontend(uint64(l)) - } - if m.Type != 0 { - n += 1 + sovFrontend(uint64(m.Type)) - } - if m.StatsEnabled { - n += 2 - } - return n -} - -func (m *ClientToFrontend) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.HttpResponse != nil { - l = m.HttpResponse.Size() - n += 1 + l + sovFrontend(uint64(l)) - } - l = len(m.ClientID) - if l > 0 { - n += 1 + l + sovFrontend(uint64(l)) - } - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovFrontend(uint64(l)) - } - return n -} - -func (m *NotifyClientShutdownRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ClientID) - if l > 0 { - n += 1 + l + sovFrontend(uint64(l)) - } - return n -} - -func (m *NotifyClientShutdownResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovFrontend(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozFrontend(x uint64) (n int) { - return 
sovFrontend(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *FrontendToClient) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FrontendToClient{`, - `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `StatsEnabled:` + fmt.Sprintf("%v", this.StatsEnabled) + `,`, - `}`, - }, "") - return s -} -func (this *ClientToFrontend) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClientToFrontend{`, - `HttpResponse:` + strings.Replace(fmt.Sprintf("%v", this.HttpResponse), "HTTPResponse", "httpgrpc.HTTPResponse", 1) + `,`, - `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Stats", "stats.Stats", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NotifyClientShutdownRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NotifyClientShutdownRequest{`, - `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, - `}`, - }, "") - return s -} -func (this *NotifyClientShutdownResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NotifyClientShutdownResponse{`, - `}`, - }, "") - return s -} -func valueToStringFrontend(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *FrontendToClient) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FrontendToClient: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FrontendToClient: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthFrontend - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthFrontend - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HttpRequest == nil { - m.HttpRequest = &httpgrpc.HTTPRequest{} - } - if err := m.HttpRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StatsEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StatsEnabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipFrontend(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientToFrontend) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientToFrontend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientToFrontend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpResponse", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthFrontend - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthFrontend - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HttpResponse == nil { - m.HttpResponse = &httpgrpc.HTTPResponse{} - } - if err := m.HttpResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthFrontend - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthFrontend - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthFrontend - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthFrontend - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stats == nil { - m.Stats = &stats.Stats{} - } - if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipFrontend(dAtA[iNdEx:]) - if err != nil { - return 
err - } - if skippy < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NotifyClientShutdownRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NotifyClientShutdownRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NotifyClientShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthFrontend - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthFrontend - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipFrontend(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NotifyClientShutdownResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NotifyClientShutdownResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NotifyClientShutdownResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipFrontend(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipFrontend(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthFrontend - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthFrontend - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipFrontend(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthFrontend - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthFrontend = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowFrontend = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto deleted file mode 100644 index 231f918da..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -// Protobuf package should not be changed when moving around go packages -// in order to not break backward compatibility. -package frontend; - -option go_package = "frontendv1pb"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; -import "github.com/cortexproject/cortex/pkg/querier/stats/stats.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service Frontend { - // After calling this method, client enters a loop, in which it waits for - // a "FrontendToClient" message and replies with single "ClientToFrontend" message. - rpc Process(stream ClientToFrontend) returns (stream FrontendToClient) {}; - - // The client notifies the query-frontend that it started a graceful shutdown. - rpc NotifyClientShutdown(NotifyClientShutdownRequest) returns (NotifyClientShutdownResponse); -} - -enum Type { - HTTP_REQUEST = 0; - GET_ID = 1; -} - -message FrontendToClient { - httpgrpc.HTTPRequest httpRequest = 1; - Type type = 2; - - // Whether query statistics tracking should be enabled. The response will include - // statistics only when this option is enabled. 
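// The statistics themselves travel back in ClientToFrontend.stats (field 3
// of the message below).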
- bool statsEnabled = 3; -} - -message ClientToFrontend { - httpgrpc.HTTPResponse httpResponse = 1; - string clientID = 2; - stats.Stats stats = 3; -} - -message NotifyClientShutdownRequest { - string clientID = 1; -} - -message NotifyClientShutdownResponse {} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go deleted file mode 100644 index fc36774b3..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go +++ /dev/null @@ -1,319 +0,0 @@ -package v2 - -import ( - "context" - "flag" - "fmt" - "math/rand" - "net/http" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" - "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// Config for a Frontend. -type Config struct { - SchedulerAddress string `yaml:"scheduler_address"` - DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` - WorkerConcurrency int `yaml:"scheduler_worker_concurrency"` - GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` - - // Used to find local IP address, that is sent to scheduler and querier-worker. - InfNames []string `yaml:"instance_interface_names"` - - // If set, address is not computed from interfaces. - Addr string `yaml:"address" doc:"hidden"` - Port int `doc:"hidden"` -} - -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.SchedulerAddress, "frontend.scheduler-address", "", "DNS hostname used for finding query-schedulers.") - f.DurationVar(&cfg.DNSLookupPeriod, "frontend.scheduler-dns-lookup-period", 10*time.Second, "How often to resolve the scheduler-address, in order to look for new query-scheduler instances.") - f.IntVar(&cfg.WorkerConcurrency, "frontend.scheduler-worker-concurrency", 5, "Number of concurrent workers forwarding queries to single query-scheduler.") - - cfg.InfNames = []string{"eth0", "en0"} - f.Var((*flagext.StringSlice)(&cfg.InfNames), "frontend.instance-interface-names", "Name of network interface to read address from. This address is sent to query-scheduler and querier, which uses it to send the query response back to query-frontend.") - f.StringVar(&cfg.Addr, "frontend.instance-addr", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") - f.IntVar(&cfg.Port, "frontend.instance-port", 0, "Port to advertise to querier (via scheduler) (defaults to server.grpc-listen-port).") - - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("frontend.grpc-client-config", f) -} - -// Frontend implements GrpcRoundTripper. It queues HTTP requests, -// dispatches them to backends via gRPC, and handles retries for requests which failed. -type Frontend struct { - services.Service - - cfg Config - log log.Logger - - lastQueryID atomic.Uint64 - - // frontend workers will read from this channel, and send request to scheduler. 
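// The channel is created unbuffered in NewFrontend, so a send blocks until
// some scheduler worker is ready to pick the request up.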
- requestsCh chan *frontendRequest - - schedulerWorkers *frontendSchedulerWorkers - requests *requestsInProgress -} - -type frontendRequest struct { - queryID uint64 - request *httpgrpc.HTTPRequest - userID string - statsEnabled bool - - cancel context.CancelFunc - - enqueue chan enqueueResult - response chan *frontendv2pb.QueryResultRequest -} - -type enqueueStatus int - -const ( - // Sent to scheduler successfully, and frontend should wait for response now. - waitForResponse enqueueStatus = iota - - // Failed to forward request to scheduler, frontend will try again. - failed -) - -type enqueueResult struct { - status enqueueStatus - - cancelCh chan<- uint64 // Channel that can be used for request cancellation. If nil, cancellation is not possible. -} - -// NewFrontend creates a new frontend. -func NewFrontend(cfg Config, log log.Logger, reg prometheus.Registerer) (*Frontend, error) { - requestsCh := make(chan *frontendRequest) - - schedulerWorkers, err := newFrontendSchedulerWorkers(cfg, fmt.Sprintf("%s:%d", cfg.Addr, cfg.Port), requestsCh, log) - if err != nil { - return nil, err - } - - f := &Frontend{ - cfg: cfg, - log: log, - requestsCh: requestsCh, - schedulerWorkers: schedulerWorkers, - requests: newRequestsInProgress(), - } - // Randomize to avoid getting responses from queries sent before restart, which could lead to mixing results - // between different queries. Note that frontend verifies the user, so it cannot leak results between tenants. - // This isn't perfect, but better than nothing. - f.lastQueryID.Store(rand.Uint64()) - - promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_query_frontend_queries_in_progress", - Help: "Number of queries in progress handled by this frontend.", - }, func() float64 { - return float64(f.requests.count()) - }) - - promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_query_frontend_connected_schedulers", - Help: "Number of schedulers this frontend is connected to.", - }, func() float64 { - return float64(f.schedulerWorkers.getWorkersCount()) - }) - - f.Service = services.NewIdleService(f.starting, f.stopping) - return f, nil -} - -func (f *Frontend) starting(ctx context.Context) error { - return errors.Wrap(services.StartAndAwaitRunning(ctx, f.schedulerWorkers), "failed to start frontend scheduler workers") -} - -func (f *Frontend) stopping(_ error) error { - return errors.Wrap(services.StopAndAwaitTerminated(context.Background(), f.schedulerWorkers), "failed to stop frontend scheduler workers") -} - -// RoundTripGRPC round trips a proto (instead of a HTTP request). -func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - if s := f.State(); s != services.Running { - return nil, fmt.Errorf("frontend not running: %v", s) - } - - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, err - } - userID := tenant.JoinTenantIDs(tenantIDs) - - // Propagate trace context in gRPC too - this will be ignored if using HTTP. 
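// The span context is injected into the request's gRPC headers through the
// httpgrpcutil carrier below, so the querier can continue the same trace.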
- tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) - if tracer != nil && span != nil { - carrier := (*httpgrpcutil.HttpgrpcHeadersCarrier)(req) - if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil { - return nil, err - } - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - freq := &frontendRequest{ - queryID: f.lastQueryID.Inc(), - request: req, - userID: userID, - statsEnabled: stats.IsEnabled(ctx), - - cancel: cancel, - - // Buffer of 1 to ensure response or error can be written to the channel - // even if this goroutine goes away due to client context cancellation. - enqueue: make(chan enqueueResult, 1), - response: make(chan *frontendv2pb.QueryResultRequest, 1), - } - - f.requests.put(freq) - defer f.requests.delete(freq.queryID) - - retries := f.cfg.WorkerConcurrency + 1 // To make sure we hit at least two different schedulers. - -enqueueAgain: - select { - case <-ctx.Done(): - return nil, ctx.Err() - - case f.requestsCh <- freq: - // Enqueued, let's wait for response. - } - - var cancelCh chan<- uint64 - - select { - case <-ctx.Done(): - return nil, ctx.Err() - - case enqRes := <-freq.enqueue: - if enqRes.status == waitForResponse { - cancelCh = enqRes.cancelCh - break // go wait for response. - } else if enqRes.status == failed { - retries-- - if retries > 0 { - goto enqueueAgain - } - } - - return nil, httpgrpc.Errorf(http.StatusInternalServerError, "failed to enqueue request") - } - - select { - case <-ctx.Done(): - if cancelCh != nil { - select { - case cancelCh <- freq.queryID: - // cancellation sent. - default: - // failed to cancel, ignore. - } - } - return nil, ctx.Err() - - case resp := <-freq.response: - if stats.ShouldTrackHTTPGRPCResponse(resp.HttpResponse) { - stats := stats.FromContext(ctx) - stats.Merge(resp.Stats) // Safe if stats is nil. - } - - return resp.HttpResponse, nil - } -} - -func (f *Frontend) QueryResult(ctx context.Context, qrReq *frontendv2pb.QueryResultRequest) (*frontendv2pb.QueryResultResponse, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, err - } - userID := tenant.JoinTenantIDs(tenantIDs) - - req := f.requests.get(qrReq.QueryID) - // It is possible that some old response belonging to different user was received, if frontend has restarted. - // To avoid leaking query results between users, we verify the user here. - // To avoid mixing results from different queries, we randomize queryID counter on start. - if req != nil && req.userID == userID { - select { - case req.response <- qrReq: - // Should always be possible, unless QueryResult is called multiple times with the same queryID. - default: - level.Warn(f.log).Log("msg", "failed to write query result to the response channel", "queryID", qrReq.QueryID, "user", userID) - } - } - - return &frontendv2pb.QueryResultResponse{}, nil -} - -// CheckReady determines if the query frontend is ready. Function parameters/return -// chosen to match the same method in the ingester -func (f *Frontend) CheckReady(_ context.Context) error { - workers := f.schedulerWorkers.getWorkersCount() - - // If frontend is connected to at least one scheduler, we are ready. 
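// With zero connected schedulers no request could ever be forwarded, so
// readiness fails until at least one scheduler connection is up.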
- if workers > 0 { - return nil - } - - msg := fmt.Sprintf("not ready: number of schedulers this worker is connected to is %d", workers) - level.Info(f.log).Log("msg", msg) - return errors.New(msg) -} - -type requestsInProgress struct { - mu sync.Mutex - requests map[uint64]*frontendRequest -} - -func newRequestsInProgress() *requestsInProgress { - return &requestsInProgress{ - requests: map[uint64]*frontendRequest{}, - } -} - -func (r *requestsInProgress) count() int { - r.mu.Lock() - defer r.mu.Unlock() - - return len(r.requests) -} - -func (r *requestsInProgress) put(req *frontendRequest) { - r.mu.Lock() - defer r.mu.Unlock() - - r.requests[req.queryID] = req -} - -func (r *requestsInProgress) delete(queryID uint64) { - r.mu.Lock() - defer r.mu.Unlock() - - delete(r.requests, queryID) -} - -func (r *requestsInProgress) get(queryID uint64) *frontendRequest { - r.mu.Lock() - defer r.mu.Unlock() - - return r.requests[queryID] -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go deleted file mode 100644 index 45af3bc90..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go +++ /dev/null @@ -1,329 +0,0 @@ -package v2 - -import ( - "context" - "net/http" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/weaveworks/common/httpgrpc" - "google.golang.org/grpc" - - "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" - "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/backoff" - "github.com/cortexproject/cortex/pkg/util/services" -) - -type frontendSchedulerWorkers struct { - services.Service - - cfg Config - log log.Logger - frontendAddress string - - // Channel with requests that should be forwarded to the scheduler. - requestsCh <-chan *frontendRequest - - watcher services.Service - - mu sync.Mutex - // Set to nil when stop is called... no more workers are created afterwards. - workers map[string]*frontendSchedulerWorker -} - -func newFrontendSchedulerWorkers(cfg Config, frontendAddress string, requestsCh <-chan *frontendRequest, log log.Logger) (*frontendSchedulerWorkers, error) { - f := &frontendSchedulerWorkers{ - cfg: cfg, - log: log, - frontendAddress: frontendAddress, - requestsCh: requestsCh, - workers: map[string]*frontendSchedulerWorker{}, - } - - w, err := util.NewDNSWatcher(cfg.SchedulerAddress, cfg.DNSLookupPeriod, f) - if err != nil { - return nil, err - } - - f.watcher = w - f.Service = services.NewIdleService(f.starting, f.stopping) - return f, nil -} - -func (f *frontendSchedulerWorkers) starting(ctx context.Context) error { - return services.StartAndAwaitRunning(ctx, f.watcher) -} - -func (f *frontendSchedulerWorkers) stopping(_ error) error { - err := services.StopAndAwaitTerminated(context.Background(), f.watcher) - - f.mu.Lock() - defer f.mu.Unlock() - - for _, w := range f.workers { - w.stop() - } - f.workers = nil - - return err -} - -func (f *frontendSchedulerWorkers) AddressAdded(address string) { - f.mu.Lock() - ws := f.workers - w := f.workers[address] - f.mu.Unlock() - - // Already stopped or we already have worker for this address. 
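// ws == nil means stopping() has already run and cleared f.workers;
// w != nil means a worker for this scheduler address already exists.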
- if ws == nil || w != nil { - return - } - - level.Info(f.log).Log("msg", "adding connection to scheduler", "addr", address) - conn, err := f.connectToScheduler(context.Background(), address) - if err != nil { - level.Error(f.log).Log("msg", "error connecting to scheduler", "addr", address, "err", err) - return - } - - // No worker for this address yet, start a new one. - w = newFrontendSchedulerWorker(conn, address, f.frontendAddress, f.requestsCh, f.cfg.WorkerConcurrency, f.log) - - f.mu.Lock() - defer f.mu.Unlock() - - // Can be nil if stopping has been called already. - if f.workers != nil { - f.workers[address] = w - w.start() - } -} - -func (f *frontendSchedulerWorkers) AddressRemoved(address string) { - level.Info(f.log).Log("msg", "removing connection to scheduler", "addr", address) - - f.mu.Lock() - // This works fine if f.workers is nil already. - w := f.workers[address] - delete(f.workers, address) - f.mu.Unlock() - - if w != nil { - w.stop() - } -} - -// Get number of workers. -func (f *frontendSchedulerWorkers) getWorkersCount() int { - f.mu.Lock() - defer f.mu.Unlock() - - return len(f.workers) -} - -func (f *frontendSchedulerWorkers) connectToScheduler(ctx context.Context, address string) (*grpc.ClientConn, error) { - // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. - opts, err := f.cfg.GRPCClientConfig.DialOption(nil, nil) - if err != nil { - return nil, err - } - - conn, err := grpc.DialContext(ctx, address, opts...) - if err != nil { - return nil, err - } - return conn, nil -} - -// Worker managing single gRPC connection to Scheduler. Each worker starts multiple goroutines for forwarding -// requests and cancellations to scheduler. -type frontendSchedulerWorker struct { - log log.Logger - - conn *grpc.ClientConn - concurrency int - schedulerAddr string - frontendAddr string - - // Context and cancellation used by individual goroutines. - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - - // Shared between all frontend workers. - requestCh <-chan *frontendRequest - - // Cancellation requests for this scheduler are received via this channel. It is passed to frontend after - // query has been enqueued to scheduler. 
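// The frontend sends into this channel with a non-blocking select, so a
// cancellation may be dropped rather than stall RoundTripGRPC.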
- cancelCh chan uint64 -} - -func newFrontendSchedulerWorker(conn *grpc.ClientConn, schedulerAddr string, frontendAddr string, requestCh <-chan *frontendRequest, concurrency int, log log.Logger) *frontendSchedulerWorker { - w := &frontendSchedulerWorker{ - log: log, - conn: conn, - concurrency: concurrency, - schedulerAddr: schedulerAddr, - frontendAddr: frontendAddr, - requestCh: requestCh, - cancelCh: make(chan uint64), - } - w.ctx, w.cancel = context.WithCancel(context.Background()) - - return w -} - -func (w *frontendSchedulerWorker) start() { - client := schedulerpb.NewSchedulerForFrontendClient(w.conn) - for i := 0; i < w.concurrency; i++ { - w.wg.Add(1) - go func() { - defer w.wg.Done() - w.runOne(w.ctx, client) - }() - } -} - -func (w *frontendSchedulerWorker) stop() { - w.cancel() - w.wg.Wait() - if err := w.conn.Close(); err != nil { - level.Error(w.log).Log("msg", "error while closing connection to scheduler", "err", err) - } -} - -func (w *frontendSchedulerWorker) runOne(ctx context.Context, client schedulerpb.SchedulerForFrontendClient) { - backoffConfig := backoff.Config{ - MinBackoff: 50 * time.Millisecond, - MaxBackoff: 1 * time.Second, - } - - backoff := backoff.New(ctx, backoffConfig) - for backoff.Ongoing() { - loop, loopErr := client.FrontendLoop(ctx) - if loopErr != nil { - level.Error(w.log).Log("msg", "error contacting scheduler", "err", loopErr, "addr", w.schedulerAddr) - backoff.Wait() - continue - } - - loopErr = w.schedulerLoop(loop) - if closeErr := loop.CloseSend(); closeErr != nil { - level.Debug(w.log).Log("msg", "failed to close frontend loop", "err", loopErr, "addr", w.schedulerAddr) - } - - if loopErr != nil { - level.Error(w.log).Log("msg", "error sending requests to scheduler", "err", loopErr, "addr", w.schedulerAddr) - backoff.Wait() - continue - } - - backoff.Reset() - } -} - -func (w *frontendSchedulerWorker) schedulerLoop(loop schedulerpb.SchedulerForFrontend_FrontendLoopClient) error { - if err := loop.Send(&schedulerpb.FrontendToScheduler{ - Type: schedulerpb.INIT, - FrontendAddress: w.frontendAddr, - }); err != nil { - return err - } - - if resp, err := loop.Recv(); err != nil || resp.Status != schedulerpb.OK { - if err != nil { - return err - } - return errors.Errorf("unexpected status received for init: %v", resp.Status) - } - - ctx := loop.Context() - - for { - select { - case <-ctx.Done(): - // No need to report error if our internal context is canceled. This can happen during shutdown, - // or when scheduler is no longer resolvable. (It would be nice if this context reported "done" also when - // connection scheduler stops the call, but that doesn't seem to be the case). - // - // Reporting error here would delay reopening the stream (if the worker context is not done yet). - level.Debug(w.log).Log("msg", "stream context finished", "err", ctx.Err()) - return nil - - case req := <-w.requestCh: - err := loop.Send(&schedulerpb.FrontendToScheduler{ - Type: schedulerpb.ENQUEUE, - QueryID: req.queryID, - UserID: req.userID, - HttpRequest: req.request, - FrontendAddress: w.frontendAddr, - StatsEnabled: req.statsEnabled, - }) - - if err != nil { - req.enqueue <- enqueueResult{status: failed} - return err - } - - resp, err := loop.Recv() - if err != nil { - req.enqueue <- enqueueResult{status: failed} - return err - } - - switch resp.Status { - case schedulerpb.OK: - req.enqueue <- enqueueResult{status: waitForResponse, cancelCh: w.cancelCh} - // Response will come from querier. 
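// For the remaining statuses the scheduler will not dispatch the query:
// SHUTTING_DOWN aborts the loop, while ERROR and
// TOO_MANY_REQUESTS_PER_TENANT synthesize an HTTP response (500 or 429)
// so the caller still gets a reply without a querier involved.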
- - case schedulerpb.SHUTTING_DOWN: - // Scheduler is shutting down, report failure to enqueue and stop this loop. - req.enqueue <- enqueueResult{status: failed} - return errors.New("scheduler is shutting down") - - case schedulerpb.ERROR: - req.enqueue <- enqueueResult{status: waitForResponse} - req.response <- &frontendv2pb.QueryResultRequest{ - HttpResponse: &httpgrpc.HTTPResponse{ - Code: http.StatusInternalServerError, - Body: []byte(err.Error()), - }, - } - - case schedulerpb.TOO_MANY_REQUESTS_PER_TENANT: - req.enqueue <- enqueueResult{status: waitForResponse} - req.response <- &frontendv2pb.QueryResultRequest{ - HttpResponse: &httpgrpc.HTTPResponse{ - Code: http.StatusTooManyRequests, - Body: []byte("too many outstanding requests"), - }, - } - } - - case reqID := <-w.cancelCh: - err := loop.Send(&schedulerpb.FrontendToScheduler{ - Type: schedulerpb.CANCEL, - QueryID: reqID, - }) - - if err != nil { - return err - } - - resp, err := loop.Recv() - if err != nil { - return err - } - - // Scheduler may be shutting down, report that. - if resp.Status != schedulerpb.OK { - return errors.Errorf("unexpected status received for cancellation: %v", resp.Status) - } - } - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go deleted file mode 100644 index daeb50a8c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go +++ /dev/null @@ -1,782 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: frontend.proto - -package frontendv2pb - -import ( - context "context" - fmt "fmt" - stats "github.com/cortexproject/cortex/pkg/querier/stats" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - httpgrpc "github.com/weaveworks/common/httpgrpc" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type QueryResultRequest struct { - QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` - HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,2,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` - Stats *stats.Stats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *QueryResultRequest) Reset() { *m = QueryResultRequest{} } -func (*QueryResultRequest) ProtoMessage() {} -func (*QueryResultRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_eca3873955a29cfe, []int{0} -} -func (m *QueryResultRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryResultRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryResultRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryResultRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResultRequest.Merge(m, src) -} -func (m *QueryResultRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryResultRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResultRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResultRequest proto.InternalMessageInfo - -func (m *QueryResultRequest) GetQueryID() uint64 { - if m != nil { - return m.QueryID - } - return 0 -} - -func (m *QueryResultRequest) GetHttpResponse() *httpgrpc.HTTPResponse { - if m != nil { - return m.HttpResponse - } - return nil -} - -func (m *QueryResultRequest) GetStats() *stats.Stats { - if m != nil { - return m.Stats - } - return nil -} - -type QueryResultResponse struct { -} - -func (m *QueryResultResponse) Reset() { *m = QueryResultResponse{} } -func (*QueryResultResponse) ProtoMessage() {} -func (*QueryResultResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_eca3873955a29cfe, []int{1} -} -func (m *QueryResultResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryResultResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryResultResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryResultResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResultResponse.Merge(m, src) -} -func (m *QueryResultResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryResultResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResultResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResultResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*QueryResultRequest)(nil), "frontendv2pb.QueryResultRequest") - proto.RegisterType((*QueryResultResponse)(nil), "frontendv2pb.QueryResultResponse") -} - -func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } - -var fileDescriptor_eca3873955a29cfe = []byte{ - // 351 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcd, 0x4e, 0x3a, 0x31, - 0x14, 0xc5, 0xdb, 0xff, 0xdf, 0x8f, 0xa4, 0x10, 0x17, 0x35, 0x9a, 0x09, 0x8b, 0x06, 0x67, 0xc5, - 0xc6, 0x69, 0x82, 0xae, 0x4c, 0xdc, 0x10, 0x43, 0x74, 0x27, 0x23, 0x2b, 0x77, 0xcc, 0x58, 0x86, - 0x0f, 0x99, 0x96, 0xb6, 0x03, 0xb2, 0xf3, 0x09, 0x8c, 0x8f, 0xe1, 
0xa3, 0xb8, 0x64, 0xc9, 0x52, - 0xca, 0xc6, 0x25, 0x8f, 0x60, 0x68, 0x81, 0x0c, 0x31, 0x71, 0xd3, 0xdc, 0x93, 0x7b, 0x7e, 0xb9, - 0xe7, 0xde, 0xa2, 0xa3, 0xb6, 0xe4, 0xa9, 0x66, 0xe9, 0x53, 0x20, 0x24, 0xd7, 0x1c, 0x17, 0x37, - 0x7a, 0x54, 0x15, 0x51, 0xe9, 0x3c, 0xe9, 0xea, 0x4e, 0x16, 0x05, 0x31, 0x1f, 0xd0, 0x84, 0x27, - 0x9c, 0x5a, 0x53, 0x94, 0xb5, 0xad, 0xb2, 0xc2, 0x56, 0x0e, 0x2e, 0x5d, 0xe6, 0xec, 0x63, 0xd6, - 0x1a, 0xb1, 0x31, 0x97, 0x7d, 0x45, 0x63, 0x3e, 0x18, 0xf0, 0x94, 0x76, 0xb4, 0x16, 0x89, 0x14, - 0xf1, 0xb6, 0x58, 0x53, 0xd7, 0x39, 0x2a, 0xe6, 0x52, 0xb3, 0x17, 0x21, 0x79, 0x8f, 0xc5, 0x7a, - 0xad, 0xa8, 0xe8, 0x27, 0x74, 0x98, 0x31, 0xd9, 0x65, 0x92, 0x2a, 0xdd, 0xd2, 0xca, 0xbd, 0x0e, - 0xf7, 0xdf, 0x20, 0xc2, 0x8d, 0x8c, 0xc9, 0x49, 0xc8, 0x54, 0xf6, 0xac, 0x43, 0x36, 0xcc, 0x98, - 0xd2, 0xd8, 0x43, 0x87, 0x2b, 0x66, 0x72, 0x77, 0xe3, 0xc1, 0x32, 0xac, 0xec, 0x85, 0x1b, 0x89, - 0xaf, 0x50, 0x71, 0x95, 0x20, 0x64, 0x4a, 0xf0, 0x54, 0x31, 0xef, 0x5f, 0x19, 0x56, 0x0a, 0xd5, - 0xd3, 0x60, 0x1b, 0xeb, 0xb6, 0xd9, 0xbc, 0xdf, 0x74, 0xc3, 0x1d, 0x2f, 0xf6, 0xd1, 0xbe, 0x9d, - 0xed, 0xfd, 0xb7, 0x50, 0x31, 0x70, 0x49, 0x1e, 0x56, 0x6f, 0xe8, 0x5a, 0xfe, 0x09, 0x3a, 0xde, - 0xc9, 0xe3, 0xd0, 0x6a, 0x0f, 0xe1, 0xfa, 0xfa, 0xb6, 0x75, 0x2e, 0x1b, 0x6e, 0x1f, 0xdc, 0x44, - 0x85, 0x9c, 0x19, 0x97, 0x83, 0xfc, 0xfd, 0x83, 0xdf, 0x7b, 0x95, 0xce, 0xfe, 0x70, 0xb8, 0x49, - 0x3e, 0xa8, 0xd5, 0xa6, 0x73, 0x02, 0x66, 0x73, 0x02, 0x96, 0x73, 0x02, 0x5f, 0x0d, 0x81, 0x1f, - 0x86, 0xc0, 0x4f, 0x43, 0xe0, 0xd4, 0x10, 0xf8, 0x65, 0x08, 0xfc, 0x36, 0x04, 0x2c, 0x0d, 0x81, - 0xef, 0x0b, 0x02, 0xa6, 0x0b, 0x02, 0x66, 0x0b, 0x02, 0x1e, 0x77, 0xfe, 0x3e, 0x3a, 0xb0, 0xe7, - 0xbd, 0xf8, 0x09, 0x00, 0x00, 0xff, 0xff, 0x02, 0xb0, 0x28, 0xb5, 0x22, 0x02, 0x00, 0x00, -} - -func (this *QueryResultRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryResultRequest) - if !ok { - that2, ok := that.(QueryResultRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.QueryID != that1.QueryID { - return false - } - if !this.HttpResponse.Equal(that1.HttpResponse) { - return false - } - if !this.Stats.Equal(that1.Stats) { - return false - } - return true -} -func (this *QueryResultResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryResultResponse) - if !ok { - that2, ok := that.(QueryResultResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *QueryResultRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&frontendv2pb.QueryResultRequest{") - s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") - if this.HttpResponse != nil { - s = append(s, "HttpResponse: "+fmt.Sprintf("%#v", this.HttpResponse)+",\n") - } - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryResultResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&frontendv2pb.QueryResultResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringFrontend(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { 
- return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// FrontendForQuerierClient is the client API for FrontendForQuerier service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type FrontendForQuerierClient interface { - QueryResult(ctx context.Context, in *QueryResultRequest, opts ...grpc.CallOption) (*QueryResultResponse, error) -} - -type frontendForQuerierClient struct { - cc *grpc.ClientConn -} - -func NewFrontendForQuerierClient(cc *grpc.ClientConn) FrontendForQuerierClient { - return &frontendForQuerierClient{cc} -} - -func (c *frontendForQuerierClient) QueryResult(ctx context.Context, in *QueryResultRequest, opts ...grpc.CallOption) (*QueryResultResponse, error) { - out := new(QueryResultResponse) - err := c.cc.Invoke(ctx, "/frontendv2pb.FrontendForQuerier/QueryResult", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// FrontendForQuerierServer is the server API for FrontendForQuerier service. -type FrontendForQuerierServer interface { - QueryResult(context.Context, *QueryResultRequest) (*QueryResultResponse, error) -} - -// UnimplementedFrontendForQuerierServer can be embedded to have forward compatible implementations. -type UnimplementedFrontendForQuerierServer struct { -} - -func (*UnimplementedFrontendForQuerierServer) QueryResult(ctx context.Context, req *QueryResultRequest) (*QueryResultResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryResult not implemented") -} - -func RegisterFrontendForQuerierServer(s *grpc.Server, srv FrontendForQuerierServer) { - s.RegisterService(&_FrontendForQuerier_serviceDesc, srv) -} - -func _FrontendForQuerier_QueryResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryResultRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FrontendForQuerierServer).QueryResult(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/frontendv2pb.FrontendForQuerier/QueryResult", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FrontendForQuerierServer).QueryResult(ctx, req.(*QueryResultRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _FrontendForQuerier_serviceDesc = grpc.ServiceDesc{ - ServiceName: "frontendv2pb.FrontendForQuerier", - HandlerType: (*FrontendForQuerierServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "QueryResult", - Handler: _FrontendForQuerier_QueryResult_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "frontend.proto", -} - -func (m *QueryResultRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryResultRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryResultRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Stats != nil { - { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintFrontend(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.HttpResponse != nil { - { - size, err := m.HttpResponse.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintFrontend(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.QueryID != 0 { - i = encodeVarintFrontend(dAtA, i, uint64(m.QueryID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryResultResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryResultResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryResultResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintFrontend(dAtA []byte, offset int, v uint64) int { - offset -= sovFrontend(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryResultRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.QueryID != 0 { - n += 1 + sovFrontend(uint64(m.QueryID)) - } - if m.HttpResponse != nil { - l = m.HttpResponse.Size() - n += 1 + l + sovFrontend(uint64(l)) - } - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovFrontend(uint64(l)) - } - return n -} - -func (m *QueryResultResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovFrontend(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozFrontend(x uint64) (n int) { - return sovFrontend(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *QueryResultRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QueryResultRequest{`, - `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, - `HttpResponse:` + strings.Replace(fmt.Sprintf("%v", this.HttpResponse), "HTTPResponse", "httpgrpc.HTTPResponse", 1) + `,`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Stats", "stats.Stats", 1) + `,`, - `}`, - }, "") - return s -} -func (this *QueryResultResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QueryResultResponse{`, - `}`, - }, "") - return s -} -func valueToStringFrontend(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *QueryResultRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryResultRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryResultRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) - } - m.QueryID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QueryID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpResponse", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthFrontend - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthFrontend - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HttpResponse == nil { - m.HttpResponse = &httpgrpc.HTTPResponse{} - } - if err := m.HttpResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthFrontend - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthFrontend - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stats == nil { - m.Stats = &stats.Stats{} - } - if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipFrontend(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryResultResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFrontend - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryResultResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryResultResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipFrontend(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthFrontend - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipFrontend(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthFrontend - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthFrontend - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFrontend - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipFrontend(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthFrontend - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthFrontend = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowFrontend = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto deleted file mode 100644 index b93106d78..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package frontendv2pb; - -option go_package = "frontendv2pb"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; -import "github.com/cortexproject/cortex/pkg/querier/stats/stats.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// Frontend interface exposed to Queriers. Used by queriers to report back the result of the query. -service FrontendForQuerier { - rpc QueryResult (QueryResultRequest) returns (QueryResultResponse) { }; -} - -message QueryResultRequest { - uint64 queryID = 1; - httpgrpc.HTTPResponse httpResponse = 2; - stats.Stats stats = 3; - - // There is no userID field here, because Querier puts userID into the context when - // calling QueryResult, and that is where Frontend expects to find it. 
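
The generated marshal code above writes fields back-to-front with protobuf base-128 varints, and sovFrontend computes the encoded size as (bits.Len64(x|1)+6)/7, i.e. one byte per 7 payload bits. A minimal standalone sketch of the same encoding, using only the standard library and appending forward rather than writing in place from the end as the generated code does:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovFrontend above: the number of bytes needed to
// varint-encode x, at 7 payload bits per byte.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// putVarint appends x using protobuf varint encoding: low 7 bits per
// byte, with the MSB set on every byte except the last.
func putVarint(buf []byte, x uint64) []byte {
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

func main() {
	for _, v := range []uint64{1, 127, 128, 300} {
		fmt.Printf("%d -> %x (size %d)\n", v, putVarint(nil, v), sov(v))
	}
	// 300 encodes as ac 02, and sov(300) = 2, matching the formula.
}
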
-} - -message QueryResultResponse { } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/active_series.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/active_series.go deleted file mode 100644 index 6b7655364..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/active_series.go +++ /dev/null @@ -1,244 +0,0 @@ -package ingester - -import ( - "hash" - "math" - "sync" - "time" - - "github.com/cespare/xxhash" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" -) - -const ( - numActiveSeriesStripes = 512 -) - -// ActiveSeries is keeping track of recently active series for a single tenant. -type ActiveSeries struct { - stripes [numActiveSeriesStripes]activeSeriesStripe -} - -// activeSeriesStripe holds a subset of the series timestamps for a single tenant. -type activeSeriesStripe struct { - // Unix nanoseconds. Only used by purge. Zero = unknown. - // Updated in purge and when old timestamp is used when updating series (in this case, oldestEntryTs is updated - // without holding the lock -- hence the atomic). - oldestEntryTs atomic.Int64 - - mu sync.RWMutex - refs map[uint64][]activeSeriesEntry - active int // Number of active entries in this stripe. Only decreased during purge or clear. -} - -// activeSeriesEntry holds a timestamp for single series. -type activeSeriesEntry struct { - lbs labels.Labels - nanos *atomic.Int64 // Unix timestamp in nanoseconds. Needs to be a pointer because we don't store pointers to entries in the stripe. -} - -func NewActiveSeries() *ActiveSeries { - c := &ActiveSeries{} - - // Stripes are pre-allocated so that we only read on them and no lock is required. - for i := 0; i < numActiveSeriesStripes; i++ { - c.stripes[i].refs = map[uint64][]activeSeriesEntry{} - } - - return c -} - -// Updates series timestamp to 'now'. Function is called to make a copy of labels if entry doesn't exist yet. -func (c *ActiveSeries) UpdateSeries(series labels.Labels, now time.Time, labelsCopy func(labels.Labels) labels.Labels) { - fp := fingerprint(series) - stripeID := fp % numActiveSeriesStripes - - c.stripes[stripeID].updateSeriesTimestamp(now, series, fp, labelsCopy) -} - -var sep = []byte{model.SeparatorByte} - -var hashPool = sync.Pool{New: func() interface{} { return xxhash.New() }} - -func fingerprint(series labels.Labels) uint64 { - sum := hashPool.Get().(hash.Hash64) - defer hashPool.Put(sum) - - sum.Reset() - for _, label := range series { - _, _ = sum.Write(util.YoloBuf(label.Name)) - _, _ = sum.Write(sep) - _, _ = sum.Write(util.YoloBuf(label.Value)) - _, _ = sum.Write(sep) - } - - return sum.Sum64() -} - -// Purge removes expired entries from the cache. This function should be called -// periodically to avoid memory leaks. -func (c *ActiveSeries) Purge(keepUntil time.Time) { - for s := 0; s < numActiveSeriesStripes; s++ { - c.stripes[s].purge(keepUntil) - } -} - -//nolint // Linter reports that this method is unused, but it is. 
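
ActiveSeries above shards its timestamp map across 512 stripes keyed by a label fingerprint, so concurrent UpdateSeries calls rarely contend on the same lock. A minimal sketch of that striping idea, using the standard library's FNV hash instead of xxhash and a plain set instead of timestamp entries; the names here are illustrative, not from the vendored code:

package main

import (
	"fmt"
	"hash/fnv"
	"sync"
)

const numStripes = 512

type stripedSet struct {
	stripes [numStripes]struct {
		mu   sync.RWMutex
		refs map[string]struct{}
	}
}

func newStripedSet() *stripedSet {
	s := &stripedSet{}
	for i := range s.stripes {
		s.stripes[i].refs = map[string]struct{}{}
	}
	return s
}

func fingerprint(key string) uint64 {
	h := fnv.New64a()
	_, _ = h.Write([]byte(key))
	return h.Sum64()
}

// Add locks only one of the 512 stripes, so writers touching
// different series mostly proceed in parallel.
func (s *stripedSet) Add(key string) {
	st := &s.stripes[fingerprint(key)%numStripes]
	st.mu.Lock()
	st.refs[key] = struct{}{}
	st.mu.Unlock()
}

func (s *stripedSet) Len() int {
	total := 0
	for i := range s.stripes {
		s.stripes[i].mu.RLock()
		total += len(s.stripes[i].refs)
		s.stripes[i].mu.RUnlock()
	}
	return total
}

func main() {
	s := newStripedSet()
	s.Add(`{__name__="up", job="api"}`)
	s.Add(`{__name__="up", job="db"}`)
	fmt.Println(s.Len()) // 2
}
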
-func (c *ActiveSeries) clear() { - for s := 0; s < numActiveSeriesStripes; s++ { - c.stripes[s].clear() - } -} - -func (c *ActiveSeries) Active() int { - total := 0 - for s := 0; s < numActiveSeriesStripes; s++ { - total += c.stripes[s].getActive() - } - return total -} - -func (s *activeSeriesStripe) updateSeriesTimestamp(now time.Time, series labels.Labels, fingerprint uint64, labelsCopy func(labels.Labels) labels.Labels) { - nowNanos := now.UnixNano() - - e := s.findEntryForSeries(fingerprint, series) - entryTimeSet := false - if e == nil { - e, entryTimeSet = s.findOrCreateEntryForSeries(fingerprint, series, nowNanos, labelsCopy) - } - - if !entryTimeSet { - if prev := e.Load(); nowNanos > prev { - entryTimeSet = e.CAS(prev, nowNanos) - } - } - - if entryTimeSet { - for prevOldest := s.oldestEntryTs.Load(); nowNanos < prevOldest; { - // If recent purge already removed entries older than "oldest entry timestamp", setting this to 0 will make - // sure that next purge doesn't take the shortcut route. - if s.oldestEntryTs.CAS(prevOldest, 0) { - break - } - } - } -} - -func (s *activeSeriesStripe) findEntryForSeries(fingerprint uint64, series labels.Labels) *atomic.Int64 { - s.mu.RLock() - defer s.mu.RUnlock() - - // Check if already exists within the entries. - for ix, entry := range s.refs[fingerprint] { - if labels.Equal(entry.lbs, series) { - return s.refs[fingerprint][ix].nanos - } - } - - return nil -} - -func (s *activeSeriesStripe) findOrCreateEntryForSeries(fingerprint uint64, series labels.Labels, nowNanos int64, labelsCopy func(labels.Labels) labels.Labels) (*atomic.Int64, bool) { - s.mu.Lock() - defer s.mu.Unlock() - - // Check if already exists within the entries. - for ix, entry := range s.refs[fingerprint] { - if labels.Equal(entry.lbs, series) { - return s.refs[fingerprint][ix].nanos, false - } - } - - s.active++ - e := activeSeriesEntry{ - lbs: labelsCopy(series), - nanos: atomic.NewInt64(nowNanos), - } - - s.refs[fingerprint] = append(s.refs[fingerprint], e) - - return e.nanos, true -} - -//nolint // Linter reports that this method is unused, but it is. -func (s *activeSeriesStripe) clear() { - s.mu.Lock() - defer s.mu.Unlock() - - s.oldestEntryTs.Store(0) - s.refs = map[uint64][]activeSeriesEntry{} - s.active = 0 -} - -func (s *activeSeriesStripe) purge(keepUntil time.Time) { - keepUntilNanos := keepUntil.UnixNano() - if oldest := s.oldestEntryTs.Load(); oldest > 0 && keepUntilNanos <= oldest { - // Nothing to do. - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - active := 0 - - oldest := int64(math.MaxInt64) - for fp, entries := range s.refs { - // Since we do expect very few fingerprint collisions, we - // have an optimized implementation for the common case. - if len(entries) == 1 { - ts := entries[0].nanos.Load() - if ts < keepUntilNanos { - delete(s.refs, fp) - continue - } - - active++ - if ts < oldest { - oldest = ts - } - continue - } - - // We have more entries, which means there's a collision, - // so we have to iterate over the entries. - for i := 0; i < len(entries); { - ts := entries[i].nanos.Load() - if ts < keepUntilNanos { - entries = append(entries[:i], entries[i+1:]...) 
- } else { - if ts < oldest { - oldest = ts - } - - i++ - } - } - - // Either update or delete the entries in the map - if cnt := len(entries); cnt == 0 { - delete(s.refs, fp) - } else { - active += cnt - s.refs[fp] = entries - } - } - - if oldest == math.MaxInt64 { - s.oldestEntryTs.Store(0) - } else { - s.oldestEntryTs.Store(oldest) - } - s.active = active -} - -func (s *activeSeriesStripe) getActive() int { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.active -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go deleted file mode 100644 index febdc1b4f..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go +++ /dev/null @@ -1,73 +0,0 @@ -package ingester - -import ( - "fmt" - "net/http" - - "github.com/prometheus/prometheus/model/labels" - "github.com/weaveworks/common/httpgrpc" -) - -type validationError struct { - err error // underlying error - errorType string - code int - noReport bool // if true, error will be counted but not reported to caller - labels labels.Labels -} - -func makeLimitError(errorType string, err error) error { - return &validationError{ - errorType: errorType, - err: err, - code: http.StatusBadRequest, - } -} - -func makeNoReportError(errorType string) error { - return &validationError{ - errorType: errorType, - noReport: true, - } -} - -func makeMetricValidationError(errorType string, labels labels.Labels, err error) error { - return &validationError{ - errorType: errorType, - err: err, - code: http.StatusBadRequest, - labels: labels, - } -} - -func makeMetricLimitError(errorType string, labels labels.Labels, err error) error { - return &validationError{ - errorType: errorType, - err: err, - code: http.StatusBadRequest, - labels: labels, - } -} - -func (e *validationError) Error() string { - if e.err == nil { - return e.errorType - } - if e.labels == nil { - return e.err.Error() - } - return fmt.Sprintf("%s for series %s", e.err.Error(), e.labels.String()) -} - -// returns a HTTP gRPC error than is correctly forwarded over gRPC, with no reference to `e` retained. -func grpcForwardableError(userID string, code int, e error) error { - return httpgrpc.ErrorFromHTTPResponse(&httpgrpc.HTTPResponse{ - Code: int32(code), - Body: []byte(wrapWithUser(e, userID).Error()), - }) -} - -// wrapWithUser prepends the user to the error. It does not retain a reference to err. -func wrapWithUser(err error, userID string) error { - return fmt.Errorf("user=%s: %s", userID, err) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go deleted file mode 100644 index b79e00809..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go +++ /dev/null @@ -1,430 +0,0 @@ -package ingester - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/go-kit/log/level" - ot "github.com/opentracing/opentracing-go" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "golang.org/x/time/rate" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/log" -) - -const ( - // Backoff for retrying 'immediate' flushes. Only counts for queue - // position, not wallclock time. 
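
updateSeriesTimestamp above advances a per-entry timestamp only forward, and invalidates the stripe's cached oldestEntryTs with a compare-and-swap so a concurrent purge never trusts a stale shortcut. A minimal sketch of that lock-free "advance only if newer" pattern, using the standard sync/atomic rather than go.uber.org/atomic:

package main

import (
	"fmt"
	"sync/atomic"
)

// advanceIfNewer stores nowNanos into ts only if it is strictly newer,
// retrying on races; it reports whether the store happened.
func advanceIfNewer(ts *atomic.Int64, nowNanos int64) bool {
	for {
		prev := ts.Load()
		if nowNanos <= prev {
			return false // a newer value is already stored
		}
		if ts.CompareAndSwap(prev, nowNanos) {
			return true
		}
	}
}

func main() {
	var ts atomic.Int64
	fmt.Println(advanceIfNewer(&ts, 100)) // true
	fmt.Println(advanceIfNewer(&ts, 50))  // false: older than current
	fmt.Println(advanceIfNewer(&ts, 200)) // true
}
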
- flushBackoff = 1 * time.Second - // Lower bound on flushes per check period for rate-limiter - minFlushes = 100 -) - -// Flush triggers a flush of all the chunks and closes the flush queues. -// Called from the Lifecycler as part of the ingester shutdown. -func (i *Ingester) Flush() { - if i.cfg.BlocksStorageEnabled { - i.v2LifecyclerFlush() - return - } - - level.Info(i.logger).Log("msg", "starting to flush all the chunks") - i.sweepUsers(true) - level.Info(i.logger).Log("msg", "chunks queued for flushing") - - // Close the flush queues, to unblock waiting workers. - for _, flushQueue := range i.flushQueues { - flushQueue.Close() - } - - i.flushQueuesDone.Wait() - level.Info(i.logger).Log("msg", "flushing of chunks complete") -} - -// FlushHandler triggers a flush of all in memory chunks. Mainly used for -// local testing. -func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) { - if i.cfg.BlocksStorageEnabled { - i.v2FlushHandler(w, r) - return - } - - level.Info(i.logger).Log("msg", "starting to flush all the chunks") - i.sweepUsers(true) - level.Info(i.logger).Log("msg", "chunks queued for flushing") - w.WriteHeader(http.StatusNoContent) -} - -type flushOp struct { - from model.Time - userID string - fp model.Fingerprint - immediate bool -} - -func (o *flushOp) Key() string { - return fmt.Sprintf("%s-%d-%v", o.userID, o.fp, o.immediate) -} - -func (o *flushOp) Priority() int64 { - return -int64(o.from) -} - -// sweepUsers periodically schedules series for flushing and garbage collects users with no series -func (i *Ingester) sweepUsers(immediate bool) { - if i.chunkStore == nil { - return - } - - oldest := model.Time(0) - - for id, state := range i.userStates.cp() { - for pair := range state.fpToSeries.iter() { - state.fpLocker.Lock(pair.fp) - i.sweepSeries(id, pair.fp, pair.series, immediate) - i.removeFlushedChunks(state, pair.fp, pair.series) - first := pair.series.firstUnflushedChunkTime() - state.fpLocker.Unlock(pair.fp) - - if first > 0 && (oldest == 0 || first < oldest) { - oldest = first - } - } - } - - i.metrics.oldestUnflushedChunkTimestamp.Set(float64(oldest.Unix())) - i.setFlushRate() -} - -// Compute a rate such to spread calls to the store over nearly all of the flush period, -// for example if we have 600 items in the queue and period 1 min we will send 10.5 per second. -// Note if the store can't keep up with this rate then it doesn't make any difference. -func (i *Ingester) setFlushRate() { - totalQueueLength := 0 - for _, q := range i.flushQueues { - totalQueueLength += q.Length() - } - const fudge = 1.05 // aim to finish a little bit before the end of the period - flushesPerSecond := float64(totalQueueLength) / i.cfg.FlushCheckPeriod.Seconds() * fudge - // Avoid going very slowly with tiny queues - if flushesPerSecond*i.cfg.FlushCheckPeriod.Seconds() < minFlushes { - flushesPerSecond = minFlushes / i.cfg.FlushCheckPeriod.Seconds() - } - level.Debug(i.logger).Log("msg", "computed flush rate", "rate", flushesPerSecond) - i.flushRateLimiter.SetLimit(rate.Limit(flushesPerSecond)) -} - -type flushReason int8 - -const ( - noFlush = iota - reasonImmediate - reasonMultipleChunksInSeries - reasonAged - reasonIdle - reasonStale - reasonSpreadFlush - // Following are flush outcomes - noUser - noSeries - noChunks - flushError - reasonDropped - maxFlushReason // Used for testing String() method. Should be last. 
-) - -func (f flushReason) String() string { - switch f { - case noFlush: - return "NoFlush" - case reasonImmediate: - return "Immediate" - case reasonMultipleChunksInSeries: - return "MultipleChunksInSeries" - case reasonAged: - return "Aged" - case reasonIdle: - return "Idle" - case reasonStale: - return "Stale" - case reasonSpreadFlush: - return "Spread" - case noUser: - return "NoUser" - case noSeries: - return "NoSeries" - case noChunks: - return "NoChunksToFlush" - case flushError: - return "FlushError" - case reasonDropped: - return "Dropped" - default: - panic("unrecognised flushReason") - } -} - -// sweepSeries schedules a series for flushing based on a set of criteria -// -// NB we don't close the head chunk here, as the series could wait in the queue -// for some time, and we want to encourage chunks to be as full as possible. -func (i *Ingester) sweepSeries(userID string, fp model.Fingerprint, series *memorySeries, immediate bool) { - if len(series.chunkDescs) <= 0 { - return - } - - firstTime := series.firstTime() - flush := i.shouldFlushSeries(series, fp, immediate) - if flush == noFlush { - return - } - - flushQueueIndex := int(uint64(fp) % uint64(i.cfg.ConcurrentFlushes)) - if i.flushQueues[flushQueueIndex].Enqueue(&flushOp{firstTime, userID, fp, immediate}) { - i.metrics.seriesEnqueuedForFlush.WithLabelValues(flush.String()).Inc() - util.Event().Log("msg", "add to flush queue", "userID", userID, "reason", flush, "firstTime", firstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex) - } -} - -func (i *Ingester) shouldFlushSeries(series *memorySeries, fp model.Fingerprint, immediate bool) flushReason { - if len(series.chunkDescs) == 0 { - return noFlush - } - if immediate { - for _, cd := range series.chunkDescs { - if !cd.flushed { - return reasonImmediate - } - } - return noFlush // Everything is flushed. - } - - // Flush if we have more than one chunk, and haven't already flushed the first chunk - if len(series.chunkDescs) > 1 && !series.chunkDescs[0].flushed { - if series.chunkDescs[0].flushReason != noFlush { - return series.chunkDescs[0].flushReason - } - return reasonMultipleChunksInSeries - } - // Otherwise look in more detail at the first chunk - return i.shouldFlushChunk(series.chunkDescs[0], fp, series.isStale()) -} - -func (i *Ingester) shouldFlushChunk(c *desc, fp model.Fingerprint, lastValueIsStale bool) flushReason { - if c.flushed { // don't flush chunks we've already flushed - return noFlush - } - - // Adjust max age slightly to spread flushes out over time - var jitter time.Duration - if i.cfg.ChunkAgeJitter != 0 { - jitter = time.Duration(fp) % i.cfg.ChunkAgeJitter - } - // Chunks should be flushed if they span longer than MaxChunkAge - if c.LastTime.Sub(c.FirstTime) > (i.cfg.MaxChunkAge - jitter) { - return reasonAged - } - - // Chunk should be flushed if their last update is older then MaxChunkIdle. - if model.Now().Sub(c.LastUpdate) > i.cfg.MaxChunkIdle { - return reasonIdle - } - - // A chunk that has a stale marker can be flushed if possible. 
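
setFlushRate above spreads store calls over the flush period: with 600 queued items and a 1-minute period the target is 600/60 × 1.05 = 10.5 flushes per second, floored so that at least minFlushes (100) happen per period. A standalone sketch of that arithmetic, with constants mirroring the ones above:

package main

import (
	"fmt"
	"time"
)

const (
	minFlushes = 100  // lower bound on flushes per check period
	fudge      = 1.05 // aim to finish slightly before the period ends
)

// flushesPerSecond spreads queueLen flushes over period, but never
// lets a whole period drain fewer than minFlushes items.
func flushesPerSecond(queueLen int, period time.Duration) float64 {
	rate := float64(queueLen) / period.Seconds() * fudge
	if rate*period.Seconds() < minFlushes {
		rate = minFlushes / period.Seconds()
	}
	return rate
}

func main() {
	fmt.Println(flushesPerSecond(600, time.Minute)) // 10.5
	fmt.Println(flushesPerSecond(10, time.Minute))  // floored: ~1.67
}
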
- if i.cfg.MaxStaleChunkIdle > 0 && - lastValueIsStale && - model.Now().Sub(c.LastUpdate) > i.cfg.MaxStaleChunkIdle { - return reasonStale - } - - return noFlush -} - -func (i *Ingester) flushLoop(j int) { - defer func() { - level.Debug(i.logger).Log("msg", "Ingester.flushLoop() exited") - i.flushQueuesDone.Done() - }() - - for { - o := i.flushQueues[j].Dequeue() - if o == nil { - return - } - op := o.(*flushOp) - - if !op.immediate { - _ = i.flushRateLimiter.Wait(context.Background()) - } - outcome, err := i.flushUserSeries(j, op.userID, op.fp, op.immediate) - i.metrics.seriesDequeuedOutcome.WithLabelValues(outcome.String()).Inc() - if err != nil { - level.Error(log.WithUserID(op.userID, i.logger)).Log("msg", "failed to flush user", "err", err) - } - - // If we're exiting & we failed to flush, put the failed operation - // back in the queue at a later point. - if op.immediate && err != nil { - op.from = op.from.Add(flushBackoff) - i.flushQueues[j].Enqueue(op) - } - } -} - -// Returns flush outcome (either original reason, if series was flushed, noFlush if it doesn't need flushing anymore, or one of the errors) -func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.Fingerprint, immediate bool) (flushReason, error) { - i.metrics.flushSeriesInProgress.Inc() - defer i.metrics.flushSeriesInProgress.Dec() - - if i.preFlushUserSeries != nil { - i.preFlushUserSeries() - } - - userState, ok := i.userStates.get(userID) - if !ok { - return noUser, nil - } - - series, ok := userState.fpToSeries.get(fp) - if !ok { - return noSeries, nil - } - - userState.fpLocker.Lock(fp) - reason := i.shouldFlushSeries(series, fp, immediate) - if reason == noFlush { - userState.fpLocker.Unlock(fp) - return noFlush, nil - } - - // shouldFlushSeries() has told us we have at least one chunk. - // Make a copy of chunks descriptors slice, to avoid possible issues related to removing (and niling) elements from chunkDesc. - // This can happen if first chunk is already flushed -- removeFlushedChunks may set such chunk to nil. - // Since elements in the slice are pointers, we can still safely update chunk descriptors after the copy. - chunks := append([]*desc(nil), series.chunkDescs...) - if immediate { - series.closeHead(reasonImmediate) - } else if chunkReason := i.shouldFlushChunk(series.head(), fp, series.isStale()); chunkReason != noFlush { - series.closeHead(chunkReason) - } else { - // The head chunk doesn't need flushing; step back by one. - chunks = chunks[:len(chunks)-1] - } - - if (reason == reasonIdle || reason == reasonStale) && series.headChunkClosed { - if minChunkLength := i.limits.MinChunkLength(userID); minChunkLength > 0 { - chunkLength := 0 - for _, c := range chunks { - chunkLength += c.C.Len() - } - if chunkLength < minChunkLength { - userState.removeSeries(fp, series.metric) - i.metrics.memoryChunks.Sub(float64(len(chunks))) - i.metrics.droppedChunks.Add(float64(len(chunks))) - util.Event().Log( - "msg", "dropped chunks", - "userID", userID, - "numChunks", len(chunks), - "chunkLength", chunkLength, - "fp", fp, - "series", series.metric, - "queue", flushQueueIndex, - ) - chunks = nil - reason = reasonDropped - } - } - } - - userState.fpLocker.Unlock(fp) - - if reason == reasonDropped { - return reason, nil - } - - // No need to flush these chunks again. 
- for len(chunks) > 0 && chunks[0].flushed { - chunks = chunks[1:] - } - - if len(chunks) == 0 { - return noChunks, nil - } - - // flush the chunks without locking the series, as we don't want to hold the series lock for the duration of the dynamo/s3 rpcs. - ctx, cancel := context.WithTimeout(context.Background(), i.cfg.FlushOpTimeout) - defer cancel() // releases resources if slowOperation completes before timeout elapses - - sp, ctx := ot.StartSpanFromContext(ctx, "flushUserSeries") - defer sp.Finish() - sp.SetTag("organization", userID) - - util.Event().Log("msg", "flush chunks", "userID", userID, "reason", reason, "numChunks", len(chunks), "firstTime", chunks[0].FirstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex) - err := i.flushChunks(ctx, userID, fp, series.metric, chunks) - if err != nil { - return flushError, err - } - - userState.fpLocker.Lock(fp) - for i := 0; i < len(chunks); i++ { - // Mark the chunks as flushed, so we can remove them after the retention period. - // We can safely use chunks[i] here, because elements are pointers to chunk descriptors. - chunks[i].flushed = true - chunks[i].LastUpdate = model.Now() - } - userState.fpLocker.Unlock(fp) - return reason, err -} - -// must be called under fpLocker lock -func (i *Ingester) removeFlushedChunks(userState *userState, fp model.Fingerprint, series *memorySeries) { - now := model.Now() - for len(series.chunkDescs) > 0 { - if series.chunkDescs[0].flushed && now.Sub(series.chunkDescs[0].LastUpdate) > i.cfg.RetainPeriod { - series.chunkDescs[0] = nil // erase reference so the chunk can be garbage-collected - series.chunkDescs = series.chunkDescs[1:] - i.metrics.memoryChunks.Dec() - } else { - break - } - } - if len(series.chunkDescs) == 0 { - userState.removeSeries(fp, series.metric) - } -} - -func (i *Ingester) flushChunks(ctx context.Context, userID string, fp model.Fingerprint, metric labels.Labels, chunkDescs []*desc) error { - if i.preFlushChunks != nil { - i.preFlushChunks() - } - - wireChunks := make([]chunk.Chunk, 0, len(chunkDescs)) - for _, chunkDesc := range chunkDescs { - c := chunk.NewChunk(userID, fp, metric, chunkDesc.C, chunkDesc.FirstTime, chunkDesc.LastTime) - if err := c.Encode(); err != nil { - return err - } - wireChunks = append(wireChunks, c) - } - - if err := i.chunkStore.Put(ctx, wireChunks); err != nil { - return err - } - - // Record statistics only when actual put request did not return error. 
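
flushUserSeries above marks descriptors as flushed and stamps LastUpdate; removeFlushedChunks then drops them from the head of the slice once they age past RetainPeriod, nil-ing each slot so the chunk can be garbage-collected. A minimal sketch of that head-trimming retention loop, with a simplified stand-in for the desc type:

package main

import (
	"fmt"
	"time"
)

type desc struct {
	flushed    bool
	lastUpdate time.Time
}

// trimFlushed removes leading descriptors that were flushed more than
// retain ago, erasing each reference before reslicing.
func trimFlushed(chunks []*desc, now time.Time, retain time.Duration) []*desc {
	for len(chunks) > 0 {
		c := chunks[0]
		if !c.flushed || now.Sub(c.lastUpdate) <= retain {
			break // descriptors are in time order; nothing further qualifies
		}
		chunks[0] = nil // drop the reference so the chunk can be collected
		chunks = chunks[1:]
	}
	return chunks
}

func main() {
	now := time.Now()
	chunks := []*desc{
		{flushed: true, lastUpdate: now.Add(-10 * time.Minute)},
		{flushed: false, lastUpdate: now},
	}
	fmt.Println(len(trimFlushed(chunks, now, 5*time.Minute))) // 1
}
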
- for _, chunkDesc := range chunkDescs { - utilization, length, size := chunkDesc.C.Utilization(), chunkDesc.C.Len(), chunkDesc.C.Size() - util.Event().Log("msg", "chunk flushed", "userID", userID, "fp", fp, "series", metric, "nlabels", len(metric), "utilization", utilization, "length", length, "size", size, "firstTime", chunkDesc.FirstTime, "lastTime", chunkDesc.LastTime) - i.metrics.chunkUtilization.Observe(utilization) - i.metrics.chunkLength.Observe(float64(length)) - i.metrics.chunkSize.Observe(float64(size)) - i.metrics.chunkAge.Observe(model.Now().Sub(chunkDesc.FirstTime).Seconds()) - } - - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go deleted file mode 100644 index 09d9d84ee..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go +++ /dev/null @@ -1,324 +0,0 @@ -package index - -import ( - "sort" - "sync" - "unsafe" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/util" -) - -const indexShards = 32 - -// InvertedIndex implements a in-memory inverter index from label pairs to fingerprints. -// It is sharded to reduce lock contention on writes. -type InvertedIndex struct { - shards []indexShard -} - -// New returns a new InvertedIndex. -func New() *InvertedIndex { - shards := make([]indexShard, indexShards) - for i := 0; i < indexShards; i++ { - shards[i].idx = map[string]indexEntry{} - } - return &InvertedIndex{ - shards: shards, - } -} - -// Add a fingerprint under the specified labels. -// NOTE: memory for `labels` is unsafe; anything retained beyond the -// life of this function must be copied -func (ii *InvertedIndex) Add(labels []cortexpb.LabelAdapter, fp model.Fingerprint) labels.Labels { - shard := &ii.shards[util.HashFP(fp)%indexShards] - return shard.add(labels, fp) // add() returns 'interned' values so the original labels are not retained -} - -// Lookup all fingerprints for the provided matchers. -func (ii *InvertedIndex) Lookup(matchers []*labels.Matcher) []model.Fingerprint { - if len(matchers) == 0 { - return nil - } - - result := []model.Fingerprint{} - for i := range ii.shards { - fps := ii.shards[i].lookup(matchers) - result = append(result, fps...) - } - - return result -} - -// LabelNames returns all label names. -func (ii *InvertedIndex) LabelNames() []string { - results := make([][]string, 0, indexShards) - - for i := range ii.shards { - shardResult := ii.shards[i].labelNames() - results = append(results, shardResult) - } - - return mergeStringSlices(results) -} - -// LabelValues returns the values for the given label. -func (ii *InvertedIndex) LabelValues(name string) []string { - results := make([][]string, 0, indexShards) - - for i := range ii.shards { - shardResult := ii.shards[i].labelValues(name) - results = append(results, shardResult) - } - - return mergeStringSlices(results) -} - -// Delete a fingerprint with the given label pairs. -func (ii *InvertedIndex) Delete(labels labels.Labels, fp model.Fingerprint) { - shard := &ii.shards[util.HashFP(fp)%indexShards] - shard.delete(labels, fp) -} - -// NB slice entries are sorted in fp order. 
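
The inverted index in the next file keeps every per-value fingerprint slice sorted, so a multi-matcher lookup reduces to repeated intersection of sorted lists (see the intersect function further down, where nil means "match everything"). A minimal standalone version of that merge-style intersection; this variant advances both cursors on a match, which is equivalent for duplicate-free inputs:

package main

import "fmt"

// intersect merges two ascending, duplicate-free fingerprint lists,
// keeping only values present in both; nil means "match everything".
func intersect(a, b []uint64) []uint64 {
	if a == nil {
		return b
	}
	result := []uint64{}
	for i, j := 0, 0; i < len(a) && j < len(b); {
		switch {
		case a[i] == b[j]:
			result = append(result, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return result
}

func main() {
	fmt.Println(intersect([]uint64{1, 3, 5, 9}, []uint64{3, 4, 9})) // [3 9]
	fmt.Println(intersect(nil, []uint64{7}))                        // [7]
}
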
-type indexEntry struct { - name string - fps map[string]indexValueEntry -} - -type indexValueEntry struct { - value string - fps []model.Fingerprint -} - -type unlockIndex map[string]indexEntry - -// This is the prevalent value for Intel and AMD CPUs as-at 2018. -const cacheLineSize = 64 - -type indexShard struct { - mtx sync.RWMutex - idx unlockIndex - //nolint:structcheck,unused - pad [cacheLineSize - unsafe.Sizeof(sync.Mutex{}) - unsafe.Sizeof(unlockIndex{})]byte -} - -func copyString(s string) string { - return string([]byte(s)) -} - -// add metric to the index; return all the name/value pairs as a fresh -// sorted slice, referencing 'interned' strings from the index so that -// no references are retained to the memory of `metric`. -func (shard *indexShard) add(metric []cortexpb.LabelAdapter, fp model.Fingerprint) labels.Labels { - shard.mtx.Lock() - defer shard.mtx.Unlock() - - internedLabels := make(labels.Labels, len(metric)) - - for i, pair := range metric { - values, ok := shard.idx[pair.Name] - if !ok { - values = indexEntry{ - name: copyString(pair.Name), - fps: map[string]indexValueEntry{}, - } - shard.idx[values.name] = values - } - fingerprints, ok := values.fps[pair.Value] - if !ok { - fingerprints = indexValueEntry{ - value: copyString(pair.Value), - } - } - // Insert into the right position to keep fingerprints sorted - j := sort.Search(len(fingerprints.fps), func(i int) bool { - return fingerprints.fps[i] >= fp - }) - fingerprints.fps = append(fingerprints.fps, 0) - copy(fingerprints.fps[j+1:], fingerprints.fps[j:]) - fingerprints.fps[j] = fp - values.fps[fingerprints.value] = fingerprints - internedLabels[i] = labels.Label{Name: values.name, Value: fingerprints.value} - } - sort.Sort(internedLabels) - return internedLabels -} - -func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint { - // index slice values must only be accessed under lock, so all - // code paths must take a copy before returning - shard.mtx.RLock() - defer shard.mtx.RUnlock() - - // per-shard intersection is initially nil, which is a special case - // meaning "everything" when passed to intersect() - // loop invariant: result is sorted - var result []model.Fingerprint - for _, matcher := range matchers { - values, ok := shard.idx[matcher.Name] - if !ok { - return nil - } - var toIntersect model.Fingerprints - if matcher.Type == labels.MatchEqual { - fps := values.fps[matcher.Value] - toIntersect = append(toIntersect, fps.fps...) // deliberate copy - } else if matcher.Type == labels.MatchRegexp && len(chunk.FindSetMatches(matcher.Value)) > 0 { - // The lookup is of the form `=~"a|b|c|d"` - set := chunk.FindSetMatches(matcher.Value) - for _, value := range set { - toIntersect = append(toIntersect, values.fps[value].fps...) - } - sort.Sort(toIntersect) - } else { - // accumulate the matching fingerprints (which are all distinct) - // then sort to maintain the invariant - for value, fps := range values.fps { - if matcher.Matches(value) { - toIntersect = append(toIntersect, fps.fps...) 
- } - } - sort.Sort(toIntersect) - } - result = intersect(result, toIntersect) - if len(result) == 0 { - return nil - } - } - - return result -} - -func (shard *indexShard) labelNames() []string { - shard.mtx.RLock() - defer shard.mtx.RUnlock() - - results := make([]string, 0, len(shard.idx)) - for name := range shard.idx { - results = append(results, name) - } - - sort.Strings(results) - return results -} - -func (shard *indexShard) labelValues(name string) []string { - shard.mtx.RLock() - defer shard.mtx.RUnlock() - - values, ok := shard.idx[name] - if !ok { - return nil - } - - results := make([]string, 0, len(values.fps)) - for val := range values.fps { - results = append(results, val) - } - - sort.Strings(results) - return results -} - -func (shard *indexShard) delete(labels labels.Labels, fp model.Fingerprint) { - shard.mtx.Lock() - defer shard.mtx.Unlock() - - for _, pair := range labels { - name, value := pair.Name, pair.Value - values, ok := shard.idx[name] - if !ok { - continue - } - fingerprints, ok := values.fps[value] - if !ok { - continue - } - - j := sort.Search(len(fingerprints.fps), func(i int) bool { - return fingerprints.fps[i] >= fp - }) - - // see if search didn't find fp which matches the condition which means we don't have to do anything. - if j >= len(fingerprints.fps) || fingerprints.fps[j] != fp { - continue - } - fingerprints.fps = fingerprints.fps[:j+copy(fingerprints.fps[j:], fingerprints.fps[j+1:])] - - if len(fingerprints.fps) == 0 { - delete(values.fps, value) - } else { - values.fps[value] = fingerprints - } - - if len(values.fps) == 0 { - delete(shard.idx, name) - } else { - shard.idx[name] = values - } - } -} - -// intersect two sorted lists of fingerprints. Assumes there are no duplicate -// fingerprints within the input lists. -func intersect(a, b []model.Fingerprint) []model.Fingerprint { - if a == nil { - return b - } - result := []model.Fingerprint{} - for i, j := 0, 0; i < len(a) && j < len(b); { - if a[i] == b[j] { - result = append(result, a[i]) - } - if a[i] < b[j] { - i++ - } else { - j++ - } - } - return result -} - -func mergeStringSlices(ss [][]string) []string { - switch len(ss) { - case 0: - return nil - case 1: - return ss[0] - case 2: - return mergeTwoStringSlices(ss[0], ss[1]) - default: - halfway := len(ss) / 2 - return mergeTwoStringSlices( - mergeStringSlices(ss[:halfway]), - mergeStringSlices(ss[halfway:]), - ) - } -} - -func mergeTwoStringSlices(a, b []string) []string { - result := make([]string, 0, len(a)+len(b)) - i, j := 0, 0 - for i < len(a) && j < len(b) { - if a[i] < b[j] { - result = append(result, a[i]) - i++ - } else if a[i] > b[j] { - result = append(result, b[j]) - j++ - } else { - result = append(result, a[i]) - i++ - j++ - } - } - result = append(result, a[i:]...) - result = append(result, b[j:]...) 
- return result -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go deleted file mode 100644 index b72c369f3..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go +++ /dev/null @@ -1,1188 +0,0 @@ -package ingester - -import ( - "context" - "flag" - "fmt" - "net/http" - "os" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/status" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/tsdb/chunks" - tsdb_record "github.com/prometheus/prometheus/tsdb/record" - "github.com/weaveworks/common/httpgrpc" - "go.uber.org/atomic" - "golang.org/x/time/rate" - "google.golang.org/grpc/codes" - - cortex_chunk "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - logutil "github.com/cortexproject/cortex/pkg/util/log" - util_math "github.com/cortexproject/cortex/pkg/util/math" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -const ( - // Number of timeseries to return in each batch of a QueryStream. - queryStreamBatchSize = 128 - metadataStreamBatchSize = 128 - - // Discarded Metadata metric labels. - perUserMetadataLimit = "per_user_metadata_limit" - perMetricMetadataLimit = "per_metric_metadata_limit" - - // Period at which to attempt purging metadata from memory. - metadataPurgePeriod = 5 * time.Minute -) - -var ( - // This is initialised if the WAL is enabled and the records are fetched from this pool. - recordPool sync.Pool - - errIngesterStopping = errors.New("ingester stopping") -) - -// Config for an Ingester. -type Config struct { - WALConfig WALConfig `yaml:"walconfig" doc:"description=Configures the Write-Ahead Log (WAL) for the Cortex chunks storage. This config is ignored when running the Cortex blocks storage."` - LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler"` - - // Config for transferring chunks. Zero or negative = no retries. - MaxTransferRetries int `yaml:"max_transfer_retries"` - - // Config for chunk flushing. - FlushCheckPeriod time.Duration `yaml:"flush_period"` - RetainPeriod time.Duration `yaml:"retain_period"` - MaxChunkIdle time.Duration `yaml:"max_chunk_idle_time"` - MaxStaleChunkIdle time.Duration `yaml:"max_stale_chunk_idle_time"` - FlushOpTimeout time.Duration `yaml:"flush_op_timeout"` - MaxChunkAge time.Duration `yaml:"max_chunk_age"` - ChunkAgeJitter time.Duration `yaml:"chunk_age_jitter"` - ConcurrentFlushes int `yaml:"concurrent_flushes"` - SpreadFlushes bool `yaml:"spread_flushes"` - - // Config for metadata purging. - MetadataRetainPeriod time.Duration `yaml:"metadata_retain_period"` - - RateUpdatePeriod time.Duration `yaml:"rate_update_period"` - - ActiveSeriesMetricsEnabled bool `yaml:"active_series_metrics_enabled"` - ActiveSeriesMetricsUpdatePeriod time.Duration `yaml:"active_series_metrics_update_period"` - ActiveSeriesMetricsIdleTimeout time.Duration `yaml:"active_series_metrics_idle_timeout"` - - // Use blocks storage. 
- BlocksStorageEnabled bool `yaml:"-"` - BlocksStorageConfig tsdb.BlocksStorageConfig `yaml:"-"` - StreamChunksWhenUsingBlocks bool `yaml:"-"` - // Runtime-override for type of streaming query to use (chunks or samples). - StreamTypeFn func() QueryStreamType `yaml:"-"` - - // Injected at runtime and read from the distributor config, required - // to accurately apply global limits. - DistributorShardingStrategy string `yaml:"-"` - DistributorShardByAllLabels bool `yaml:"-"` - - DefaultLimits InstanceLimits `yaml:"instance_limits"` - InstanceLimitsFn func() *InstanceLimits `yaml:"-"` - - IgnoreSeriesLimitForMetricNames string `yaml:"ignore_series_limit_for_metric_names"` - - // For testing, you can override the address and ID of this ingester. - ingesterClientFactory func(addr string, cfg client.Config) (client.HealthAndIngesterClient, error) -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.LifecyclerConfig.RegisterFlags(f) - cfg.WALConfig.RegisterFlags(f) - - f.IntVar(&cfg.MaxTransferRetries, "ingester.max-transfer-retries", 10, "Number of times to try and transfer chunks before falling back to flushing. Negative value or zero disables hand-over. This feature is supported only by the chunks storage.") - - f.DurationVar(&cfg.FlushCheckPeriod, "ingester.flush-period", 1*time.Minute, "Period with which to attempt to flush chunks.") - f.DurationVar(&cfg.RetainPeriod, "ingester.retain-period", 5*time.Minute, "Period chunks will remain in memory after flushing.") - f.DurationVar(&cfg.MaxChunkIdle, "ingester.max-chunk-idle", 5*time.Minute, "Maximum chunk idle time before flushing.") - f.DurationVar(&cfg.MaxStaleChunkIdle, "ingester.max-stale-chunk-idle", 2*time.Minute, "Maximum chunk idle time for chunks terminating in stale markers before flushing. 
0 disables it and a stale series is not flushed until the max-chunk-idle timeout is reached.") - f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 1*time.Minute, "Timeout for individual flush operations.") - f.DurationVar(&cfg.MaxChunkAge, "ingester.max-chunk-age", 12*time.Hour, "Maximum chunk age before flushing.") - f.DurationVar(&cfg.ChunkAgeJitter, "ingester.chunk-age-jitter", 0, "Range of time to subtract from -ingester.max-chunk-age to spread out flushes") - f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushes", 50, "Number of concurrent goroutines flushing to dynamodb.") - f.BoolVar(&cfg.SpreadFlushes, "ingester.spread-flushes", true, "If true, spread series flushes across the whole period of -ingester.max-chunk-age.") - - f.DurationVar(&cfg.MetadataRetainPeriod, "ingester.metadata-retain-period", 10*time.Minute, "Period at which metadata we have not seen will remain in memory before being deleted.") - - f.DurationVar(&cfg.RateUpdatePeriod, "ingester.rate-update-period", 15*time.Second, "Period with which to update the per-user ingestion rates.") - f.BoolVar(&cfg.ActiveSeriesMetricsEnabled, "ingester.active-series-metrics-enabled", true, "Enable tracking of active series and export them as metrics.") - f.DurationVar(&cfg.ActiveSeriesMetricsUpdatePeriod, "ingester.active-series-metrics-update-period", 1*time.Minute, "How often to update active series metrics.") - f.DurationVar(&cfg.ActiveSeriesMetricsIdleTimeout, "ingester.active-series-metrics-idle-timeout", 10*time.Minute, "After what time a series is considered to be inactive.") - f.BoolVar(&cfg.StreamChunksWhenUsingBlocks, "ingester.stream-chunks-when-using-blocks", false, "Stream chunks when using blocks. This is experimental feature and not yet tested. Once ready, it will be made default and this config option removed.") - - f.Float64Var(&cfg.DefaultLimits.MaxIngestionRate, "ingester.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that ingester will accept. This limit is per-ingester, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. This limit only works when using blocks engine. 0 = unlimited.") - f.Int64Var(&cfg.DefaultLimits.MaxInMemoryTenants, "ingester.instance-limits.max-tenants", 0, "Max users that this ingester can hold. Requests from additional users will be rejected. This limit only works when using blocks engine. 0 = unlimited.") - f.Int64Var(&cfg.DefaultLimits.MaxInMemorySeries, "ingester.instance-limits.max-series", 0, "Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. This limit only works when using blocks engine. 0 = unlimited.") - f.Int64Var(&cfg.DefaultLimits.MaxInflightPushRequests, "ingester.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.") - - f.StringVar(&cfg.IgnoreSeriesLimitForMetricNames, "ingester.ignore-series-limit-for-metric-names", "", "Comma-separated list of metric names, for which -ingester.max-series-per-metric and -ingester.max-global-series-per-metric limits will be ignored. 
Does not affect max-series-per-user or max-global-series-per-metric limits.") -} - -func (cfg *Config) getIgnoreSeriesLimitForMetricNamesMap() map[string]struct{} { - if cfg.IgnoreSeriesLimitForMetricNames == "" { - return nil - } - - result := map[string]struct{}{} - - for _, s := range strings.Split(cfg.IgnoreSeriesLimitForMetricNames, ",") { - tr := strings.TrimSpace(s) - if tr != "" { - result[tr] = struct{}{} - } - } - - if len(result) == 0 { - return nil - } - - return result -} - -// Ingester deals with "in flight" chunks. Based on Prometheus 1.x -// MemorySeriesStorage. -type Ingester struct { - *services.BasicService - - cfg Config - clientConfig client.Config - - metrics *ingesterMetrics - logger log.Logger - - chunkStore ChunkStore - lifecycler *ring.Lifecycler - limits *validation.Overrides - limiter *Limiter - subservicesWatcher *services.FailureWatcher - - userStatesMtx sync.RWMutex // protects userStates and stopped - userStates *userStates - stopped bool // protected by userStatesMtx - - // For storing metadata ingested. - usersMetadataMtx sync.RWMutex - usersMetadata map[string]*userMetricsMetadata - - // One queue per flush thread. Fingerprint is used to - // pick a queue. - flushQueues []*util.PriorityQueue - flushQueuesDone sync.WaitGroup - - // Spread out calls to the chunk store over the flush period - flushRateLimiter *rate.Limiter - - // This should never be nil. - wal WAL - // To be passed to the WAL. - registerer prometheus.Registerer - - // Hooks for injecting behaviour from tests. - preFlushUserSeries func() - preFlushChunks func() - - // Prometheus block storage - TSDBState TSDBState - - // Rate of pushed samples. Only used by V2-ingester to limit global samples push rate. - ingestionRate *util_math.EwmaRate - inflightPushRequests atomic.Int64 -} - -// ChunkStore is the interface we need to store chunks -type ChunkStore interface { - Put(ctx context.Context, chunks []cortex_chunk.Chunk) error -} - -// New constructs a new Ingester. -func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, chunkStore ChunkStore, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { - defaultInstanceLimits = &cfg.DefaultLimits - - if cfg.ingesterClientFactory == nil { - cfg.ingesterClientFactory = client.MakeIngesterClient - } - - if cfg.BlocksStorageEnabled { - return NewV2(cfg, clientConfig, limits, registerer, logger) - } - - if cfg.WALConfig.WALEnabled { - // If WAL is enabled, we don't transfer out the data to any ingester. - // Either the next ingester which takes it's place should recover from WAL - // or the data has to be flushed during scaledown. - cfg.MaxTransferRetries = 0 - - // Transfers are disabled with WAL, hence no need to wait for transfers. 
- cfg.LifecyclerConfig.JoinAfter = 0 - - recordPool = sync.Pool{ - New: func() interface{} { - return &WALRecord{} - }, - } - } - - if cfg.WALConfig.WALEnabled || cfg.WALConfig.Recover { - if err := os.MkdirAll(cfg.WALConfig.Dir, os.ModePerm); err != nil { - return nil, err - } - } - - i := &Ingester{ - cfg: cfg, - clientConfig: clientConfig, - - limits: limits, - chunkStore: chunkStore, - flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), - flushRateLimiter: rate.NewLimiter(rate.Inf, 1), - usersMetadata: map[string]*userMetricsMetadata{}, - registerer: registerer, - logger: logger, - } - i.metrics = newIngesterMetrics(registerer, true, cfg.ActiveSeriesMetricsEnabled, i.getInstanceLimits, nil, &i.inflightPushRequests) - - var err error - // During WAL recovery, it will create new user states which requires the limiter. - // Hence initialise the limiter before creating the WAL. - // The '!cfg.WALConfig.WALEnabled' argument says don't flush on shutdown if the WAL is enabled. - i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", RingKey, !cfg.WALConfig.WALEnabled || cfg.WALConfig.FlushOnShutdown, logger, prometheus.WrapRegistererWithPrefix("cortex_", registerer)) - if err != nil { - return nil, err - } - - i.limiter = NewLimiter( - limits, - i.lifecycler, - cfg.DistributorShardingStrategy, - cfg.DistributorShardByAllLabels, - cfg.LifecyclerConfig.RingConfig.ReplicationFactor, - cfg.LifecyclerConfig.RingConfig.ZoneAwarenessEnabled) - - i.subservicesWatcher = services.NewFailureWatcher() - i.subservicesWatcher.WatchService(i.lifecycler) - - i.BasicService = services.NewBasicService(i.starting, i.loop, i.stopping) - return i, nil -} - -func (i *Ingester) starting(ctx context.Context) error { - if i.cfg.WALConfig.Recover { - level.Info(i.logger).Log("msg", "recovering from WAL") - start := time.Now() - if err := recoverFromWAL(i); err != nil { - level.Error(i.logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String()) - return errors.Wrap(err, "failed to recover from WAL") - } - elapsed := time.Since(start) - level.Info(i.logger).Log("msg", "recovery from WAL completed", "time", elapsed.String()) - i.metrics.walReplayDuration.Set(elapsed.Seconds()) - } - - // If the WAL recover happened, then the userStates would already be set. - if i.userStates == nil { - i.userStates = newUserStates(i.limiter, i.cfg, i.metrics, i.logger) - } - - var err error - i.wal, err = newWAL(i.cfg.WALConfig, i.userStates.cp, i.registerer, i.logger) - if err != nil { - return errors.Wrap(err, "starting WAL") - } - - // Now that user states have been created, we can start the lifecycler. - // Important: we want to keep lifecycler running until we ask it to stop, so we need to give it independent context - if err := i.lifecycler.StartAsync(context.Background()); err != nil { - return errors.Wrap(err, "failed to start lifecycler") - } - if err := i.lifecycler.AwaitRunning(ctx); err != nil { - return errors.Wrap(err, "failed to start lifecycler") - } - - i.startFlushLoops() - - return nil -} - -func (i *Ingester) startFlushLoops() { - i.flushQueuesDone.Add(i.cfg.ConcurrentFlushes) - for j := 0; j < i.cfg.ConcurrentFlushes; j++ { - i.flushQueues[j] = util.NewPriorityQueue(i.metrics.flushQueueLength) - go i.flushLoop(j) - } -} - -// NewForFlusher constructs a new Ingester to be used by flusher target. -// Compared to the 'New' method: -// * Always replays the WAL. -// * Does not start the lifecycler. 
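
When the WAL is enabled, New above seeds recordPool so per-push WALRecord allocations are recycled: Push takes a record with recordPool.Get and returns it with recordPool.Put after a successful log. A minimal sketch of that pooling pattern with a stand-in record type:

package main

import (
	"fmt"
	"sync"
)

type record struct {
	userID  string
	samples []int64
}

var recordPool = sync.Pool{
	New: func() interface{} { return &record{} },
}

func handlePush(userID string, samples []int64) {
	r := recordPool.Get().(*record)
	r.userID = userID
	r.samples = append(r.samples[:0], samples...) // reuse the backing array

	fmt.Printf("logged %d samples for %s\n", len(r.samples), r.userID)

	recordPool.Put(r) // recycle once the record is fully consumed
}

func main() {
	handlePush("tenant-1", []int64{1, 2, 3})
	handlePush("tenant-2", []int64{4})
}
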
-func NewForFlusher(cfg Config, chunkStore ChunkStore, limits *validation.Overrides, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { - if cfg.BlocksStorageEnabled { - return NewV2ForFlusher(cfg, limits, registerer, logger) - } - - i := &Ingester{ - cfg: cfg, - chunkStore: chunkStore, - flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), - flushRateLimiter: rate.NewLimiter(rate.Inf, 1), - wal: &noopWAL{}, - limits: limits, - logger: logger, - } - i.metrics = newIngesterMetrics(registerer, true, false, i.getInstanceLimits, nil, &i.inflightPushRequests) - - i.BasicService = services.NewBasicService(i.startingForFlusher, i.loopForFlusher, i.stopping) - return i, nil -} - -func (i *Ingester) startingForFlusher(ctx context.Context) error { - level.Info(i.logger).Log("msg", "recovering from WAL") - - // We recover from WAL always. - start := time.Now() - if err := recoverFromWAL(i); err != nil { - level.Error(i.logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String()) - return err - } - elapsed := time.Since(start) - - level.Info(i.logger).Log("msg", "recovery from WAL completed", "time", elapsed.String()) - i.metrics.walReplayDuration.Set(elapsed.Seconds()) - - i.startFlushLoops() - return nil -} - -func (i *Ingester) loopForFlusher(ctx context.Context) error { - for { - select { - case <-ctx.Done(): - return nil - - case err := <-i.subservicesWatcher.Chan(): - return errors.Wrap(err, "ingester subservice failed") - } - } -} - -func (i *Ingester) loop(ctx context.Context) error { - flushTicker := time.NewTicker(i.cfg.FlushCheckPeriod) - defer flushTicker.Stop() - - rateUpdateTicker := time.NewTicker(i.cfg.RateUpdatePeriod) - defer rateUpdateTicker.Stop() - - metadataPurgeTicker := time.NewTicker(metadataPurgePeriod) - defer metadataPurgeTicker.Stop() - - var activeSeriesTickerChan <-chan time.Time - if i.cfg.ActiveSeriesMetricsEnabled { - t := time.NewTicker(i.cfg.ActiveSeriesMetricsUpdatePeriod) - activeSeriesTickerChan = t.C - defer t.Stop() - } - - for { - select { - case <-metadataPurgeTicker.C: - i.purgeUserMetricsMetadata() - - case <-flushTicker.C: - i.sweepUsers(false) - - case <-rateUpdateTicker.C: - i.userStates.updateRates() - - case <-activeSeriesTickerChan: - i.userStates.purgeAndUpdateActiveSeries(time.Now().Add(-i.cfg.ActiveSeriesMetricsIdleTimeout)) - - case <-ctx.Done(): - return nil - - case err := <-i.subservicesWatcher.Chan(): - return errors.Wrap(err, "ingester subservice failed") - } - } -} - -// stopping is run when ingester is asked to stop -func (i *Ingester) stopping(_ error) error { - i.wal.Stop() - - // This will prevent us accepting any more samples - i.stopIncomingRequests() - - // Lifecycler can be nil if the ingester is for a flusher. - if i.lifecycler != nil { - // Next initiate our graceful exit from the ring. - return services.StopAndAwaitTerminated(context.Background(), i.lifecycler) - } - - return nil -} - -// ShutdownHandler triggers the following set of operations in order: -// * Change the state of ring to stop accepting writes. -// * Flush all the chunks. -func (i *Ingester) ShutdownHandler(w http.ResponseWriter, r *http.Request) { - originalFlush := i.lifecycler.FlushOnShutdown() - // We want to flush the chunks if transfer fails irrespective of original flag. - i.lifecycler.SetFlushOnShutdown(true) - - // In the case of an HTTP shutdown, we want to unregister no matter what. 
- originalUnregister := i.lifecycler.ShouldUnregisterOnShutdown() - i.lifecycler.SetUnregisterOnShutdown(true) - - _ = services.StopAndAwaitTerminated(context.Background(), i) - // Set state back to original. - i.lifecycler.SetFlushOnShutdown(originalFlush) - i.lifecycler.SetUnregisterOnShutdown(originalUnregister) - - w.WriteHeader(http.StatusNoContent) -} - -// stopIncomingRequests is called during the shutdown process. -func (i *Ingester) stopIncomingRequests() { - i.userStatesMtx.Lock() - defer i.userStatesMtx.Unlock() - i.stopped = true -} - -// check that ingester has finished starting, i.e. it is in Running or Stopping state. -// Why Stopping? Because ingester still runs, even when it is transferring data out in Stopping state. -// Ingester handles this state on its own (via `stopped` flag). -func (i *Ingester) checkRunningOrStopping() error { - s := i.State() - if s == services.Running || s == services.Stopping { - return nil - } - return status.Error(codes.Unavailable, s.String()) -} - -// Using block store, the ingester is only available when it is in a Running state. The ingester is not available -// when stopping to prevent any read or writes to the TSDB after the ingester has closed them. -func (i *Ingester) checkRunning() error { - s := i.State() - if s == services.Running { - return nil - } - return status.Error(codes.Unavailable, s.String()) -} - -// Push implements client.IngesterServer -func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - // We will report *this* request in the error too. - inflight := i.inflightPushRequests.Inc() - defer i.inflightPushRequests.Dec() - - gl := i.getInstanceLimits() - if gl != nil && gl.MaxInflightPushRequests > 0 { - if inflight > gl.MaxInflightPushRequests { - return nil, errTooManyInflightPushRequests - } - } - - if i.cfg.BlocksStorageEnabled { - return i.v2Push(ctx, req) - } - - // NOTE: because we use `unsafe` in deserialisation, we must not - // retain anything from `req` past the call to ReuseSlice - defer cortexpb.ReuseSlice(req.Timeseries) - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - // Given metadata is a best-effort approach, and we don't halt on errors - // process it before samples. Otherwise, we risk returning an error before ingestion. - i.pushMetadata(ctx, userID, req.GetMetadata()) - - var firstPartialErr *validationError - var record *WALRecord - if i.cfg.WALConfig.WALEnabled { - record = recordPool.Get().(*WALRecord) - record.UserID = userID - // Assuming there is not much churn in most cases, there is no use - // keeping the record.Labels slice hanging around. 
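
Push above admits a request by atomically incrementing an inflight counter, rejecting it when the configured MaxInflightPushRequests would be exceeded, and decrementing on return; note the increment happens first so the limit counts the current request too. A minimal sketch of that admission check; the server type and error value are stand-ins:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errTooManyInflight = errors.New("too many inflight push requests")

type server struct {
	inflight atomic.Int64
	limit    int64 // 0 = unlimited
}

func (s *server) push() error {
	// Count this request before checking, so the limit includes it.
	n := s.inflight.Add(1)
	defer s.inflight.Add(-1)

	if s.limit > 0 && n > s.limit {
		return errTooManyInflight
	}
	// ... handle the write ...
	return nil
}

func main() {
	s := &server{limit: 1}
	s.inflight.Add(1)     // simulate one request already in flight
	fmt.Println(s.push()) // too many inflight push requests
}
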
- record.Series = nil - if cap(record.Samples) < len(req.Timeseries) { - record.Samples = make([]tsdb_record.RefSample, 0, len(req.Timeseries)) - } else { - record.Samples = record.Samples[:0] - } - } - - for _, ts := range req.Timeseries { - seriesSamplesIngested := 0 - for _, s := range ts.Samples { - // append() copies the memory in `ts.Labels` except on the error path - err := i.append(ctx, userID, ts.Labels, model.Time(s.TimestampMs), model.SampleValue(s.Value), req.Source, record) - if err == nil { - seriesSamplesIngested++ - continue - } - - i.metrics.ingestedSamplesFail.Inc() - if ve, ok := err.(*validationError); ok { - if firstPartialErr == nil { - firstPartialErr = ve - } - continue - } - - // non-validation error: abandon this request - return nil, grpcForwardableError(userID, http.StatusInternalServerError, err) - } - - if i.cfg.ActiveSeriesMetricsEnabled && seriesSamplesIngested > 0 { - // updateActiveSeries will copy labels if necessary. - i.updateActiveSeries(userID, time.Now(), ts.Labels) - } - } - - if record != nil { - // Log the record only if there was no error in ingestion. - if err := i.wal.Log(record); err != nil { - return nil, err - } - recordPool.Put(record) - } - - if firstPartialErr != nil { - // grpcForwardableError turns the error into a string so it no longer references `req` - return &cortexpb.WriteResponse{}, grpcForwardableError(userID, firstPartialErr.code, firstPartialErr) - } - - return &cortexpb.WriteResponse{}, nil -} - -// NOTE: memory for `labels` is unsafe; anything retained beyond the -// life of this function must be copied -func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, timestamp model.Time, value model.SampleValue, source cortexpb.WriteRequest_SourceEnum, record *WALRecord) error { - labels.removeBlanks() - - var ( - state *userState - fp model.Fingerprint - ) - i.userStatesMtx.RLock() - defer func() { - i.userStatesMtx.RUnlock() - if state != nil { - state.fpLocker.Unlock(fp) - } - }() - if i.stopped { - return errIngesterStopping - } - - // getOrCreateSeries copies the memory for `labels`, except on the error path. - state, fp, series, err := i.userStates.getOrCreateSeries(ctx, userID, labels, record) - if err != nil { - if ve, ok := err.(*validationError); ok { - state.discardedSamples.WithLabelValues(ve.errorType).Inc() - } - - // Reset the state so that the defer will not try to unlock the fpLocker - // in case of error, because that lock has already been released on error. 
- state = nil - return err - } - - prevNumChunks := len(series.chunkDescs) - if i.cfg.SpreadFlushes && prevNumChunks > 0 { - // Map from the fingerprint hash to a point in the cycle of period MaxChunkAge - startOfCycle := timestamp.Add(-(timestamp.Sub(model.Time(0)) % i.cfg.MaxChunkAge)) - slot := startOfCycle.Add(time.Duration(uint64(fp) % uint64(i.cfg.MaxChunkAge))) - // If adding this sample means the head chunk will span that point in time, close so it will get flushed - if series.head().FirstTime < slot && timestamp >= slot { - series.closeHead(reasonSpreadFlush) - } - } - - if err := series.add(model.SamplePair{ - Value: value, - Timestamp: timestamp, - }); err != nil { - if ve, ok := err.(*validationError); ok { - state.discardedSamples.WithLabelValues(ve.errorType).Inc() - if ve.noReport { - return nil - } - } - return err - } - - if record != nil { - record.Samples = append(record.Samples, tsdb_record.RefSample{ - Ref: chunks.HeadSeriesRef(fp), - T: int64(timestamp), - V: float64(value), - }) - } - - i.metrics.memoryChunks.Add(float64(len(series.chunkDescs) - prevNumChunks)) - i.metrics.ingestedSamples.Inc() - switch source { - case cortexpb.RULE: - state.ingestedRuleSamples.Inc() - case cortexpb.API: - fallthrough - default: - state.ingestedAPISamples.Inc() - } - - return err -} - -// pushMetadata returns number of ingested metadata. -func (i *Ingester) pushMetadata(ctx context.Context, userID string, metadata []*cortexpb.MetricMetadata) int { - ingestedMetadata := 0 - failedMetadata := 0 - - var firstMetadataErr error - for _, metadata := range metadata { - err := i.appendMetadata(userID, metadata) - if err == nil { - ingestedMetadata++ - continue - } - - failedMetadata++ - if firstMetadataErr == nil { - firstMetadataErr = err - } - } - - i.metrics.ingestedMetadata.Add(float64(ingestedMetadata)) - i.metrics.ingestedMetadataFail.Add(float64(failedMetadata)) - - // If we have any error with regard to metadata we just log and no-op. - // We consider metadata a best effort approach, errors here should not stop processing. - if firstMetadataErr != nil { - logger := logutil.WithContext(ctx, i.logger) - level.Warn(logger).Log("msg", "failed to ingest some metadata", "err", firstMetadataErr) - } - - return ingestedMetadata -} - -func (i *Ingester) appendMetadata(userID string, m *cortexpb.MetricMetadata) error { - i.userStatesMtx.RLock() - if i.stopped { - i.userStatesMtx.RUnlock() - return errIngesterStopping - } - i.userStatesMtx.RUnlock() - - userMetadata := i.getOrCreateUserMetadata(userID) - - return userMetadata.add(m.GetMetricFamilyName(), m) -} - -func (i *Ingester) getOrCreateUserMetadata(userID string) *userMetricsMetadata { - userMetadata := i.getUserMetadata(userID) - if userMetadata != nil { - return userMetadata - } - - i.usersMetadataMtx.Lock() - defer i.usersMetadataMtx.Unlock() - - // Ensure it was not created between switching locks. 
-	userMetadata, ok := i.usersMetadata[userID]
-	if !ok {
-		userMetadata = newMetadataMap(i.limiter, i.metrics, userID)
-		i.usersMetadata[userID] = userMetadata
-	}
-	return userMetadata
-}
-
-func (i *Ingester) getUserMetadata(userID string) *userMetricsMetadata {
-	i.usersMetadataMtx.RLock()
-	defer i.usersMetadataMtx.RUnlock()
-	return i.usersMetadata[userID]
-}
-
-func (i *Ingester) deleteUserMetadata(userID string) {
-	i.usersMetadataMtx.Lock()
-	um := i.usersMetadata[userID]
-	delete(i.usersMetadata, userID)
-	i.usersMetadataMtx.Unlock()
-
-	if um != nil {
-		// We need to call purge to update i.metrics.memMetadata correctly (it counts number of metrics with metadata in memory).
-		// Passing zero time means purge everything.
-		um.purge(time.Time{})
-	}
-}
-
-func (i *Ingester) getUsersWithMetadata() []string {
-	i.usersMetadataMtx.RLock()
-	defer i.usersMetadataMtx.RUnlock()
-
-	userIDs := make([]string, 0, len(i.usersMetadata))
-	for userID := range i.usersMetadata {
-		userIDs = append(userIDs, userID)
-	}
-
-	return userIDs
-}
-
-func (i *Ingester) purgeUserMetricsMetadata() {
-	deadline := time.Now().Add(-i.cfg.MetadataRetainPeriod)
-
-	for _, userID := range i.getUsersWithMetadata() {
-		metadata := i.getUserMetadata(userID)
-		if metadata == nil {
-			continue
-		}
-
-		// Remove all metadata that we no longer need to retain.
-		metadata.purge(deadline)
-	}
-}
-
-// Query implements service.IngesterServer
-func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client.QueryResponse, error) {
-	if i.cfg.BlocksStorageEnabled {
-		return i.v2Query(ctx, req)
-	}
-
-	if err := i.checkRunningOrStopping(); err != nil {
-		return nil, err
-	}
-
-	userID, err := tenant.TenantID(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	from, through, matchers, err := client.FromQueryRequest(req)
-	if err != nil {
-		return nil, err
-	}
-
-	i.metrics.queries.Inc()
-
-	i.userStatesMtx.RLock()
-	state, ok, err := i.userStates.getViaContext(ctx)
-	i.userStatesMtx.RUnlock()
-	if err != nil {
-		return nil, err
-	} else if !ok {
-		return &client.QueryResponse{}, nil
-	}
-
-	result := &client.QueryResponse{}
-	numSeries, numSamples := 0, 0
-	maxSamplesPerQuery := i.limits.MaxSamplesPerQuery(userID)
-	err = state.forSeriesMatching(ctx, matchers, func(ctx context.Context, _ model.Fingerprint, series *memorySeries) error {
-		values, err := series.samplesForRange(from, through)
-		if err != nil {
-			return err
-		}
-		if len(values) == 0 {
-			return nil
-		}
-		numSeries++
-
-		numSamples += len(values)
-		if numSamples > maxSamplesPerQuery {
-			return httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "exceeded maximum number of samples in a query (%d)", maxSamplesPerQuery)
-		}
-
-		ts := cortexpb.TimeSeries{
-			Labels: cortexpb.FromLabelsToLabelAdapters(series.metric),
-			Samples: make([]cortexpb.Sample, 0, len(values)),
-		}
-		for _, s := range values {
-			ts.Samples = append(ts.Samples, cortexpb.Sample{
-				Value: float64(s.Value),
-				TimestampMs: int64(s.Timestamp),
-			})
-		}
-		result.Timeseries = append(result.Timeseries, ts)
-		return nil
-	}, nil, 0)
-	i.metrics.queriedSeries.Observe(float64(numSeries))
-	i.metrics.queriedSamples.Observe(float64(numSamples))
-	return result, err
-}
-
-// QueryStream implements service.IngesterServer
-func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_QueryStreamServer) error {
-	if i.cfg.BlocksStorageEnabled {
-		return i.v2QueryStream(req, stream)
-	}
-
-	if err := i.checkRunningOrStopping(); err != nil {
-		return err
-	}
-
-	spanLog, ctx := spanlogger.New(stream.Context(), "QueryStream")
-	defer spanLog.Finish()
-
-	from, through, matchers, err := client.FromQueryRequest(req)
-	if err != nil {
-		return err
-	}
-
-	i.metrics.queries.Inc()
-
-	i.userStatesMtx.RLock()
-	state, ok, err := i.userStates.getViaContext(ctx)
-	i.userStatesMtx.RUnlock()
-	if err != nil {
-		return err
-	} else if !ok {
-		return nil
-	}
-
-	numSeries, numChunks := 0, 0
-	reuseWireChunks := [queryStreamBatchSize][]client.Chunk{}
-	batch := make([]client.TimeSeriesChunk, 0, queryStreamBatchSize)
-	// We'd really like to have series in label order, not FP order, so we
-	// can iteratively merge them with entries coming from the chunk store. But
-	// that would involve locking all the series & sorting, so until we have
-	// a better solution in the ingesters I'd rather take the hit in the queriers.
-	err = state.forSeriesMatching(stream.Context(), matchers, func(ctx context.Context, _ model.Fingerprint, series *memorySeries) error {
-		chunks := make([]*desc, 0, len(series.chunkDescs))
-		for _, chunk := range series.chunkDescs {
-			if !(chunk.FirstTime.After(through) || chunk.LastTime.Before(from)) {
-				chunks = append(chunks, chunk.slice(from, through))
-			}
-		}
-
-		if len(chunks) == 0 {
-			return nil
-		}
-
-		numSeries++
-		reusePos := len(batch)
-		wireChunks, err := toWireChunks(chunks, reuseWireChunks[reusePos])
-		if err != nil {
-			return err
-		}
-		reuseWireChunks[reusePos] = wireChunks
-
-		numChunks += len(wireChunks)
-		batch = append(batch, client.TimeSeriesChunk{
-			Labels: cortexpb.FromLabelsToLabelAdapters(series.metric),
-			Chunks: wireChunks,
-		})
-
-		return nil
-	}, func(ctx context.Context) error {
-		if len(batch) == 0 {
-			return nil
-		}
-		err = client.SendQueryStream(stream, &client.QueryStreamResponse{
-			Chunkseries: batch,
-		})
-		batch = batch[:0]
-		return err
-	}, queryStreamBatchSize)
-	if err != nil {
-		return err
-	}
-
-	i.metrics.queriedSeries.Observe(float64(numSeries))
-	i.metrics.queriedChunks.Observe(float64(numChunks))
-	level.Debug(spanLog).Log("streams", numSeries)
-	level.Debug(spanLog).Log("chunks", numChunks)
-	return err
-}
-
-// QueryExemplars implements service.IngesterServer
-func (i *Ingester) QueryExemplars(ctx context.Context, req *client.ExemplarQueryRequest) (*client.ExemplarQueryResponse, error) {
-	if !i.cfg.BlocksStorageEnabled {
-		return nil, errors.New("not supported")
-	}
-
-	return i.v2QueryExemplars(ctx, req)
-}
-
-// LabelValues returns all label values that are associated with a given label name.
-func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesRequest) (*client.LabelValuesResponse, error) {
-	if i.cfg.BlocksStorageEnabled {
-		return i.v2LabelValues(ctx, req)
-	}
-
-	if err := i.checkRunningOrStopping(); err != nil {
-		return nil, err
-	}
-
-	i.userStatesMtx.RLock()
-	defer i.userStatesMtx.RUnlock()
-	state, ok, err := i.userStates.getViaContext(ctx)
-	if err != nil {
-		return nil, err
-	} else if !ok {
-		return &client.LabelValuesResponse{}, nil
-	}
-
-	resp := &client.LabelValuesResponse{}
-	resp.LabelValues = append(resp.LabelValues, state.index.LabelValues(req.LabelName)...)
-
-	return resp, nil
-}
-
-func (i *Ingester) LabelValuesStream(req *client.LabelValuesRequest, stream client.Ingester_LabelValuesStreamServer) error {
-	if i.cfg.BlocksStorageEnabled {
-		return i.v2LabelValuesStream(req, stream)
-	}
-
-	resp, err := i.LabelValues(stream.Context(), req)
-	if err != nil {
-		return err
-	}
-
-	return client.SendAsBatchToStream(len(resp.LabelValues), metadataStreamBatchSize, func(i, j int) error {
-		resp := &client.LabelValuesStreamResponse{
-			LabelValues: resp.LabelValues[i:j],
-		}
-		return client.SendLabelValuesStream(stream, resp)
-	})
-}
-
-// LabelNames returns all the label names.
-func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest) (*client.LabelNamesResponse, error) {
-	if i.cfg.BlocksStorageEnabled {
-		return i.v2LabelNames(ctx, req)
-	}
-
-	if err := i.checkRunningOrStopping(); err != nil {
-		return nil, err
-	}
-
-	i.userStatesMtx.RLock()
-	defer i.userStatesMtx.RUnlock()
-	state, ok, err := i.userStates.getViaContext(ctx)
-	if err != nil {
-		return nil, err
-	} else if !ok {
-		return &client.LabelNamesResponse{}, nil
-	}
-
-	resp := &client.LabelNamesResponse{}
-	resp.LabelNames = append(resp.LabelNames, state.index.LabelNames()...)
-
-	return resp, nil
-}
-
-// LabelNamesStream streams all the label names.
-func (i *Ingester) LabelNamesStream(req *client.LabelNamesRequest, stream client.Ingester_LabelNamesStreamServer) error {
-	if i.cfg.BlocksStorageEnabled {
-		return i.v2LabelNamesStream(req, stream)
-	}
-
-	resp, err := i.LabelNames(stream.Context(), req)
-	if err != nil {
-		return err
-	}
-
-	return client.SendAsBatchToStream(len(resp.LabelNames), metadataStreamBatchSize, func(i, j int) error {
-		resp := &client.LabelNamesStreamResponse{
-			LabelNames: resp.LabelNames[i:j],
-		}
-		return client.SendLabelNamesStream(stream, resp)
-	})
-}
-
-// MetricsForLabelMatchers returns all the metrics which match a set of matchers.
-func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.MetricsForLabelMatchersRequest) (*client.MetricsForLabelMatchersResponse, error) {
-	if i.cfg.BlocksStorageEnabled {
-		return i.v2MetricsForLabelMatchers(ctx, req)
-	}
-
-	if err := i.checkRunningOrStopping(); err != nil {
-		return nil, err
-	}
-
-	i.userStatesMtx.RLock()
-	defer i.userStatesMtx.RUnlock()
-	state, ok, err := i.userStates.getViaContext(ctx)
-	if err != nil {
-		return nil, err
-	} else if !ok {
-		return &client.MetricsForLabelMatchersResponse{}, nil
-	}
-
-	// TODO Right now we ignore start and end.
- _, _, matchersSet, err := client.FromMetricsForLabelMatchersRequest(req) - if err != nil { - return nil, err - } - - lss := map[model.Fingerprint]labels.Labels{} - for _, matchers := range matchersSet { - if err := state.forSeriesMatching(ctx, matchers, func(ctx context.Context, fp model.Fingerprint, series *memorySeries) error { - if _, ok := lss[fp]; !ok { - lss[fp] = series.metric - } - return nil - }, nil, 0); err != nil { - return nil, err - } - } - - result := &client.MetricsForLabelMatchersResponse{ - Metric: make([]*cortexpb.Metric, 0, len(lss)), - } - for _, ls := range lss { - result.Metric = append(result.Metric, &cortexpb.Metric{Labels: cortexpb.FromLabelsToLabelAdapters(ls)}) - } - - return result, nil -} - -func (i *Ingester) MetricsForLabelMatchersStream(req *client.MetricsForLabelMatchersRequest, stream client.Ingester_MetricsForLabelMatchersStreamServer) error { - if i.cfg.BlocksStorageEnabled { - return i.v2MetricsForLabelMatchersStream(req, stream) - } - - resp, err := i.MetricsForLabelMatchers(stream.Context(), req) - - if err != nil { - return err - } - - return client.SendAsBatchToStream(len(resp.Metric), metadataStreamBatchSize, func(i, j int) error { - resp := &client.MetricsForLabelMatchersStreamResponse{ - Metric: resp.Metric[i:j], - } - return client.SendMetricsForLabelMatchersStream(stream, resp) - }) -} - -// MetricsMetadata returns all the metric metadata of a user. -func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) (*client.MetricsMetadataResponse, error) { - i.userStatesMtx.RLock() - if err := i.checkRunningOrStopping(); err != nil { - i.userStatesMtx.RUnlock() - return nil, err - } - i.userStatesMtx.RUnlock() - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - userMetadata := i.getUserMetadata(userID) - - if userMetadata == nil { - return &client.MetricsMetadataResponse{}, nil - } - - return &client.MetricsMetadataResponse{Metadata: userMetadata.toClientMetadata()}, nil -} - -// UserStats returns ingestion statistics for the current user. -func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UserStatsResponse, error) { - if i.cfg.BlocksStorageEnabled { - return i.v2UserStats(ctx, req) - } - - if err := i.checkRunningOrStopping(); err != nil { - return nil, err - } - - i.userStatesMtx.RLock() - defer i.userStatesMtx.RUnlock() - state, ok, err := i.userStates.getViaContext(ctx) - if err != nil { - return nil, err - } else if !ok { - return &client.UserStatsResponse{}, nil - } - - apiRate := state.ingestedAPISamples.Rate() - ruleRate := state.ingestedRuleSamples.Rate() - return &client.UserStatsResponse{ - IngestionRate: apiRate + ruleRate, - ApiIngestionRate: apiRate, - RuleIngestionRate: ruleRate, - NumSeries: uint64(state.fpToSeries.length()), - }, nil -} - -// AllUserStats returns ingestion statistics for all users known to this ingester. 
-func (i *Ingester) AllUserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UsersStatsResponse, error) { - if i.cfg.BlocksStorageEnabled { - return i.v2AllUserStats(ctx, req) - } - - if err := i.checkRunningOrStopping(); err != nil { - return nil, err - } - - i.userStatesMtx.RLock() - defer i.userStatesMtx.RUnlock() - users := i.userStates.cp() - - response := &client.UsersStatsResponse{ - Stats: make([]*client.UserIDStatsResponse, 0, len(users)), - } - for userID, state := range users { - apiRate := state.ingestedAPISamples.Rate() - ruleRate := state.ingestedRuleSamples.Rate() - response.Stats = append(response.Stats, &client.UserIDStatsResponse{ - UserId: userID, - Data: &client.UserStatsResponse{ - IngestionRate: apiRate + ruleRate, - ApiIngestionRate: apiRate, - RuleIngestionRate: ruleRate, - NumSeries: uint64(state.fpToSeries.length()), - }, - }) - } - return response, nil -} - -// CheckReady is the readiness handler used to indicate to k8s when the ingesters -// are ready for the addition or removal of another ingester. -func (i *Ingester) CheckReady(ctx context.Context) error { - if err := i.checkRunningOrStopping(); err != nil { - return fmt.Errorf("ingester not ready: %v", err) - } - return i.lifecycler.CheckReady(ctx) -} - -// labels will be copied if needed. -func (i *Ingester) updateActiveSeries(userID string, now time.Time, labels []cortexpb.LabelAdapter) { - i.userStatesMtx.RLock() - defer i.userStatesMtx.RUnlock() - - i.userStates.updateActiveSeriesForUser(userID, now, cortexpb.FromLabelAdaptersToLabels(labels)) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go deleted file mode 100644 index 653126a64..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ /dev/null @@ -1,2364 +0,0 @@ -package ingester - -import ( - "context" - "fmt" - "io" - "math" - "net/http" - "os" - "path/filepath" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/shipper" - "github.com/weaveworks/common/httpgrpc" - "go.uber.org/atomic" - "golang.org/x/sync/errgroup" - - "github.com/cortexproject/cortex/pkg/chunk/encoding" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/concurrency" - "github.com/cortexproject/cortex/pkg/util/extract" - logutil "github.com/cortexproject/cortex/pkg/util/log" - util_math "github.com/cortexproject/cortex/pkg/util/math" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/validation" -) 
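For context on the statistics handlers above: UserStats and AllUserStats report a tenant's ingestion rate as the sum of two exponentially weighted moving averages, one fed by API pushes and one by rule pushes, ticked at a fixed interval. A minimal self-contained sketch of that pattern; ewmaRate here is a simplified stand-in for util_math.EwmaRate (whose exact smoothing may differ), reusing the 0.2 alpha and one-second tick seen in NewV2 below:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// ewmaRate counts events with Add; a periodic Tick folds the count into an
// exponentially weighted moving average, expressed in events per second.
type ewmaRate struct {
	newEvents atomic.Int64 // events observed since the last tick
	alpha     float64      // smoothing factor
	interval  time.Duration
	lastRate  float64
	init      bool
}

func newEWMARate(alpha float64, interval time.Duration) *ewmaRate {
	return &ewmaRate{alpha: alpha, interval: interval}
}

func (r *ewmaRate) Add(n int64) { r.newEvents.Add(n) }

// Tick would be driven by a ticker, like rateUpdateTicker in the update loop.
func (r *ewmaRate) Tick() {
	instant := float64(r.newEvents.Swap(0)) / r.interval.Seconds()
	if r.init {
		r.lastRate += r.alpha * (instant - r.lastRate)
	} else {
		r.lastRate, r.init = instant, true
	}
}

func (r *ewmaRate) Rate() float64 { return r.lastRate }

func main() {
	api := newEWMARate(0.2, time.Second)
	rule := newEWMARate(0.2, time.Second)
	api.Add(800) // 800 API samples pushed during this tick
	rule.Add(200)
	api.Tick()
	rule.Tick()
	// UserStatsResponse.IngestionRate is the sum of both per-source rates.
	fmt.Printf("ingestion rate: %.0f samples/s\n", api.Rate()+rule.Rate())
}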
-
-const (
-	// RingKey is the key under which we store the ingesters ring in the KVStore.
-	RingKey = "ring"
-)
-
-const (
-	errTSDBCreateIncompatibleState = "cannot create a new TSDB while the ingester is not in active state (current state: %s)"
-	errTSDBIngest = "err: %v. timestamp=%s, series=%s" // Using errors.Wrap puts the message before the error, and if the series is too long, it's truncated.
-	errTSDBIngestExemplar = "err: %v. timestamp=%s, series=%s, exemplar=%s"
-
-	// Jitter applied to the idle timeout to prevent compaction in all ingesters concurrently.
-	compactionIdleTimeoutJitter = 0.25
-
-	instanceIngestionRateTickInterval = time.Second
-)
-
-var (
-	errExemplarRef = errors.New("exemplars not ingested because series not already present")
-)
-
-// Shipper interface is used to have an easy way to mock it in tests.
-type Shipper interface {
-	Sync(ctx context.Context) (uploaded int, err error)
-}
-
-type tsdbState int
-
-const (
-	active tsdbState = iota // Pushes are allowed.
-	activeShipping // Pushes are allowed. Blocks shipping is in progress.
-	forceCompacting // TSDB is being force-compacted.
-	closing // Used while closing idle TSDB.
-	closed // Used to avoid setting closing back to active in the closeAndDeleteIdleUserTSDBs method.
-)
-
-// Describes result of TSDB-close check. String is used as metric label.
-type tsdbCloseCheckResult string
-
-const (
-	tsdbIdle tsdbCloseCheckResult = "idle" // Not reported via metrics. Metrics use tsdbIdleClosed on success.
-	tsdbShippingDisabled tsdbCloseCheckResult = "shipping_disabled"
-	tsdbNotIdle tsdbCloseCheckResult = "not_idle"
-	tsdbNotCompacted tsdbCloseCheckResult = "not_compacted"
-	tsdbNotShipped tsdbCloseCheckResult = "not_shipped"
-	tsdbCheckFailed tsdbCloseCheckResult = "check_failed"
-	tsdbCloseFailed tsdbCloseCheckResult = "close_failed"
-	tsdbNotActive tsdbCloseCheckResult = "not_active"
-	tsdbDataRemovalFailed tsdbCloseCheckResult = "data_removal_failed"
-	tsdbTenantMarkedForDeletion tsdbCloseCheckResult = "tenant_marked_for_deletion"
-	tsdbIdleClosed tsdbCloseCheckResult = "idle_closed" // Success.
-)
-
-func (r tsdbCloseCheckResult) shouldClose() bool {
-	return r == tsdbIdle || r == tsdbTenantMarkedForDeletion
-}
-
-// QueryStreamType defines type of function to use when doing query-stream operation.
-type QueryStreamType int
-
-const (
-	QueryStreamDefault QueryStreamType = iota // Use default configured value.
-	QueryStreamSamples // Stream individual samples.
-	QueryStreamChunks // Stream entire chunks.
-)
-
-type userTSDB struct {
-	db *tsdb.DB
-	userID string
-	activeSeries *ActiveSeries
-	seriesInMetric *metricCounter
-	limiter *Limiter
-
-	instanceSeriesCount *atomic.Int64 // Shared across all userTSDB instances created by ingester.
-	instanceLimitsFn func() *InstanceLimits
-
-	stateMtx sync.RWMutex
-	state tsdbState
-	pushesInFlight sync.WaitGroup // Increased with stateMtx read lock held, only if state == active or activeShipping.
-
-	// Used to detect idle TSDBs.
-	lastUpdate atomic.Int64
-
-	// Thanos shipper used to ship blocks to the storage.
-	shipper Shipper
-
-	// When deletion marker is found for the tenant (checked before shipping),
-	// shipping stops and TSDB is closed before reaching idle timeout time (if enabled).
-	deletionMarkFound atomic.Bool
-
-	// Unix timestamp of last deletion mark check.
-	lastDeletionMarkCheck atomic.Int64
-
-	// for statistics
-	ingestedAPISamples *util_math.EwmaRate
-	ingestedRuleSamples *util_math.EwmaRate
-
-	// Cached shipped blocks.
- shippedBlocksMtx sync.Mutex - shippedBlocks map[ulid.ULID]struct{} -} - -// Explicitly wrapping the tsdb.DB functions that we use. - -func (u *userTSDB) Appender(ctx context.Context) storage.Appender { - return u.db.Appender(ctx) -} - -func (u *userTSDB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return u.db.Querier(ctx, mint, maxt) -} - -func (u *userTSDB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - return u.db.ChunkQuerier(ctx, mint, maxt) -} - -func (u *userTSDB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { - return u.db.ExemplarQuerier(ctx) -} - -func (u *userTSDB) Head() *tsdb.Head { - return u.db.Head() -} - -func (u *userTSDB) Blocks() []*tsdb.Block { - return u.db.Blocks() -} - -func (u *userTSDB) Close() error { - return u.db.Close() -} - -func (u *userTSDB) Compact() error { - return u.db.Compact() -} - -func (u *userTSDB) StartTime() (int64, error) { - return u.db.StartTime() -} - -func (u *userTSDB) casState(from, to tsdbState) bool { - u.stateMtx.Lock() - defer u.stateMtx.Unlock() - - if u.state != from { - return false - } - u.state = to - return true -} - -// compactHead compacts the Head block at specified block durations avoiding a single huge block. -func (u *userTSDB) compactHead(blockDuration int64) error { - if !u.casState(active, forceCompacting) { - return errors.New("TSDB head cannot be compacted because it is not in active state (possibly being closed or blocks shipping in progress)") - } - - defer u.casState(forceCompacting, active) - - // Ingestion of samples in parallel with forced compaction can lead to overlapping blocks, - // and possible invalidation of the references returned from Appender.GetRef(). - // So we wait for existing in-flight requests to finish. Future push requests would fail until compaction is over. - u.pushesInFlight.Wait() - - h := u.Head() - - minTime, maxTime := h.MinTime(), h.MaxTime() - - for (minTime/blockDuration)*blockDuration != (maxTime/blockDuration)*blockDuration { - // Data in Head spans across multiple block ranges, so we break it into blocks here. - // Block max time is exclusive, so we do a -1 here. - blockMaxTime := ((minTime/blockDuration)+1)*blockDuration - 1 - if err := u.db.CompactHead(tsdb.NewRangeHead(h, minTime, blockMaxTime)); err != nil { - return err - } - - // Get current min/max times after compaction. - minTime, maxTime = h.MinTime(), h.MaxTime() - } - - return u.db.CompactHead(tsdb.NewRangeHead(h, minTime, maxTime)) -} - -// PreCreation implements SeriesLifecycleCallback interface. -func (u *userTSDB) PreCreation(metric labels.Labels) error { - if u.limiter == nil { - return nil - } - - // Verify ingester's global limit - gl := u.instanceLimitsFn() - if gl != nil && gl.MaxInMemorySeries > 0 { - if series := u.instanceSeriesCount.Load(); series >= gl.MaxInMemorySeries { - return errMaxSeriesLimitReached - } - } - - // Total series limit. - if err := u.limiter.AssertMaxSeriesPerUser(u.userID, int(u.Head().NumSeries())); err != nil { - return err - } - - // Series per metric name limit. - metricName, err := extract.MetricNameFromLabels(metric) - if err != nil { - return err - } - if err := u.seriesInMetric.canAddSeriesFor(u.userID, metricName); err != nil { - return err - } - - return nil -} - -// PostCreation implements SeriesLifecycleCallback interface. 
-func (u *userTSDB) PostCreation(metric labels.Labels) { - u.instanceSeriesCount.Inc() - - metricName, err := extract.MetricNameFromLabels(metric) - if err != nil { - // This should never happen because it has already been checked in PreCreation(). - return - } - u.seriesInMetric.increaseSeriesForMetric(metricName) -} - -// PostDeletion implements SeriesLifecycleCallback interface. -func (u *userTSDB) PostDeletion(metrics ...labels.Labels) { - u.instanceSeriesCount.Sub(int64(len(metrics))) - - for _, metric := range metrics { - metricName, err := extract.MetricNameFromLabels(metric) - if err != nil { - // This should never happen because it has already been checked in PreCreation(). - continue - } - u.seriesInMetric.decreaseSeriesForMetric(metricName) - } -} - -// blocksToDelete filters the input blocks and returns the blocks which are safe to be deleted from the ingester. -func (u *userTSDB) blocksToDelete(blocks []*tsdb.Block) map[ulid.ULID]struct{} { - if u.db == nil { - return nil - } - deletable := tsdb.DefaultBlocksToDelete(u.db)(blocks) - if u.shipper == nil { - return deletable - } - - shippedBlocks := u.getCachedShippedBlocks() - - result := map[ulid.ULID]struct{}{} - for shippedID := range shippedBlocks { - if _, ok := deletable[shippedID]; ok { - result[shippedID] = struct{}{} - } - } - return result -} - -// updateCachedShipperBlocks reads the shipper meta file and updates the cached shipped blocks. -func (u *userTSDB) updateCachedShippedBlocks() error { - shipperMeta, err := shipper.ReadMetaFile(u.db.Dir()) - if os.IsNotExist(err) { - // If the meta file doesn't exist it means the shipper hasn't run yet. - shipperMeta = &shipper.Meta{} - } else if err != nil { - return err - } - - // Build a map. - shippedBlocks := make(map[ulid.ULID]struct{}, len(shipperMeta.Uploaded)) - for _, blockID := range shipperMeta.Uploaded { - shippedBlocks[blockID] = struct{}{} - } - - // Cache it. - u.shippedBlocksMtx.Lock() - u.shippedBlocks = shippedBlocks - u.shippedBlocksMtx.Unlock() - - return nil -} - -// getCachedShippedBlocks returns the cached shipped blocks. -func (u *userTSDB) getCachedShippedBlocks() map[ulid.ULID]struct{} { - u.shippedBlocksMtx.Lock() - defer u.shippedBlocksMtx.Unlock() - - // It's safe to directly return the map because it's never updated in-place. - return u.shippedBlocks -} - -// getOldestUnshippedBlockTime returns the unix timestamp with milliseconds precision of the oldest -// TSDB block not shipped to the storage yet, or 0 if all blocks have been shipped. -func (u *userTSDB) getOldestUnshippedBlockTime() uint64 { - shippedBlocks := u.getCachedShippedBlocks() - oldestTs := uint64(0) - - for _, b := range u.Blocks() { - if _, ok := shippedBlocks[b.Meta().ULID]; ok { - continue - } - - if oldestTs == 0 || b.Meta().ULID.Time() < oldestTs { - oldestTs = b.Meta().ULID.Time() - } - } - - return oldestTs -} - -func (u *userTSDB) isIdle(now time.Time, idle time.Duration) bool { - lu := u.lastUpdate.Load() - - return time.Unix(lu, 0).Add(idle).Before(now) -} - -func (u *userTSDB) setLastUpdate(t time.Time) { - u.lastUpdate.Store(t.Unix()) -} - -// Checks if TSDB can be closed. -func (u *userTSDB) shouldCloseTSDB(idleTimeout time.Duration) tsdbCloseCheckResult { - if u.deletionMarkFound.Load() { - return tsdbTenantMarkedForDeletion - } - - if !u.isIdle(time.Now(), idleTimeout) { - return tsdbNotIdle - } - - // If head is not compacted, we cannot close this yet. - if u.Head().NumSeries() > 0 { - return tsdbNotCompacted - } - - // Ensure that all blocks have been shipped. 
-	if oldest := u.getOldestUnshippedBlockTime(); oldest > 0 {
-		return tsdbNotShipped
-	}
-
-	return tsdbIdle
-}
-
-// TSDBState holds data structures used by the TSDB storage engine
-type TSDBState struct {
-	dbs map[string]*userTSDB // tsdb sharded by userID
-	bucket objstore.Bucket
-
-	// Value used by shipper as external label.
-	shipperIngesterID string
-
-	subservices *services.Manager
-
-	tsdbMetrics *tsdbMetrics
-
-	forceCompactTrigger chan requestWithUsersAndCallback
-	shipTrigger chan requestWithUsersAndCallback
-
-	// Timeout chosen for idle compactions.
-	compactionIdleTimeout time.Duration
-
-	// Number of series in memory, across all tenants.
-	seriesCount atomic.Int64
-
-	// Head compactions metrics.
-	compactionsTriggered prometheus.Counter
-	compactionsFailed prometheus.Counter
-	walReplayTime prometheus.Histogram
-	appenderAddDuration prometheus.Histogram
-	appenderCommitDuration prometheus.Histogram
-	idleTsdbChecks *prometheus.CounterVec
-}
-
-type requestWithUsersAndCallback struct {
-	users *util.AllowedTenants // if nil, all tenants are allowed.
-	callback chan<- struct{} // when compaction/shipping is finished, this channel is closed
-}
-
-func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer) TSDBState {
-	idleTsdbChecks := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
-		Name: "cortex_ingester_idle_tsdb_checks_total",
-		Help: "The total number of various results for idle TSDB checks.",
-	}, []string{"result"})
-
-	idleTsdbChecks.WithLabelValues(string(tsdbShippingDisabled))
-	idleTsdbChecks.WithLabelValues(string(tsdbNotIdle))
-	idleTsdbChecks.WithLabelValues(string(tsdbNotCompacted))
-	idleTsdbChecks.WithLabelValues(string(tsdbNotShipped))
-	idleTsdbChecks.WithLabelValues(string(tsdbCheckFailed))
-	idleTsdbChecks.WithLabelValues(string(tsdbCloseFailed))
-	idleTsdbChecks.WithLabelValues(string(tsdbNotActive))
-	idleTsdbChecks.WithLabelValues(string(tsdbDataRemovalFailed))
-	idleTsdbChecks.WithLabelValues(string(tsdbTenantMarkedForDeletion))
-	idleTsdbChecks.WithLabelValues(string(tsdbIdleClosed))
-
-	return TSDBState{
-		dbs: make(map[string]*userTSDB),
-		bucket: bucketClient,
-		tsdbMetrics: newTSDBMetrics(registerer),
-		forceCompactTrigger: make(chan requestWithUsersAndCallback),
-		shipTrigger: make(chan requestWithUsersAndCallback),
-
-		compactionsTriggered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_ingester_tsdb_compactions_triggered_total",
-			Help: "Total number of triggered compactions.",
-		}),
-
-		compactionsFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
-			Name: "cortex_ingester_tsdb_compactions_failed_total",
-			Help: "Total number of compactions that failed.",
-		}),
-		walReplayTime: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
-			Name: "cortex_ingester_tsdb_wal_replay_duration_seconds",
-			Help: "The total time it takes to open and replay a TSDB WAL.",
-			Buckets: prometheus.DefBuckets,
-		}),
-		appenderAddDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
-			Name: "cortex_ingester_tsdb_appender_add_duration_seconds",
-			Help: "The total time it takes for a push request to add samples to the TSDB appender.",
-			Buckets: []float64{.001, .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
-		}),
-		appenderCommitDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
-			Name: "cortex_ingester_tsdb_appender_commit_duration_seconds",
-			Help: "The total time it takes for a push request to commit samples appended to TSDB.",
-			Buckets: []float64{.001, .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
-		}),
-
-		idleTsdbChecks: idleTsdbChecks,
-	}
-}
-
-// NewV2 returns a new Ingester that uses Cortex block storage instead of chunks storage.
-func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) {
-	bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", logger, registerer)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to create the bucket client")
-	}
-
-	i := &Ingester{
-		cfg: cfg,
-		clientConfig: clientConfig,
-		limits: limits,
-		chunkStore: nil,
-		usersMetadata: map[string]*userMetricsMetadata{},
-		wal: &noopWAL{},
-		TSDBState: newTSDBState(bucketClient, registerer),
-		logger: logger,
-		ingestionRate: util_math.NewEWMARate(0.2, instanceIngestionRateTickInterval),
-	}
-	i.metrics = newIngesterMetrics(registerer, false, cfg.ActiveSeriesMetricsEnabled, i.getInstanceLimits, i.ingestionRate, &i.inflightPushRequests)
-
-	// Replace specific metrics which we can't directly track but we need to read
-	// them from the underlying system (i.e. TSDB).
-	if registerer != nil {
-		registerer.Unregister(i.metrics.memSeries)
-
-		promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{
-			Name: "cortex_ingester_memory_series",
-			Help: "The current number of series in memory.",
-		}, i.getMemorySeriesMetric)
-
-		promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{
-			Name: "cortex_ingester_oldest_unshipped_block_timestamp_seconds",
-			Help: "Unix timestamp of the oldest TSDB block not shipped to the storage yet. 0 if ingester has no blocks or all blocks have been shipped.",
-		}, i.getOldestUnshippedBlockMetric)
-	}
-
-	i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", RingKey, cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown, logger, prometheus.WrapRegistererWithPrefix("cortex_", registerer))
-	if err != nil {
-		return nil, err
-	}
-	i.subservicesWatcher = services.NewFailureWatcher()
-	i.subservicesWatcher.WatchService(i.lifecycler)
-
-	// Init the limiter and instantiate the user states which depend on it
-	i.limiter = NewLimiter(
-		limits,
-		i.lifecycler,
-		cfg.DistributorShardingStrategy,
-		cfg.DistributorShardByAllLabels,
-		cfg.LifecyclerConfig.RingConfig.ReplicationFactor,
-		cfg.LifecyclerConfig.RingConfig.ZoneAwarenessEnabled)
-
-	i.TSDBState.shipperIngesterID = i.lifecycler.ID
-
-	// Apply positive jitter only to ensure that the minimum timeout is adhered to.
-	i.TSDBState.compactionIdleTimeout = util.DurationWithPositiveJitter(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout, compactionIdleTimeoutJitter)
-	level.Info(i.logger).Log("msg", "TSDB idle compaction timeout set", "timeout", i.TSDBState.compactionIdleTimeout)
-
-	i.BasicService = services.NewBasicService(i.startingV2, i.updateLoop, i.stoppingV2)
-	return i, nil
-}
-
-// NewV2ForFlusher is a special version of ingester used by Flusher. This ingester is not ingesting anything, its only purpose is to react
-// to the Flush method and flush all opened TSDBs when called.
-func NewV2ForFlusher(cfg Config, limits *validation.Overrides, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { - bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", logger, registerer) - if err != nil { - return nil, errors.Wrap(err, "failed to create the bucket client") - } - - i := &Ingester{ - cfg: cfg, - limits: limits, - wal: &noopWAL{}, - TSDBState: newTSDBState(bucketClient, registerer), - logger: logger, - } - i.metrics = newIngesterMetrics(registerer, false, false, i.getInstanceLimits, nil, &i.inflightPushRequests) - - i.TSDBState.shipperIngesterID = "flusher" - - // This ingester will not start any subservices (lifecycler, compaction, shipping), - // and will only open TSDBs, wait for Flush to be called, and then close TSDBs again. - i.BasicService = services.NewIdleService(i.startingV2ForFlusher, i.stoppingV2ForFlusher) - return i, nil -} - -func (i *Ingester) startingV2ForFlusher(ctx context.Context) error { - if err := i.openExistingTSDB(ctx); err != nil { - // Try to rollback and close opened TSDBs before halting the ingester. - i.closeAllTSDB() - - return errors.Wrap(err, "opening existing TSDBs") - } - - // Don't start any sub-services (lifecycler, compaction, shipper) at all. - return nil -} - -func (i *Ingester) startingV2(ctx context.Context) error { - if err := i.openExistingTSDB(ctx); err != nil { - // Try to rollback and close opened TSDBs before halting the ingester. - i.closeAllTSDB() - - return errors.Wrap(err, "opening existing TSDBs") - } - - // Important: we want to keep lifecycler running until we ask it to stop, so we need to give it independent context - if err := i.lifecycler.StartAsync(context.Background()); err != nil { - return errors.Wrap(err, "failed to start lifecycler") - } - if err := i.lifecycler.AwaitRunning(ctx); err != nil { - return errors.Wrap(err, "failed to start lifecycler") - } - - // let's start the rest of subservices via manager - servs := []services.Service(nil) - - compactionService := services.NewBasicService(nil, i.compactionLoop, nil) - servs = append(servs, compactionService) - - if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { - shippingService := services.NewBasicService(nil, i.shipBlocksLoop, nil) - servs = append(servs, shippingService) - } - - if i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout > 0 { - interval := i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBInterval - if interval == 0 { - interval = cortex_tsdb.DefaultCloseIdleTSDBInterval - } - closeIdleService := services.NewTimerService(interval, nil, i.closeAndDeleteIdleUserTSDBs, nil) - servs = append(servs, closeIdleService) - } - - var err error - i.TSDBState.subservices, err = services.NewManager(servs...) - if err == nil { - err = services.StartManagerAndAwaitHealthy(ctx, i.TSDBState.subservices) - } - return errors.Wrap(err, "failed to start ingester components") -} - -func (i *Ingester) stoppingV2ForFlusher(_ error) error { - if !i.cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown { - i.closeAllTSDB() - } - return nil -} - -// runs when V2 ingester is stopping -func (i *Ingester) stoppingV2(_ error) error { - // It's important to wait until shipper is finished, - // because the blocks transfer should start only once it's guaranteed - // there's no shipping on-going. 
-
-	if err := services.StopManagerAndAwaitStopped(context.Background(), i.TSDBState.subservices); err != nil {
-		level.Warn(i.logger).Log("msg", "failed to stop ingester subservices", "err", err)
-	}
-
-	// Next initiate our graceful exit from the ring.
-	if err := services.StopAndAwaitTerminated(context.Background(), i.lifecycler); err != nil {
-		level.Warn(i.logger).Log("msg", "failed to stop ingester lifecycler", "err", err)
-	}
-
-	if !i.cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown {
-		i.closeAllTSDB()
-	}
-	return nil
-}
-
-func (i *Ingester) updateLoop(ctx context.Context) error {
-	if limits := i.getInstanceLimits(); limits != nil && *limits != (InstanceLimits{}) {
-		// This check will not cover enabling instance limits in runtime, but it will do for now.
-		logutil.WarnExperimentalUse("ingester instance limits")
-	}
-
-	rateUpdateTicker := time.NewTicker(i.cfg.RateUpdatePeriod)
-	defer rateUpdateTicker.Stop()
-
-	ingestionRateTicker := time.NewTicker(instanceIngestionRateTickInterval)
-	defer ingestionRateTicker.Stop()
-
-	var activeSeriesTickerChan <-chan time.Time
-	if i.cfg.ActiveSeriesMetricsEnabled {
-		t := time.NewTicker(i.cfg.ActiveSeriesMetricsUpdatePeriod)
-		activeSeriesTickerChan = t.C
-		defer t.Stop()
-	}
-
-	// Similarly to the above, this is a hardcoded value.
-	metadataPurgeTicker := time.NewTicker(metadataPurgePeriod)
-	defer metadataPurgeTicker.Stop()
-
-	for {
-		select {
-		case <-metadataPurgeTicker.C:
-			i.purgeUserMetricsMetadata()
-		case <-ingestionRateTicker.C:
-			i.ingestionRate.Tick()
-		case <-rateUpdateTicker.C:
-			i.userStatesMtx.RLock()
-			for _, db := range i.TSDBState.dbs {
-				db.ingestedAPISamples.Tick()
-				db.ingestedRuleSamples.Tick()
-			}
-			i.userStatesMtx.RUnlock()
-
-		case <-activeSeriesTickerChan:
-			i.v2UpdateActiveSeries()
-
-		case <-ctx.Done():
-			return nil
-		case err := <-i.subservicesWatcher.Chan():
-			return errors.Wrap(err, "ingester subservice failed")
-		}
-	}
-}
-
-func (i *Ingester) v2UpdateActiveSeries() {
-	purgeTime := time.Now().Add(-i.cfg.ActiveSeriesMetricsIdleTimeout)
-
-	for _, userID := range i.getTSDBUsers() {
-		userDB := i.getTSDB(userID)
-		if userDB == nil {
-			continue
-		}
-
-		userDB.activeSeries.Purge(purgeTime)
-		i.metrics.activeSeriesPerUser.WithLabelValues(userID).Set(float64(userDB.activeSeries.Active()))
-	}
-}
-
-// GetRef() is an extra method added to TSDB to let Cortex check before calling Add()
-type extendedAppender interface {
-	storage.Appender
-	storage.GetRef
-}
-
-// v2Push adds metrics to a block
-func (i *Ingester) v2Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
-	var firstPartialErr error
-
-	// NOTE: because we use `unsafe` in deserialisation, we must not
-	// retain anything from `req` past the call to ReuseSlice
-	defer cortexpb.ReuseSlice(req.Timeseries)
-
-	userID, err := tenant.TenantID(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	il := i.getInstanceLimits()
-	if il != nil && il.MaxIngestionRate > 0 {
-		if rate := i.ingestionRate.Rate(); rate >= il.MaxIngestionRate {
-			return nil, errMaxSamplesPushRateLimitReached
-		}
-	}
-
-	db, err := i.getOrCreateTSDB(userID, false)
-	if err != nil {
-		return nil, wrapWithUser(err, userID)
-	}
-
-	// Ensure the ingester shutdown procedure hasn't started
-	i.userStatesMtx.RLock()
-	if i.stopped {
-		i.userStatesMtx.RUnlock()
-		return nil, errIngesterStopping
-	}
-	i.userStatesMtx.RUnlock()
-
-	if err := db.acquireAppendLock(); err != nil {
-		return &cortexpb.WriteResponse{}, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(err, userID).Error())
-	}
-	defer db.releaseAppendLock()
-
-	// Given metadata is a best-effort approach, and we don't halt on errors
-	// process it before samples. Otherwise, we risk returning an error before ingestion.
-	ingestedMetadata := i.pushMetadata(ctx, userID, req.GetMetadata())
-
-	// Keep track of some stats which are tracked only if the samples will be
-	// successfully committed
-	var (
-		succeededSamplesCount = 0
-		failedSamplesCount = 0
-		succeededExemplarsCount = 0
-		failedExemplarsCount = 0
-		startAppend = time.Now()
-		sampleOutOfBoundsCount = 0
-		sampleOutOfOrderCount = 0
-		newValueForTimestampCount = 0
-		perUserSeriesLimitCount = 0
-		perMetricSeriesLimitCount = 0
-
-		updateFirstPartial = func(errFn func() error) {
-			if firstPartialErr == nil {
-				firstPartialErr = errFn()
-			}
-		}
-	)
-
-	// Walk the samples, appending them to the user's database
-	app := db.Appender(ctx).(extendedAppender)
-	for _, ts := range req.Timeseries {
-		// The labels must be sorted (in our case, it's guaranteed a write request
-		// has sorted labels once it hits the ingester).
-
-		// Look up a reference for this series.
-		ref, copiedLabels := app.GetRef(cortexpb.FromLabelAdaptersToLabels(ts.Labels))
-
-		// To find out if any sample was added to this series, we keep old value.
-		oldSucceededSamplesCount := succeededSamplesCount
-
-		for _, s := range ts.Samples {
-			var err error
-
-			// If the cached reference exists, we try to use it.
-			if ref != 0 {
-				if _, err = app.Append(ref, copiedLabels, s.TimestampMs, s.Value); err == nil {
-					succeededSamplesCount++
-					continue
-				}
-
-			} else {
-				// Copy the label set because both TSDB and the active series tracker may retain it.
-				copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(ts.Labels)
-
-				// Retain the reference in case there are multiple samples for the series.
-				if ref, err = app.Append(0, copiedLabels, s.TimestampMs, s.Value); err == nil {
-					succeededSamplesCount++
-					continue
-				}
-			}
-
-			failedSamplesCount++
-
-			// Check if the error is a soft error we can proceed on. If so, we keep track
-			// of it, so that we can return it back to the distributor, which will return a
-			// 400 error to the client. The client (Prometheus) will not retry on 400, and
-			// we actually ingested all samples which haven't failed.
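The switch that follows classifies each append error: "soft" per-sample failures (out-of-bounds, out-of-order, duplicate timestamp, series limits) only bump counters and remember the first error to report back as a 400, while anything else aborts and rolls back the whole request. A stripped-down sketch of the same classify-and-continue pattern; the error values and the appendAll helper are illustrative stand-ins, not the real Prometheus storage errors:

package main

import (
	"errors"
	"fmt"
)

var (
	errOutOfOrder = errors.New("out of order sample") // stand-in for storage.ErrOutOfOrderSample
)

// appendAll ingests what it can, remembers the first soft error, and
// aborts only on a hard error, mirroring the v2Push error handling.
func appendAll(samples []int, appendFn func(int) error) (ingested int, firstPartial, hard error) {
	for _, s := range samples {
		err := appendFn(s)
		switch {
		case err == nil:
			ingested++
		case errors.Is(err, errOutOfOrder): // soft: record it, keep going
			if firstPartial == nil {
				firstPartial = err
			}
		default: // hard: the whole request fails and the caller rolls back
			return ingested, firstPartial, err
		}
	}
	return ingested, firstPartial, nil
}

func main() {
	n, partial, hard := appendAll([]int{1, 3, 2, 4}, func(s int) error {
		if s == 2 {
			return errOutOfOrder // simulate one out-of-order sample
		}
		return nil
	})
	fmt.Println(n, partial, hard) // 3 "out of order sample" <nil>
}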
-			switch cause := errors.Cause(err); cause {
-			case storage.ErrOutOfBounds:
-				sampleOutOfBoundsCount++
-				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) })
-				continue
-
-			case storage.ErrOutOfOrderSample:
-				sampleOutOfOrderCount++
-				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) })
-				continue
-
-			case storage.ErrDuplicateSampleForTimestamp:
-				newValueForTimestampCount++
-				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(s.TimestampMs), ts.Labels) })
-				continue
-
-			case errMaxSeriesPerUserLimitExceeded:
-				perUserSeriesLimitCount++
-				updateFirstPartial(func() error { return makeLimitError(perUserSeriesLimit, i.limiter.FormatError(userID, cause)) })
-				continue
-
-			case errMaxSeriesPerMetricLimitExceeded:
-				perMetricSeriesLimitCount++
-				updateFirstPartial(func() error {
-					return makeMetricLimitError(perMetricSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause))
-				})
-				continue
-			}
-
-			// The error looks like an issue on our side, so we should rollback
-			if rollbackErr := app.Rollback(); rollbackErr != nil {
-				level.Warn(i.logger).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr)
-			}
-
-			return nil, wrapWithUser(err, userID)
-		}
-
-		if i.cfg.ActiveSeriesMetricsEnabled && succeededSamplesCount > oldSucceededSamplesCount {
-			db.activeSeries.UpdateSeries(cortexpb.FromLabelAdaptersToLabels(ts.Labels), startAppend, func(l labels.Labels) labels.Labels {
-				// we must already have copied the labels if succeededSamplesCount has been incremented.
-				return copiedLabels
-			})
-		}
-
-		if i.cfg.BlocksStorageConfig.TSDB.MaxExemplars > 0 {
-			// app.AppendExemplar currently doesn't create the series, it must
-			// already exist. If it does not, then drop.
-			if ref == 0 && len(ts.Exemplars) > 0 {
-				updateFirstPartial(func() error {
-					return wrappedTSDBIngestExemplarErr(errExemplarRef,
-						model.Time(ts.Exemplars[0].TimestampMs), ts.Labels, ts.Exemplars[0].Labels)
-				})
-				failedExemplarsCount += len(ts.Exemplars)
-			} else { // Note that else is explicit, rather than a continue in the above if, in case of additional logic post exemplar processing.
-				for _, ex := range ts.Exemplars {
-					e := exemplar.Exemplar{
-						Value: ex.Value,
-						Ts: ex.TimestampMs,
-						HasTs: true,
-						Labels: cortexpb.FromLabelAdaptersToLabelsWithCopy(ex.Labels),
-					}
-
-					if _, err = app.AppendExemplar(ref, nil, e); err == nil {
-						succeededExemplarsCount++
-						continue
-					}
-
-					// Error adding exemplar
-					updateFirstPartial(func() error {
-						return wrappedTSDBIngestExemplarErr(err, model.Time(ex.TimestampMs), ts.Labels, ex.Labels)
-					})
-					failedExemplarsCount++
-				}
-			}
-		}
-	}
-
-	// At this point all samples have been added to the appender, so we can track the time it took.
-	i.TSDBState.appenderAddDuration.Observe(time.Since(startAppend).Seconds())
-
-	startCommit := time.Now()
-	if err := app.Commit(); err != nil {
-		return nil, wrapWithUser(err, userID)
-	}
-	i.TSDBState.appenderCommitDuration.Observe(time.Since(startCommit).Seconds())
-
-	// If only invalid samples are pushed, don't change "last update", as TSDB was not modified.
-	if succeededSamplesCount > 0 {
-		db.setLastUpdate(time.Now())
-	}
-
-	// Increment metrics only if the samples have been successfully committed.
-	// If the code didn't reach this point, it means that we returned an error
-	// which will be converted into an HTTP 5xx and the client should/will retry.
- i.metrics.ingestedSamples.Add(float64(succeededSamplesCount)) - i.metrics.ingestedSamplesFail.Add(float64(failedSamplesCount)) - i.metrics.ingestedExemplars.Add(float64(succeededExemplarsCount)) - i.metrics.ingestedExemplarsFail.Add(float64(failedExemplarsCount)) - - if sampleOutOfBoundsCount > 0 { - validation.DiscardedSamples.WithLabelValues(sampleOutOfBounds, userID).Add(float64(sampleOutOfBoundsCount)) - } - if sampleOutOfOrderCount > 0 { - validation.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Add(float64(sampleOutOfOrderCount)) - } - if newValueForTimestampCount > 0 { - validation.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Add(float64(newValueForTimestampCount)) - } - if perUserSeriesLimitCount > 0 { - validation.DiscardedSamples.WithLabelValues(perUserSeriesLimit, userID).Add(float64(perUserSeriesLimitCount)) - } - if perMetricSeriesLimitCount > 0 { - validation.DiscardedSamples.WithLabelValues(perMetricSeriesLimit, userID).Add(float64(perMetricSeriesLimitCount)) - } - - // Distributor counts both samples and metadata, so for consistency ingester does the same. - i.ingestionRate.Add(int64(succeededSamplesCount + ingestedMetadata)) - - switch req.Source { - case cortexpb.RULE: - db.ingestedRuleSamples.Add(int64(succeededSamplesCount)) - case cortexpb.API: - fallthrough - default: - db.ingestedAPISamples.Add(int64(succeededSamplesCount)) - } - - if firstPartialErr != nil { - code := http.StatusBadRequest - var ve *validationError - if errors.As(firstPartialErr, &ve) { - code = ve.code - } - return &cortexpb.WriteResponse{}, httpgrpc.Errorf(code, wrapWithUser(firstPartialErr, userID).Error()) - } - - return &cortexpb.WriteResponse{}, nil -} - -func (u *userTSDB) acquireAppendLock() error { - u.stateMtx.RLock() - defer u.stateMtx.RUnlock() - - switch u.state { - case active: - case activeShipping: - // Pushes are allowed. - case forceCompacting: - return errors.New("forced compaction in progress") - case closing: - return errors.New("TSDB is closing") - default: - return errors.New("TSDB is not active") - } - - u.pushesInFlight.Add(1) - return nil -} - -func (u *userTSDB) releaseAppendLock() { - u.pushesInFlight.Done() -} - -func (i *Ingester) v2Query(ctx context.Context, req *client.QueryRequest) (*client.QueryResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - from, through, matchers, err := client.FromQueryRequest(req) - if err != nil { - return nil, err - } - - i.metrics.queries.Inc() - - db := i.getTSDB(userID) - if db == nil { - return &client.QueryResponse{}, nil - } - - q, err := db.Querier(ctx, int64(from), int64(through)) - if err != nil { - return nil, err - } - defer q.Close() - - // It's not required to return sorted series because series are sorted by the Cortex querier. - ss := q.Select(false, nil, matchers...) 
- if ss.Err() != nil { - return nil, ss.Err() - } - - numSamples := 0 - - result := &client.QueryResponse{} - for ss.Next() { - series := ss.At() - - ts := cortexpb.TimeSeries{ - Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), - } - - it := series.Iterator() - for it.Next() { - t, v := it.At() - ts.Samples = append(ts.Samples, cortexpb.Sample{Value: v, TimestampMs: t}) - } - - numSamples += len(ts.Samples) - result.Timeseries = append(result.Timeseries, ts) - } - - i.metrics.queriedSeries.Observe(float64(len(result.Timeseries))) - i.metrics.queriedSamples.Observe(float64(numSamples)) - - return result, ss.Err() -} - -func (i *Ingester) v2QueryExemplars(ctx context.Context, req *client.ExemplarQueryRequest) (*client.ExemplarQueryResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - from, through, matchers, err := client.FromExemplarQueryRequest(req) - if err != nil { - return nil, err - } - - i.metrics.queries.Inc() - - db := i.getTSDB(userID) - if db == nil { - return &client.ExemplarQueryResponse{}, nil - } - - q, err := db.ExemplarQuerier(ctx) - if err != nil { - return nil, err - } - - // It's not required to sort series from a single ingester because series are sorted by the Exemplar Storage before returning from Select. - res, err := q.Select(from, through, matchers...) - if err != nil { - return nil, err - } - - numExemplars := 0 - - result := &client.ExemplarQueryResponse{} - for _, es := range res { - ts := cortexpb.TimeSeries{ - Labels: cortexpb.FromLabelsToLabelAdapters(es.SeriesLabels), - Exemplars: cortexpb.FromExemplarsToExemplarProtos(es.Exemplars), - } - - numExemplars += len(ts.Exemplars) - result.Timeseries = append(result.Timeseries, ts) - } - - i.metrics.queriedExemplars.Observe(float64(numExemplars)) - - return result, nil -} - -func (i *Ingester) v2LabelValues(ctx context.Context, req *client.LabelValuesRequest) (*client.LabelValuesResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - labelName, startTimestampMs, endTimestampMs, matchers, err := client.FromLabelValuesRequest(req) - if err != nil { - return nil, err - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - db := i.getTSDB(userID) - if db == nil { - return &client.LabelValuesResponse{}, nil - } - - mint, maxt, err := metadataQueryRange(startTimestampMs, endTimestampMs, db) - if err != nil { - return nil, err - } - - q, err := db.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - defer q.Close() - - vals, _, err := q.LabelValues(labelName, matchers...) 
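The stream handlers that follow (v2LabelValuesStream, v2LabelNamesStream, and the matchers variant further down) all slice their result set into windows of metadataStreamBatchSize entries, sending one gRPC message per window. That loop reduces to this generic pattern; sendInBatches is an illustrative helper, analogous to the client.SendAsBatchToStream used by the chunk-storage handlers earlier in this diff:

package main

import "fmt"

// sendInBatches hands [i, j) windows of a result set of length n to a send
// callback, batchSize entries at a time, stopping on the first error.
func sendInBatches(n, batchSize int, send func(i, j int) error) error {
	for i := 0; i < n; i += batchSize {
		j := i + batchSize
		if j > n {
			j = n // final window may be shorter
		}
		if err := send(i, j); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	values := []string{"a", "b", "c", "d", "e"}
	_ = sendInBatches(len(values), 2, func(i, j int) error {
		fmt.Println(values[i:j]) // [a b] [c d] [e]
		return nil
	})
}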
- if err != nil { - return nil, err - } - - return &client.LabelValuesResponse{ - LabelValues: vals, - }, nil -} - -func (i *Ingester) v2LabelValuesStream(req *client.LabelValuesRequest, stream client.Ingester_LabelValuesStreamServer) error { - resp, err := i.v2LabelValues(stream.Context(), req) - - if err != nil { - return err - } - - for i := 0; i < len(resp.LabelValues); i += metadataStreamBatchSize { - j := i + metadataStreamBatchSize - if j > len(resp.LabelValues) { - j = len(resp.LabelValues) - } - resp := &client.LabelValuesStreamResponse{ - LabelValues: resp.LabelValues[i:j], - } - err := client.SendLabelValuesStream(stream, resp) - if err != nil { - return err - } - } - - return nil -} - -func (i *Ingester) v2LabelNames(ctx context.Context, req *client.LabelNamesRequest) (*client.LabelNamesResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - db := i.getTSDB(userID) - if db == nil { - return &client.LabelNamesResponse{}, nil - } - - mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db) - if err != nil { - return nil, err - } - - q, err := db.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - defer q.Close() - - names, _, err := q.LabelNames() - if err != nil { - return nil, err - } - - return &client.LabelNamesResponse{ - LabelNames: names, - }, nil -} - -func (i *Ingester) v2LabelNamesStream(req *client.LabelNamesRequest, stream client.Ingester_LabelNamesStreamServer) error { - resp, err := i.v2LabelNames(stream.Context(), req) - - if err != nil { - return err - } - - for i := 0; i < len(resp.LabelNames); i += metadataStreamBatchSize { - j := i + metadataStreamBatchSize - if j > len(resp.LabelNames) { - j = len(resp.LabelNames) - } - resp := &client.LabelNamesStreamResponse{ - LabelNames: resp.LabelNames[i:j], - } - err := client.SendLabelNamesStream(stream, resp) - if err != nil { - return err - } - } - - return nil -} - -func (i *Ingester) v2MetricsForLabelMatchers(ctx context.Context, req *client.MetricsForLabelMatchersRequest) (*client.MetricsForLabelMatchersResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - db := i.getTSDB(userID) - if db == nil { - return &client.MetricsForLabelMatchersResponse{}, nil - } - - // Parse the request - _, _, matchersSet, err := client.FromMetricsForLabelMatchersRequest(req) - if err != nil { - return nil, err - } - - mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db) - if err != nil { - return nil, err - } - - q, err := db.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - defer q.Close() - - // Run a query for each matchers set and collect all the results. - var sets []storage.SeriesSet - - for _, matchers := range matchersSet { - // Interrupt if the context has been canceled. - if ctx.Err() != nil { - return nil, ctx.Err() - } - - hints := &storage.SelectHints{ - Start: mint, - End: maxt, - Func: "series", // There is no series function, this token is used for lookups that don't need samples. - } - - seriesSet := q.Select(true, hints, matchers...) - sets = append(sets, seriesSet) - } - - // Generate the response merging all series sets. 
- result := &client.MetricsForLabelMatchersResponse{ - Metric: make([]*cortexpb.Metric, 0), - } - - mergedSet := storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) - for mergedSet.Next() { - // Interrupt if the context has been canceled. - if ctx.Err() != nil { - return nil, ctx.Err() - } - - result.Metric = append(result.Metric, &cortexpb.Metric{ - Labels: cortexpb.FromLabelsToLabelAdapters(mergedSet.At().Labels()), - }) - } - - return result, nil -} - -func (i *Ingester) v2MetricsForLabelMatchersStream(req *client.MetricsForLabelMatchersRequest, stream client.Ingester_MetricsForLabelMatchersStreamServer) error { - result, err := i.v2MetricsForLabelMatchers(stream.Context(), req) - if err != nil { - return err - } - - for i := 0; i < len(result.Metric); i += metadataStreamBatchSize { - j := i + metadataStreamBatchSize - if j > len(result.Metric) { - j = len(result.Metric) - } - resp := &client.MetricsForLabelMatchersStreamResponse{ - Metric: result.Metric[i:j], - } - err := client.SendMetricsForLabelMatchersStream(stream, resp) - if err != nil { - return err - } - } - - return nil -} - -func (i *Ingester) v2UserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UserStatsResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - db := i.getTSDB(userID) - if db == nil { - return &client.UserStatsResponse{}, nil - } - - return createUserStats(db), nil -} - -func (i *Ingester) v2AllUserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UsersStatsResponse, error) { - if err := i.checkRunning(); err != nil { - return nil, err - } - - i.userStatesMtx.RLock() - defer i.userStatesMtx.RUnlock() - - users := i.TSDBState.dbs - - response := &client.UsersStatsResponse{ - Stats: make([]*client.UserIDStatsResponse, 0, len(users)), - } - for userID, db := range users { - response.Stats = append(response.Stats, &client.UserIDStatsResponse{ - UserId: userID, - Data: createUserStats(db), - }) - } - return response, nil -} - -func createUserStats(db *userTSDB) *client.UserStatsResponse { - apiRate := db.ingestedAPISamples.Rate() - ruleRate := db.ingestedRuleSamples.Rate() - return &client.UserStatsResponse{ - IngestionRate: apiRate + ruleRate, - ApiIngestionRate: apiRate, - RuleIngestionRate: ruleRate, - NumSeries: db.Head().NumSeries(), - } -} - -const queryStreamBatchMessageSize = 1 * 1024 * 1024 - -// v2QueryStream streams metrics from a TSDB. This implements the client.IngesterServer interface -func (i *Ingester) v2QueryStream(req *client.QueryRequest, stream client.Ingester_QueryStreamServer) error { - if err := i.checkRunning(); err != nil { - return err - } - - spanlog, ctx := spanlogger.New(stream.Context(), "v2QueryStream") - defer spanlog.Finish() - - userID, err := tenant.TenantID(ctx) - if err != nil { - return err - } - - from, through, matchers, err := client.FromQueryRequest(req) - if err != nil { - return err - } - - i.metrics.queries.Inc() - - db := i.getTSDB(userID) - if db == nil { - return nil - } - - numSamples := 0 - numSeries := 0 - - streamType := QueryStreamSamples - if i.cfg.StreamChunksWhenUsingBlocks { - streamType = QueryStreamChunks - } - - if i.cfg.StreamTypeFn != nil { - runtimeType := i.cfg.StreamTypeFn() - switch runtimeType { - case QueryStreamChunks: - streamType = QueryStreamChunks - case QueryStreamSamples: - streamType = QueryStreamSamples - default: - // no change from config value. 
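-			// For example, a StreamTypeFn wired to runtime configuration can flip an
-			// ingester from sample streaming to chunk streaming without a restart;
-			// any other return value keeps the YAML-configured default above.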
- } - } - - if streamType == QueryStreamChunks { - level.Debug(spanlog).Log("msg", "using v2QueryStreamChunks") - numSeries, numSamples, err = i.v2QueryStreamChunks(ctx, db, int64(from), int64(through), matchers, stream) - } else { - level.Debug(spanlog).Log("msg", "using v2QueryStreamSamples") - numSeries, numSamples, err = i.v2QueryStreamSamples(ctx, db, int64(from), int64(through), matchers, stream) - } - if err != nil { - return err - } - - i.metrics.queriedSeries.Observe(float64(numSeries)) - i.metrics.queriedSamples.Observe(float64(numSamples)) - level.Debug(spanlog).Log("series", numSeries, "samples", numSamples) - return nil -} - -func (i *Ingester) v2QueryStreamSamples(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, stream client.Ingester_QueryStreamServer) (numSeries, numSamples int, _ error) { - q, err := db.Querier(ctx, from, through) - if err != nil { - return 0, 0, err - } - defer q.Close() - - // It's not required to return sorted series because series are sorted by the Cortex querier. - ss := q.Select(false, nil, matchers...) - if ss.Err() != nil { - return 0, 0, ss.Err() - } - - timeseries := make([]cortexpb.TimeSeries, 0, queryStreamBatchSize) - batchSizeBytes := 0 - for ss.Next() { - series := ss.At() - - // convert labels to LabelAdapter - ts := cortexpb.TimeSeries{ - Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), - } - - it := series.Iterator() - for it.Next() { - t, v := it.At() - ts.Samples = append(ts.Samples, cortexpb.Sample{Value: v, TimestampMs: t}) - } - numSamples += len(ts.Samples) - numSeries++ - tsSize := ts.Size() - - if (batchSizeBytes > 0 && batchSizeBytes+tsSize > queryStreamBatchMessageSize) || len(timeseries) >= queryStreamBatchSize { - // Adding this series to the batch would make it too big, - // flush the data and add it to new batch instead. - err = client.SendQueryStream(stream, &client.QueryStreamResponse{ - Timeseries: timeseries, - }) - if err != nil { - return 0, 0, err - } - - batchSizeBytes = 0 - timeseries = timeseries[:0] - } - - timeseries = append(timeseries, ts) - batchSizeBytes += tsSize - } - - // Ensure no error occurred while iterating the series set. - if err := ss.Err(); err != nil { - return 0, 0, err - } - - // Final flush any existing metrics - if batchSizeBytes != 0 { - err = client.SendQueryStream(stream, &client.QueryStreamResponse{ - Timeseries: timeseries, - }) - if err != nil { - return 0, 0, err - } - } - - return numSeries, numSamples, nil -} - -// v2QueryStream streams metrics from a TSDB. This implements the client.IngesterServer interface -func (i *Ingester) v2QueryStreamChunks(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, stream client.Ingester_QueryStreamServer) (numSeries, numSamples int, _ error) { - q, err := db.ChunkQuerier(ctx, from, through) - if err != nil { - return 0, 0, err - } - defer q.Close() - - // It's not required to return sorted series because series are sorted by the Cortex querier. - ss := q.Select(false, nil, matchers...) - if ss.Err() != nil { - return 0, 0, ss.Err() - } - - chunkSeries := make([]client.TimeSeriesChunk, 0, queryStreamBatchSize) - batchSizeBytes := 0 - for ss.Next() { - series := ss.At() - - // convert labels to LabelAdapter - ts := client.TimeSeriesChunk{ - Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), - } - - it := series.Iterator() - for it.Next() { - // Chunks are ordered by min time. 
- meta := it.At() - - // It is not guaranteed that chunk returned by iterator is populated. - // For now just return error. We could also try to figure out how to read the chunk. - if meta.Chunk == nil { - return 0, 0, errors.Errorf("unfilled chunk returned from TSDB chunk querier") - } - - ch := client.Chunk{ - StartTimestampMs: meta.MinTime, - EndTimestampMs: meta.MaxTime, - Data: meta.Chunk.Bytes(), - } - - switch meta.Chunk.Encoding() { - case chunkenc.EncXOR: - ch.Encoding = int32(encoding.PrometheusXorChunk) - default: - return 0, 0, errors.Errorf("unknown chunk encoding from TSDB chunk querier: %v", meta.Chunk.Encoding()) - } - - ts.Chunks = append(ts.Chunks, ch) - numSamples += meta.Chunk.NumSamples() - } - numSeries++ - tsSize := ts.Size() - - if (batchSizeBytes > 0 && batchSizeBytes+tsSize > queryStreamBatchMessageSize) || len(chunkSeries) >= queryStreamBatchSize { - // Adding this series to the batch would make it too big, - // flush the data and add it to new batch instead. - err = client.SendQueryStream(stream, &client.QueryStreamResponse{ - Chunkseries: chunkSeries, - }) - if err != nil { - return 0, 0, err - } - - batchSizeBytes = 0 - chunkSeries = chunkSeries[:0] - } - - chunkSeries = append(chunkSeries, ts) - batchSizeBytes += tsSize - } - - // Ensure no error occurred while iterating the series set. - if err := ss.Err(); err != nil { - return 0, 0, err - } - - // Final flush any existing metrics - if batchSizeBytes != 0 { - err = client.SendQueryStream(stream, &client.QueryStreamResponse{ - Chunkseries: chunkSeries, - }) - if err != nil { - return 0, 0, err - } - } - - return numSeries, numSamples, nil -} - -func (i *Ingester) getTSDB(userID string) *userTSDB { - i.userStatesMtx.RLock() - defer i.userStatesMtx.RUnlock() - db := i.TSDBState.dbs[userID] - return db -} - -// List all users for which we have a TSDB. We do it here in order -// to keep the mutex locked for the shortest time possible. -func (i *Ingester) getTSDBUsers() []string { - i.userStatesMtx.RLock() - defer i.userStatesMtx.RUnlock() - - ids := make([]string, 0, len(i.TSDBState.dbs)) - for userID := range i.TSDBState.dbs { - ids = append(ids, userID) - } - - return ids -} - -func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error) { - db := i.getTSDB(userID) - if db != nil { - return db, nil - } - - i.userStatesMtx.Lock() - defer i.userStatesMtx.Unlock() - - // Check again for DB in the event it was created in-between locks - var ok bool - db, ok = i.TSDBState.dbs[userID] - if ok { - return db, nil - } - - // We're ready to create the TSDB, however we must be sure that the ingester - // is in the ACTIVE state, otherwise it may conflict with the transfer in/out. - // The TSDB is created when the first series is pushed and this shouldn't happen - // to a non-ACTIVE ingester, however we want to protect from any bug, cause we - // may have data loss or TSDB WAL corruption if the TSDB is created before/during - // a transfer in occurs. 
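-	// The RLock fast path in getTSDB() plus the re-check under the write lock above
-	// is a classic double-checked locking pattern: the common "TSDB already exists"
-	// case never has to take the exclusive lock.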
- if ingesterState := i.lifecycler.GetState(); !force && ingesterState != ring.ACTIVE { - return nil, fmt.Errorf(errTSDBCreateIncompatibleState, ingesterState) - } - - gl := i.getInstanceLimits() - if gl != nil && gl.MaxInMemoryTenants > 0 { - if users := int64(len(i.TSDBState.dbs)); users >= gl.MaxInMemoryTenants { - return nil, errMaxUsersLimitReached - } - } - - // Create the database and a shipper for a user - db, err := i.createTSDB(userID) - if err != nil { - return nil, err - } - - // Add the db to list of user databases - i.TSDBState.dbs[userID] = db - i.metrics.memUsers.Inc() - - return db, nil -} - -// createTSDB creates a TSDB for a given userID, and returns the created db. -func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { - tsdbPromReg := prometheus.NewRegistry() - udir := i.cfg.BlocksStorageConfig.TSDB.BlocksDir(userID) - userLogger := logutil.WithUserID(userID, i.logger) - - blockRanges := i.cfg.BlocksStorageConfig.TSDB.BlockRanges.ToMilliseconds() - - userDB := &userTSDB{ - userID: userID, - activeSeries: NewActiveSeries(), - seriesInMetric: newMetricCounter(i.limiter, i.cfg.getIgnoreSeriesLimitForMetricNamesMap()), - ingestedAPISamples: util_math.NewEWMARate(0.2, i.cfg.RateUpdatePeriod), - ingestedRuleSamples: util_math.NewEWMARate(0.2, i.cfg.RateUpdatePeriod), - - instanceLimitsFn: i.getInstanceLimits, - instanceSeriesCount: &i.TSDBState.seriesCount, - } - - enableExemplars := false - if i.cfg.BlocksStorageConfig.TSDB.MaxExemplars > 0 { - enableExemplars = true - } - // Create a new user database - db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ - RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), - MinBlockDuration: blockRanges[0], - MaxBlockDuration: blockRanges[len(blockRanges)-1], - NoLockfile: true, - StripeSize: i.cfg.BlocksStorageConfig.TSDB.StripeSize, - HeadChunksWriteBufferSize: i.cfg.BlocksStorageConfig.TSDB.HeadChunksWriteBufferSize, - WALCompression: i.cfg.BlocksStorageConfig.TSDB.WALCompressionEnabled, - WALSegmentSize: i.cfg.BlocksStorageConfig.TSDB.WALSegmentSizeBytes, - SeriesLifecycleCallback: userDB, - BlocksToDelete: userDB.blocksToDelete, - EnableExemplarStorage: enableExemplars, - MaxExemplars: int64(i.cfg.BlocksStorageConfig.TSDB.MaxExemplars), - }, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) - } - db.DisableCompactions() // we will compact on our own schedule - - // Run compaction before using this TSDB. If there is data in head that needs to be put into blocks, - // this will actually create the blocks. If there is no data (empty TSDB), this is a no-op, although - // local blocks compaction may still take place if configured. - level.Info(userLogger).Log("msg", "Running compaction after WAL replay") - err = db.Compact() - if err != nil { - return nil, errors.Wrapf(err, "failed to compact TSDB: %s", udir) - } - - userDB.db = db - // We set the limiter here because we don't want to limit - // series during WAL replay. - userDB.limiter = i.limiter - - if db.Head().NumSeries() > 0 { - // If there are series in the head, use max time from head. If this time is too old, - // TSDB will be eligible for flushing and closing sooner, unless more data is pushed to it quickly. - userDB.setLastUpdate(util.TimeFromMillis(db.Head().MaxTime())) - } else { - // If head is empty (eg. new TSDB), don't close it right after. - userDB.setLastUpdate(time.Now()) - } - - // Thanos shipper requires at least 1 external label to be set. 
For this reason, - // we set the tenant ID as external label and we'll filter it out when reading - // the series from the storage. - l := labels.Labels{ - { - Name: cortex_tsdb.TenantIDExternalLabel, - Value: userID, - }, { - Name: cortex_tsdb.IngesterIDExternalLabel, - Value: i.TSDBState.shipperIngesterID, - }, - } - - // Create a new shipper for this database - if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { - userDB.shipper = shipper.New( - userLogger, - tsdbPromReg, - udir, - bucket.NewUserBucketClient(userID, i.TSDBState.bucket, i.limits), - func() labels.Labels { return l }, - metadata.ReceiveSource, - false, // No need to upload compacted blocks. Cortex compactor takes care of that. - true, // Allow out of order uploads. It's fine in Cortex's context. - metadata.NoneFunc, - ) - - // Initialise the shipper blocks cache. - if err := userDB.updateCachedShippedBlocks(); err != nil { - level.Error(userLogger).Log("msg", "failed to update cached shipped blocks after shipper initialisation", "err", err) - } - } - - i.TSDBState.tsdbMetrics.setRegistryForUser(userID, tsdbPromReg) - return userDB, nil -} - -func (i *Ingester) closeAllTSDB() { - i.userStatesMtx.Lock() - - wg := &sync.WaitGroup{} - wg.Add(len(i.TSDBState.dbs)) - - // Concurrently close all users TSDB - for userID, userDB := range i.TSDBState.dbs { - userID := userID - - go func(db *userTSDB) { - defer wg.Done() - - if err := db.Close(); err != nil { - level.Warn(i.logger).Log("msg", "unable to close TSDB", "err", err, "user", userID) - return - } - - // Now that the TSDB has been closed, we should remove it from the - // set of open ones. This lock acquisition doesn't deadlock with the - // outer one, because the outer one is released as soon as all go - // routines are started. - i.userStatesMtx.Lock() - delete(i.TSDBState.dbs, userID) - i.userStatesMtx.Unlock() - - i.metrics.memUsers.Dec() - i.metrics.activeSeriesPerUser.DeleteLabelValues(userID) - }(userDB) - } - - // Wait until all Close() completed - i.userStatesMtx.Unlock() - wg.Wait() -} - -// openExistingTSDB walks the user tsdb dir, and opens a tsdb for each user. This may start a WAL replay, so we limit the number of -// concurrently opening TSDB. -func (i *Ingester) openExistingTSDB(ctx context.Context) error { - level.Info(i.logger).Log("msg", "opening existing TSDBs") - - queue := make(chan string) - group, groupCtx := errgroup.WithContext(ctx) - - // Create a pool of workers which will open existing TSDBs. - for n := 0; n < i.cfg.BlocksStorageConfig.TSDB.MaxTSDBOpeningConcurrencyOnStartup; n++ { - group.Go(func() error { - for userID := range queue { - startTime := time.Now() - - db, err := i.createTSDB(userID) - if err != nil { - level.Error(i.logger).Log("msg", "unable to open TSDB", "err", err, "user", userID) - return errors.Wrapf(err, "unable to open TSDB for user %s", userID) - } - - // Add the database to the map of user databases - i.userStatesMtx.Lock() - i.TSDBState.dbs[userID] = db - i.userStatesMtx.Unlock() - i.metrics.memUsers.Inc() - - i.TSDBState.walReplayTime.Observe(time.Since(startTime).Seconds()) - } - - return nil - }) - } - - // Spawn a goroutine to find all users with a TSDB on the filesystem. - group.Go(func() error { - // Close the queue once filesystem walking is done. - defer close(queue) - - walkErr := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - // If the root directory doesn't exist, we're OK (not needed to be created upfront). 
-				if os.IsNotExist(err) && path == i.cfg.BlocksStorageConfig.TSDB.Dir {
-					return filepath.SkipDir
-				}
-
-				level.Error(i.logger).Log("msg", "an error occurred while iterating the filesystem storing TSDBs", "path", path, "err", err)
-				return errors.Wrapf(err, "an error occurred while iterating the filesystem storing TSDBs at %s", path)
-			}
-
-			// Skip the root dir and anything that is not a directory.
-			if path == i.cfg.BlocksStorageConfig.TSDB.Dir || !info.IsDir() {
-				return nil
-			}
-
-			// Top level directories are assumed to be user TSDBs.
-			userID := info.Name()
-			f, err := os.Open(path)
-			if err != nil {
-				level.Error(i.logger).Log("msg", "unable to open TSDB dir", "err", err, "user", userID, "path", path)
-				return errors.Wrapf(err, "unable to open TSDB dir %s for user %s", path, userID)
-			}
-			defer f.Close()
-
-			// If the dir is empty, skip it.
-			if _, err := f.Readdirnames(1); err != nil {
-				if err == io.EOF {
-					return filepath.SkipDir
-				}
-
-				level.Error(i.logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path)
-				return errors.Wrapf(err, "unable to read TSDB dir %s for user %s", path, userID)
-			}
-
-			// Enqueue the user to be processed.
-			select {
-			case queue <- userID:
-				// Nothing to do.
-			case <-groupCtx.Done():
-				// Interrupt in case a failure occurred in another goroutine.
-				return nil
-			}
-
-			// Don't descend into subdirectories.
-			return filepath.SkipDir
-		})
-
-		return errors.Wrapf(walkErr, "unable to walk directory %s containing existing TSDBs", i.cfg.BlocksStorageConfig.TSDB.Dir)
-	})
-
-	// Wait for all workers to complete.
-	err := group.Wait()
-	if err != nil {
-		level.Error(i.logger).Log("msg", "error while opening existing TSDBs", "err", err)
-		return err
-	}
-
-	level.Info(i.logger).Log("msg", "successfully opened existing TSDBs")
-	return nil
-}
-
-// getMemorySeriesMetric returns the total number of in-memory series across all open TSDBs.
-func (i *Ingester) getMemorySeriesMetric() float64 {
-	if err := i.checkRunning(); err != nil {
-		return 0
-	}
-
-	i.userStatesMtx.RLock()
-	defer i.userStatesMtx.RUnlock()
-
-	count := uint64(0)
-	for _, db := range i.TSDBState.dbs {
-		count += db.Head().NumSeries()
-	}
-
-	return float64(count)
-}
-
-// getOldestUnshippedBlockMetric returns the unix timestamp of the oldest unshipped block or
-// 0 if all blocks have been shipped.
-func (i *Ingester) getOldestUnshippedBlockMetric() float64 {
-	i.userStatesMtx.RLock()
-	defer i.userStatesMtx.RUnlock()
-
-	oldest := uint64(0)
-	for _, db := range i.TSDBState.dbs {
-		if ts := db.getOldestUnshippedBlockTime(); oldest == 0 || ts < oldest {
-			oldest = ts
-		}
-	}
-
-	return float64(oldest / 1000)
-}
-
-func (i *Ingester) shipBlocksLoop(ctx context.Context) error {
-	// We add a slight jitter to make sure that if the head compaction interval and ship interval are set to the same
-	// value they don't clash (if they both continuously run at the same exact time, the head compaction may not run
-	// because it can't successfully change the state).
-	shipTicker := time.NewTicker(util.DurationWithJitter(i.cfg.BlocksStorageConfig.TSDB.ShipInterval, 0.01))
-	defer shipTicker.Stop()
-
-	for {
-		select {
-		case <-shipTicker.C:
-			i.shipBlocks(ctx, nil)
-
-		case req := <-i.TSDBState.shipTrigger:
-			i.shipBlocks(ctx, req.users)
-			close(req.callback) // Notify back.
-
-		case <-ctx.Done():
-			return nil
-		}
-	}
-}
-
-// shipBlocks runs shipping for all users.
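-// A nil allowed list means "ship for every tenant": the ticker-driven call in
-// shipBlocksLoop passes nil, while the flush handler narrows shipping to the
-// tenants listed in the request.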
-func (i *Ingester) shipBlocks(ctx context.Context, allowed *util.AllowedTenants) {
-	// Do not ship blocks if the ingester is PENDING or JOINING. It's
-	// particularly important for the JOINING state because there could
-	// be a blocks transfer in progress (from another ingester) and if we
-	// run the shipper in such a state we could end up with race conditions.
-	if i.lifecycler != nil {
-		if ingesterState := i.lifecycler.GetState(); ingesterState == ring.PENDING || ingesterState == ring.JOINING {
-			level.Info(i.logger).Log("msg", "TSDB blocks shipping has been skipped because of the current ingester state", "state", ingesterState)
-			return
-		}
-	}
-
-	// The number of concurrent workers is limited to avoid concurrently syncing a
-	// large number of tenants in a big cluster.
-	_ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.ShipConcurrency, func(ctx context.Context, userID string) error {
-		if !allowed.IsAllowed(userID) {
-			return nil
-		}
-
-		// Get the user's DB. If the user doesn't exist, we skip it.
-		userDB := i.getTSDB(userID)
-		if userDB == nil || userDB.shipper == nil {
-			return nil
-		}
-
-		if userDB.deletionMarkFound.Load() {
-			return nil
-		}
-
-		if time.Since(time.Unix(userDB.lastDeletionMarkCheck.Load(), 0)) > cortex_tsdb.DeletionMarkCheckInterval {
-			// Even if the check fails with an error, we don't want to repeat it too often.
-			userDB.lastDeletionMarkCheck.Store(time.Now().Unix())
-
-			deletionMarkExists, err := cortex_tsdb.TenantDeletionMarkExists(ctx, i.TSDBState.bucket, userID)
-			if err != nil {
-				// If we cannot check for the deletion mark, we continue anyway, even though in production the shipper will likely fail too.
-				// This however simplifies unit tests, where the tenant deletion check is enabled by default but tests don't set up the bucket.
-				level.Warn(i.logger).Log("msg", "failed to check for tenant deletion mark before shipping blocks", "user", userID, "err", err)
-			} else if deletionMarkExists {
-				userDB.deletionMarkFound.Store(true)
-
-				level.Info(i.logger).Log("msg", "tenant deletion mark exists, not shipping blocks", "user", userID)
-				return nil
-			}
-		}
-
-		// Run the shipper's Sync() to upload unshipped blocks. Make sure the TSDB state is active, in order to
-		// avoid any race condition with closing idle TSDBs.
-		if !userDB.casState(active, activeShipping) {
-			level.Info(i.logger).Log("msg", "shipper skipped because the TSDB is not active", "user", userID)
-			return nil
-		}
-		defer userDB.casState(activeShipping, active)
-
-		uploaded, err := userDB.shipper.Sync(ctx)
-		if err != nil {
-			level.Warn(i.logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err)
-		} else {
-			level.Debug(i.logger).Log("msg", "shipper successfully synchronized TSDB blocks with storage", "user", userID, "uploaded", uploaded)
-		}
-
-		// The shipper meta file could be updated even if Sync() returned an error,
-		// so it's safer to update it each time at least one block has been uploaded.
-		// Moreover, the shipper meta file could be updated even if no blocks were uploaded
-		// (eg. blocks removed due to retention), but it does no harm to leave the cached
-		// list of blocks stale in that case, so we don't handle it.
- if uploaded > 0 { - if err := userDB.updateCachedShippedBlocks(); err != nil { - level.Error(i.logger).Log("msg", "failed to update cached shipped blocks after shipper synchronisation", "user", userID, "err", err) - } - } - - return nil - }) -} - -func (i *Ingester) compactionLoop(ctx context.Context) error { - ticker := time.NewTicker(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval) - defer ticker.Stop() - - for ctx.Err() == nil { - select { - case <-ticker.C: - i.compactBlocks(ctx, false, nil) - - case req := <-i.TSDBState.forceCompactTrigger: - i.compactBlocks(ctx, true, req.users) - close(req.callback) // Notify back. - - case <-ctx.Done(): - return nil - } - } - return nil -} - -// Compacts all compactable blocks. Force flag will force compaction even if head is not compactable yet. -func (i *Ingester) compactBlocks(ctx context.Context, force bool, allowed *util.AllowedTenants) { - // Don't compact TSDB blocks while JOINING as there may be ongoing blocks transfers. - // Compaction loop is not running in LEAVING state, so if we get here in LEAVING state, we're flushing blocks. - if i.lifecycler != nil { - if ingesterState := i.lifecycler.GetState(); ingesterState == ring.JOINING { - level.Info(i.logger).Log("msg", "TSDB blocks compaction has been skipped because of the current ingester state", "state", ingesterState) - return - } - } - - _ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(ctx context.Context, userID string) error { - if !allowed.IsAllowed(userID) { - return nil - } - - userDB := i.getTSDB(userID) - if userDB == nil { - return nil - } - - // Don't do anything, if there is nothing to compact. - h := userDB.Head() - if h.NumSeries() == 0 { - return nil - } - - var err error - - i.TSDBState.compactionsTriggered.Inc() - - reason := "" - switch { - case force: - reason = "forced" - err = userDB.compactHead(i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds()) - - case i.TSDBState.compactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.TSDBState.compactionIdleTimeout): - reason = "idle" - level.Info(i.logger).Log("msg", "TSDB is idle, forcing compaction", "user", userID) - err = userDB.compactHead(i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds()) - - default: - reason = "regular" - err = userDB.Compact() - } - - if err != nil { - i.TSDBState.compactionsFailed.Inc() - level.Warn(i.logger).Log("msg", "TSDB blocks compaction for user has failed", "user", userID, "err", err, "compactReason", reason) - } else { - level.Debug(i.logger).Log("msg", "TSDB blocks compaction completed successfully", "user", userID, "compactReason", reason) - } - - return nil - }) -} - -func (i *Ingester) closeAndDeleteIdleUserTSDBs(ctx context.Context) error { - for _, userID := range i.getTSDBUsers() { - if ctx.Err() != nil { - return nil - } - - result := i.closeAndDeleteUserTSDBIfIdle(userID) - - i.TSDBState.idleTsdbChecks.WithLabelValues(string(result)).Inc() - } - - return nil -} - -func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckResult { - userDB := i.getTSDB(userID) - if userDB == nil || userDB.shipper == nil { - // We will not delete local data when not using shipping to storage. - return tsdbShippingDisabled - } - - if result := userDB.shouldCloseTSDB(i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout); !result.shouldClose() { - return result - } - - // This disables pushes and force-compactions. Not allowed to close while shipping is in progress. 
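-	// The TSDB state transitions used in this file are: active <-> activeShipping
-	// (around shipper.Sync above), and active -> closing -> closed for an idle
-	// close, where the deferred CAS below restores closing -> active if the close
-	// is abandoned.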
-	if !userDB.casState(active, closing) {
-		return tsdbNotActive
-	}
-
-	// If the TSDB is fully closed, we will set the state to 'closed', which will prevent this deferred closing -> active transition.
-	defer userDB.casState(closing, active)
-
-	// Make sure we don't ignore any possible inflight pushes.
-	userDB.pushesInFlight.Wait()
-
-	// Verify again, things may have changed during the checks and pushes.
-	tenantDeleted := false
-	if result := userDB.shouldCloseTSDB(i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout); !result.shouldClose() {
-		// This will also change TSDB state back to active (via defer above).
-		return result
-	} else if result == tsdbTenantMarkedForDeletion {
-		tenantDeleted = true
-	}
-
-	// At this point there are no more pushes to the TSDB, and no possible compaction. Normally the TSDB is empty,
-	// but if we're closing the TSDB because of a tenant deletion mark, then it may still contain some series.
-	// We need to remove these series from the series count.
-	i.TSDBState.seriesCount.Sub(int64(userDB.Head().NumSeries()))
-
-	dir := userDB.db.Dir()
-
-	if err := userDB.Close(); err != nil {
-		level.Error(i.logger).Log("msg", "failed to close idle TSDB", "user", userID, "err", err)
-		return tsdbCloseFailed
-	}
-
-	level.Info(i.logger).Log("msg", "closed idle TSDB", "user", userID)
-
-	// This will prevent going back to the "active" state in the deferred statement.
-	userDB.casState(closing, closed)
-
-	// Only remove the user from TSDBState once everything is cleaned up. This prevents
-	// concurrency problems when Cortex is trying to open a new TSDB for a tenant (i.e.
-	// a new request for that tenant comes in) while the TSDB for the same tenant is
-	// being closed. If that happens now, the request gets rejected, because the push
-	// cannot acquire the lock while the TSDB is in the closed state.
-	defer func() {
-		i.userStatesMtx.Lock()
-		delete(i.TSDBState.dbs, userID)
-		i.userStatesMtx.Unlock()
-	}()
-
-	i.metrics.memUsers.Dec()
-	i.TSDBState.tsdbMetrics.removeRegistryForUser(userID)
-
-	i.deleteUserMetadata(userID)
-	i.metrics.deletePerUserMetrics(userID)
-
-	validation.DeletePerUserValidationMetrics(userID, i.logger)
-
-	// And delete local data.
-	if err := os.RemoveAll(dir); err != nil {
-		level.Error(i.logger).Log("msg", "failed to delete local TSDB", "user", userID, "err", err)
-		return tsdbDataRemovalFailed
-	}
-
-	if tenantDeleted {
-		level.Info(i.logger).Log("msg", "deleted local TSDB, user marked for deletion", "user", userID, "dir", dir)
-		return tsdbTenantMarkedForDeletion
-	}
-
-	level.Info(i.logger).Log("msg", "deleted local TSDB, due to being idle", "user", userID, "dir", dir)
-	return tsdbIdleClosed
-}
-
-// This method will flush all data. It is called as part of the Lifecycler's shutdown (if flush on shutdown is configured), or from the flusher.
-//
-// When called during Lifecycler shutdown, this happens as part of normal Ingester shutdown (see stoppingV2 method).
-// Samples are not received at this stage. Compaction and shipping loops have already been stopped as well.
-//
-// When used from the flusher, the ingester is constructed in a way that compaction, shipping and receiving of samples are never started.
-func (i *Ingester) v2LifecyclerFlush() { - level.Info(i.logger).Log("msg", "starting to flush and ship TSDB blocks") - - ctx := context.Background() - - i.compactBlocks(ctx, true, nil) - if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { - i.shipBlocks(ctx, nil) - } - - level.Info(i.logger).Log("msg", "finished flushing and shipping TSDB blocks") -} - -const ( - tenantParam = "tenant" - waitParam = "wait" -) - -// Blocks version of Flush handler. It force-compacts blocks, and triggers shipping. -func (i *Ingester) v2FlushHandler(w http.ResponseWriter, r *http.Request) { - err := r.ParseForm() - if err != nil { - level.Warn(i.logger).Log("msg", "failed to parse HTTP request in flush handler", "err", err) - w.WriteHeader(http.StatusBadRequest) - return - } - - tenants := r.Form[tenantParam] - - allowedUsers := util.NewAllowedTenants(tenants, nil) - run := func() { - ingCtx := i.BasicService.ServiceContext() - if ingCtx == nil || ingCtx.Err() != nil { - level.Info(i.logger).Log("msg", "flushing TSDB blocks: ingester not running, ignoring flush request") - return - } - - compactionCallbackCh := make(chan struct{}) - - level.Info(i.logger).Log("msg", "flushing TSDB blocks: triggering compaction") - select { - case i.TSDBState.forceCompactTrigger <- requestWithUsersAndCallback{users: allowedUsers, callback: compactionCallbackCh}: - // Compacting now. - case <-ingCtx.Done(): - level.Warn(i.logger).Log("msg", "failed to compact TSDB blocks, ingester not running anymore") - return - } - - // Wait until notified about compaction being finished. - select { - case <-compactionCallbackCh: - level.Info(i.logger).Log("msg", "finished compacting TSDB blocks") - case <-ingCtx.Done(): - level.Warn(i.logger).Log("msg", "failed to compact TSDB blocks, ingester not running anymore") - return - } - - if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { - shippingCallbackCh := make(chan struct{}) // must be new channel, as compactionCallbackCh is closed now. - - level.Info(i.logger).Log("msg", "flushing TSDB blocks: triggering shipping") - - select { - case i.TSDBState.shipTrigger <- requestWithUsersAndCallback{users: allowedUsers, callback: shippingCallbackCh}: - // shipping now - case <-ingCtx.Done(): - level.Warn(i.logger).Log("msg", "failed to ship TSDB blocks, ingester not running anymore") - return - } - - // Wait until shipping finished. - select { - case <-shippingCallbackCh: - level.Info(i.logger).Log("msg", "shipping of TSDB blocks finished") - case <-ingCtx.Done(): - level.Warn(i.logger).Log("msg", "failed to ship TSDB blocks, ingester not running anymore") - return - } - } - - level.Info(i.logger).Log("msg", "flushing TSDB blocks: finished") - } - - if len(r.Form[waitParam]) > 0 && r.Form[waitParam][0] == "true" { - // Run synchronously. This simplifies and speeds up tests. - run() - } else { - go run() - } - - w.WriteHeader(http.StatusNoContent) -} - -// metadataQueryRange returns the best range to query for metadata queries based on the timerange in the ingester. -func metadataQueryRange(queryStart, queryEnd int64, db *userTSDB) (mint, maxt int64, err error) { - // Ingesters are run with limited retention and we don't support querying the store-gateway for labels yet. - // This means if someone loads a dashboard that is outside the range of the ingester, and we only return the - // data for the timerange requested (which will be empty), the dashboards will break. To fix this we should - // return the "head block" range until we can query the store-gateway. 
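-// For example, a query whose whole window ended before the ingester's oldest
-// sample (queryEnd < lowestTs below) is answered with the head block's own
-// [MinTime, MaxTime] instead of an empty range, so such panels keep rendering.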
- - // Now the question would be what to do when the query is partially in the ingester range. I would err on the side - // of caution and query the entire db, as I can't think of a good way to query the head + the overlapping range. - mint, maxt = queryStart, queryEnd - - lowestTs, err := db.StartTime() - if err != nil { - return mint, maxt, err - } - - // Completely outside. - if queryEnd < lowestTs { - mint, maxt = db.Head().MinTime(), db.Head().MaxTime() - } else if queryStart < lowestTs { - // Partially inside. - mint, maxt = 0, math.MaxInt64 - } - - return -} - -func wrappedTSDBIngestErr(ingestErr error, timestamp model.Time, labels []cortexpb.LabelAdapter) error { - if ingestErr == nil { - return nil - } - - return fmt.Errorf(errTSDBIngest, ingestErr, timestamp.Time().UTC().Format(time.RFC3339Nano), cortexpb.FromLabelAdaptersToLabels(labels).String()) -} - -func wrappedTSDBIngestExemplarErr(ingestErr error, timestamp model.Time, seriesLabels, exemplarLabels []cortexpb.LabelAdapter) error { - if ingestErr == nil { - return nil - } - - return fmt.Errorf(errTSDBIngestExemplar, ingestErr, timestamp.Time().UTC().Format(time.RFC3339Nano), - cortexpb.FromLabelAdaptersToLabels(seriesLabels).String(), - cortexpb.FromLabelAdaptersToLabels(exemplarLabels).String(), - ) -} - -func (i *Ingester) getInstanceLimits() *InstanceLimits { - // Don't apply any limits while starting. We especially don't want to apply series in memory limit while replaying WAL. - if i.State() == services.Starting { - return nil - } - - if i.cfg.InstanceLimitsFn == nil { - return defaultInstanceLimits - } - - l := i.cfg.InstanceLimitsFn() - if l == nil { - return defaultInstanceLimits - } - - return l -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/instance_limits.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/instance_limits.go deleted file mode 100644 index 5b222e8fc..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/instance_limits.go +++ /dev/null @@ -1,32 +0,0 @@ -package ingester - -import "github.com/pkg/errors" - -var ( - // We don't include values in the message to avoid leaking Cortex cluster configuration to users. - errMaxSamplesPushRateLimitReached = errors.New("cannot push more samples: ingester's samples push rate limit reached") - errMaxUsersLimitReached = errors.New("cannot create TSDB: ingesters's max tenants limit reached") - errMaxSeriesLimitReached = errors.New("cannot add series: ingesters's max series limit reached") - errTooManyInflightPushRequests = errors.New("cannot push: too many inflight push requests in ingester") -) - -// InstanceLimits describes limits used by ingester. Reaching any of these will result in Push method to return -// (internal) error. -type InstanceLimits struct { - MaxIngestionRate float64 `yaml:"max_ingestion_rate"` - MaxInMemoryTenants int64 `yaml:"max_tenants"` - MaxInMemorySeries int64 `yaml:"max_series"` - MaxInflightPushRequests int64 `yaml:"max_inflight_push_requests"` -} - -// Sets default limit values for unmarshalling. -var defaultInstanceLimits *InstanceLimits = nil - -// UnmarshalYAML implements the yaml.Unmarshaler interface. 
If defaults have been set, they are applied first, so fields omitted from the YAML keep their default values.
-func (l *InstanceLimits) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	if defaultInstanceLimits != nil {
-		*l = *defaultInstanceLimits
-	}
-	type plain InstanceLimits // type indirection to make sure we don't go into a recursive loop
-	return unmarshal((*plain)(l))
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/label_pairs.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/label_pairs.go
deleted file mode 100644
index bd0e8af63..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/label_pairs.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package ingester
-
-import (
-	"sort"
-	"strings"
-
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/model/labels"
-
-	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/util/extract"
-)
-
-// A series is uniquely identified by its set of label name/value
-// pairs, which may arrive in any order over the wire.
-type labelPairs []cortexpb.LabelAdapter
-
-func (a labelPairs) String() string {
-	var b strings.Builder
-
-	metricName, err := extract.MetricNameFromLabelAdapters(a)
-	numLabels := len(a) - 1
-	if err != nil {
-		numLabels = len(a)
-	}
-	b.WriteString(metricName)
-	b.WriteByte('{')
-	count := 0
-	for _, pair := range a {
-		if pair.Name != model.MetricNameLabel {
-			b.WriteString(pair.Name)
-			b.WriteString("=\"")
-			b.WriteString(pair.Value)
-			b.WriteByte('"')
-			count++
-			if count < numLabels {
-				b.WriteByte(',')
-			}
-		}
-	}
-	b.WriteByte('}')
-	return b.String()
-}
-
-// Remove any label where the value is "" - Prometheus 2+ will remove these
-// before sending, but other clients such as Prometheus 1.x might send us blanks.
-func (a *labelPairs) removeBlanks() {
-	for i := 0; i < len(*a); {
-		if len((*a)[i].Value) == 0 {
-			// Delete by swap with the value at the end of the slice
-			(*a)[i] = (*a)[len(*a)-1]
-			(*a) = (*a)[:len(*a)-1]
-			continue // go round and check the data that is now at position i
-		}
-		i++
-	}
-}
-
-func valueForName(s labels.Labels, name string) (string, bool) {
-	pos := sort.Search(len(s), func(i int) bool { return s[i].Name >= name })
-	if pos == len(s) || s[pos].Name != name {
-		return "", false
-	}
-	return s[pos].Value, true
-}
-
-// Check if a and b contain the same name/value pairs.
-func (a labelPairs) equal(b labels.Labels) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	// Check as many as we can where the two sets are in the same order
-	i := 0
-	for ; i < len(a); i++ {
-		if b[i].Name != string(a[i].Name) {
-			break
-		}
-		if b[i].Value != string(a[i].Value) {
-			return false
-		}
-	}
-	// Now check remaining values using binary search
-	for ; i < len(a); i++ {
-		v, found := valueForName(b, a[i].Name)
-		if !found || v != a[i].Value {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go
deleted file mode 100644
index c2293ccea..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package ingester
-
-import (
-	"fmt"
-	"math"
-
-	"github.com/pkg/errors"
-
-	"github.com/cortexproject/cortex/pkg/util"
-	util_math "github.com/cortexproject/cortex/pkg/util/math"
-	"github.com/cortexproject/cortex/pkg/util/validation"
-)
-
-var (
-	errMaxSeriesPerMetricLimitExceeded   = errors.New("per-metric series limit exceeded")
-	errMaxMetadataPerMetricLimitExceeded = errors.New("per-metric metadata limit exceeded")
-	errMaxSeriesPerUserLimitExceeded
= errors.New("per-user series limit exceeded") - errMaxMetadataPerUserLimitExceeded = errors.New("per-user metric metadata limit exceeded") -) - -// RingCount is the interface exposed by a ring implementation which allows -// to count members -type RingCount interface { - HealthyInstancesCount() int - ZonesCount() int -} - -// Limiter implements primitives to get the maximum number of series -// an ingester can handle for a specific tenant -type Limiter struct { - limits *validation.Overrides - ring RingCount - replicationFactor int - shuffleShardingEnabled bool - shardByAllLabels bool - zoneAwarenessEnabled bool -} - -// NewLimiter makes a new in-memory series limiter -func NewLimiter( - limits *validation.Overrides, - ring RingCount, - shardingStrategy string, - shardByAllLabels bool, - replicationFactor int, - zoneAwarenessEnabled bool, -) *Limiter { - return &Limiter{ - limits: limits, - ring: ring, - replicationFactor: replicationFactor, - shuffleShardingEnabled: shardingStrategy == util.ShardingStrategyShuffle, - shardByAllLabels: shardByAllLabels, - zoneAwarenessEnabled: zoneAwarenessEnabled, - } -} - -// AssertMaxSeriesPerMetric limit has not been reached compared to the current -// number of series in input and returns an error if so. -func (l *Limiter) AssertMaxSeriesPerMetric(userID string, series int) error { - if actualLimit := l.maxSeriesPerMetric(userID); series < actualLimit { - return nil - } - - return errMaxSeriesPerMetricLimitExceeded -} - -// AssertMaxMetadataPerMetric limit has not been reached compared to the current -// number of metadata per metric in input and returns an error if so. -func (l *Limiter) AssertMaxMetadataPerMetric(userID string, metadata int) error { - if actualLimit := l.maxMetadataPerMetric(userID); metadata < actualLimit { - return nil - } - - return errMaxMetadataPerMetricLimitExceeded -} - -// AssertMaxSeriesPerUser limit has not been reached compared to the current -// number of series in input and returns an error if so. -func (l *Limiter) AssertMaxSeriesPerUser(userID string, series int) error { - if actualLimit := l.maxSeriesPerUser(userID); series < actualLimit { - return nil - } - - return errMaxSeriesPerUserLimitExceeded -} - -// AssertMaxMetricsWithMetadataPerUser limit has not been reached compared to the current -// number of metrics with metadata in input and returns an error if so. -func (l *Limiter) AssertMaxMetricsWithMetadataPerUser(userID string, metrics int) error { - if actualLimit := l.maxMetadataPerUser(userID); metrics < actualLimit { - return nil - } - - return errMaxMetadataPerUserLimitExceeded -} - -// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit. -func (l *Limiter) MaxSeriesPerQuery(userID string) int { - return l.limits.MaxSeriesPerQuery(userID) -} - -// FormatError returns the input error enriched with the actual limits for the given user. -// It acts as pass-through if the input error is unknown. 
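-// For example, errMaxSeriesPerUserLimitExceeded is expanded into a message that
-// spells out the tenant's configured local and global series limits, which is
-// what the client ultimately sees when its push is rejected.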
-func (l *Limiter) FormatError(userID string, err error) error { - switch err { - case errMaxSeriesPerUserLimitExceeded: - return l.formatMaxSeriesPerUserError(userID) - case errMaxSeriesPerMetricLimitExceeded: - return l.formatMaxSeriesPerMetricError(userID) - case errMaxMetadataPerUserLimitExceeded: - return l.formatMaxMetadataPerUserError(userID) - case errMaxMetadataPerMetricLimitExceeded: - return l.formatMaxMetadataPerMetricError(userID) - default: - return err - } -} - -func (l *Limiter) formatMaxSeriesPerUserError(userID string) error { - actualLimit := l.maxSeriesPerUser(userID) - localLimit := l.limits.MaxLocalSeriesPerUser(userID) - globalLimit := l.limits.MaxGlobalSeriesPerUser(userID) - - return fmt.Errorf("per-user series limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)", - minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit) -} - -func (l *Limiter) formatMaxSeriesPerMetricError(userID string) error { - actualLimit := l.maxSeriesPerMetric(userID) - localLimit := l.limits.MaxLocalSeriesPerMetric(userID) - globalLimit := l.limits.MaxGlobalSeriesPerMetric(userID) - - return fmt.Errorf("per-metric series limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)", - minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit) -} - -func (l *Limiter) formatMaxMetadataPerUserError(userID string) error { - actualLimit := l.maxMetadataPerUser(userID) - localLimit := l.limits.MaxLocalMetricsWithMetadataPerUser(userID) - globalLimit := l.limits.MaxGlobalMetricsWithMetadataPerUser(userID) - - return fmt.Errorf("per-user metric metadata limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)", - minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit) -} - -func (l *Limiter) formatMaxMetadataPerMetricError(userID string) error { - actualLimit := l.maxMetadataPerMetric(userID) - localLimit := l.limits.MaxLocalMetadataPerMetric(userID) - globalLimit := l.limits.MaxGlobalMetadataPerMetric(userID) - - return fmt.Errorf("per-metric metadata limit of %d exceeded, please contact administrator to raise it (local limit: %d global limit: %d actual local limit: %d)", - minNonZero(localLimit, globalLimit), localLimit, globalLimit, actualLimit) -} - -func (l *Limiter) maxSeriesPerMetric(userID string) int { - localLimit := l.limits.MaxLocalSeriesPerMetric(userID) - globalLimit := l.limits.MaxGlobalSeriesPerMetric(userID) - - if globalLimit > 0 { - if l.shardByAllLabels { - // We can assume that series are evenly distributed across ingesters - // so we do convert the global limit into a local limit - localLimit = minNonZero(localLimit, l.convertGlobalToLocalLimit(userID, globalLimit)) - } else { - // Given a metric is always pushed to the same set of ingesters (based on - // the replication factor), we can configure the per-ingester local limit - // equal to the global limit. 
- localLimit = minNonZero(localLimit, globalLimit) - } - } - - // If both the local and global limits are disabled, we just - // use the largest int value - if localLimit == 0 { - localLimit = math.MaxInt32 - } - - return localLimit -} - -func (l *Limiter) maxMetadataPerMetric(userID string) int { - localLimit := l.limits.MaxLocalMetadataPerMetric(userID) - globalLimit := l.limits.MaxGlobalMetadataPerMetric(userID) - - if globalLimit > 0 { - if l.shardByAllLabels { - localLimit = minNonZero(localLimit, l.convertGlobalToLocalLimit(userID, globalLimit)) - } else { - localLimit = minNonZero(localLimit, globalLimit) - } - } - - if localLimit == 0 { - localLimit = math.MaxInt32 - } - - return localLimit -} - -func (l *Limiter) maxSeriesPerUser(userID string) int { - return l.maxByLocalAndGlobal( - userID, - l.limits.MaxLocalSeriesPerUser, - l.limits.MaxGlobalSeriesPerUser, - ) -} - -func (l *Limiter) maxMetadataPerUser(userID string) int { - return l.maxByLocalAndGlobal( - userID, - l.limits.MaxLocalMetricsWithMetadataPerUser, - l.limits.MaxGlobalMetricsWithMetadataPerUser, - ) -} - -func (l *Limiter) maxByLocalAndGlobal(userID string, localLimitFn, globalLimitFn func(string) int) int { - localLimit := localLimitFn(userID) - - // The global limit is supported only when shard-by-all-labels is enabled, - // otherwise we wouldn't get an even split of series/metadata across ingesters and - // can't take a "local decision" without any centralized coordination. - if l.shardByAllLabels { - // We can assume that series/metadata are evenly distributed across ingesters - // so we do convert the global limit into a local limit - globalLimit := globalLimitFn(userID) - localLimit = minNonZero(localLimit, l.convertGlobalToLocalLimit(userID, globalLimit)) - } - - // If both the local and global limits are disabled, we just - // use the largest int value - if localLimit == 0 { - localLimit = math.MaxInt32 - } - - return localLimit -} - -func (l *Limiter) convertGlobalToLocalLimit(userID string, globalLimit int) int { - if globalLimit == 0 { - return 0 - } - - // Given we don't need a super accurate count (ie. when the ingesters - // topology changes) and we prefer to always be in favor of the tenant, - // we can use a per-ingester limit equal to: - // (global limit / number of ingesters) * replication factor - numIngesters := l.ring.HealthyInstancesCount() - - // May happen because the number of ingesters is asynchronously updated. - // If happens, we just temporarily ignore the global limit. - if numIngesters == 0 { - return 0 - } - - // If the number of available ingesters is greater than the tenant's shard - // size, then we should honor the shard size because series/metadata won't - // be written to more ingesters than it. - if shardSize := l.getShardSize(userID); shardSize > 0 { - // We use Min() to protect from the case the expected shard size is > available ingesters. 
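-		// Worked example (assuming a single zone): globalLimit=60000 with 10 healthy
-		// ingesters and replication factor 3 yields 60000/10*3 = 18000 series per
-		// ingester; with shuffle sharding and a tenant shard size of 4, the divisor
-		// becomes min(10, 4) = 4 and the local limit rises to 45000.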
- numIngesters = util_math.Min(numIngesters, util.ShuffleShardExpectedInstances(shardSize, l.getNumZones())) - } - - return int((float64(globalLimit) / float64(numIngesters)) * float64(l.replicationFactor)) -} - -func (l *Limiter) getShardSize(userID string) int { - if !l.shuffleShardingEnabled { - return 0 - } - - return l.limits.IngestionTenantShardSize(userID) -} - -func (l *Limiter) getNumZones() int { - if l.zoneAwarenessEnabled { - return util_math.Max(l.ring.ZonesCount(), 1) - } - return 1 -} - -func minNonZero(first, second int) int { - if first == 0 || (second != 0 && first > second) { - return second - } - - return first -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/locker.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/locker.go deleted file mode 100644 index 3c97f38ba..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/locker.go +++ /dev/null @@ -1,58 +0,0 @@ -package ingester - -import ( - "sync" - "unsafe" - - "github.com/prometheus/common/model" - - "github.com/cortexproject/cortex/pkg/util" -) - -const ( - cacheLineSize = 64 -) - -// Avoid false sharing when using array of mutexes. -type paddedMutex struct { - sync.Mutex - //nolint:structcheck,unused - pad [cacheLineSize - unsafe.Sizeof(sync.Mutex{})]byte -} - -// fingerprintLocker allows locking individual fingerprints. To limit the number -// of mutexes needed for that, only a fixed number of mutexes are -// allocated. Fingerprints to be locked are assigned to those pre-allocated -// mutexes by their value. Collisions are not detected. If two fingerprints get -// assigned to the same mutex, only one of them can be locked at the same -// time. As long as the number of pre-allocated mutexes is much larger than the -// number of goroutines requiring a fingerprint lock concurrently, the loss in -// efficiency is small. However, a goroutine must never lock more than one -// fingerprint at the same time. (In that case a collision would try to acquire -// the same mutex twice). -type fingerprintLocker struct { - fpMtxs []paddedMutex - numFpMtxs uint32 -} - -// newFingerprintLocker returns a new fingerprintLocker ready for use. At least -// 1024 preallocated mutexes are used, even if preallocatedMutexes is lower. -func newFingerprintLocker(preallocatedMutexes int) *fingerprintLocker { - if preallocatedMutexes < 1024 { - preallocatedMutexes = 1024 - } - return &fingerprintLocker{ - make([]paddedMutex, preallocatedMutexes), - uint32(preallocatedMutexes), - } -} - -// Lock locks the given fingerprint. -func (l *fingerprintLocker) Lock(fp model.Fingerprint) { - l.fpMtxs[util.HashFP(fp)%l.numFpMtxs].Lock() -} - -// Unlock unlocks the given fingerprint. -func (l *fingerprintLocker) Unlock(fp model.Fingerprint) { - l.fpMtxs[util.HashFP(fp)%l.numFpMtxs].Unlock() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go deleted file mode 100644 index 835f6253a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go +++ /dev/null @@ -1,155 +0,0 @@ -package ingester - -import ( - "fmt" - "sort" - "strings" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/common/model" - "go.uber.org/atomic" -) - -const maxMappedFP = 1 << 20 // About 1M fingerprints reserved for mapping. 
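-// Raw fingerprints can also land at or below maxMappedFP by chance, which is why
-// mapFP treats that whole range as already-colliding and remaps it, keeping the
-// reserved space unambiguous.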
- -var separatorString = string([]byte{model.SeparatorByte}) - -// fpMappings maps original fingerprints to a map of string representations of -// metrics to the truly unique fingerprint. -type fpMappings map[model.Fingerprint]map[string]model.Fingerprint - -// fpMapper is used to map fingerprints in order to work around fingerprint -// collisions. -type fpMapper struct { - highestMappedFP atomic.Uint64 - - mtx sync.RWMutex // Protects mappings. - mappings fpMappings - - fpToSeries *seriesMap - - logger log.Logger -} - -// newFPMapper loads the collision map from the persistence and -// returns an fpMapper ready to use. -func newFPMapper(fpToSeries *seriesMap, logger log.Logger) *fpMapper { - return &fpMapper{ - fpToSeries: fpToSeries, - mappings: map[model.Fingerprint]map[string]model.Fingerprint{}, - logger: logger, - } -} - -// mapFP takes a raw fingerprint (as returned by Metrics.FastFingerprint) and -// returns a truly unique fingerprint. The caller must have locked the raw -// fingerprint. -// -// If an error is encountered, it is returned together with the unchanged raw -// fingerprint. -func (m *fpMapper) mapFP(fp model.Fingerprint, metric labelPairs) model.Fingerprint { - // First check if we are in the reserved FP space, in which case this is - // automatically a collision that has to be mapped. - if fp <= maxMappedFP { - return m.maybeAddMapping(fp, metric) - } - - // Then check the most likely case: This fp belongs to a series that is - // already in memory. - s, ok := m.fpToSeries.get(fp) - if ok { - // FP exists in memory, but is it for the same metric? - if metric.equal(s.metric) { - // Yup. We are done. - return fp - } - // Collision detected! - return m.maybeAddMapping(fp, metric) - } - // Metric is not in memory. Before doing the expensive archive lookup, - // check if we have a mapping for this metric in place already. - m.mtx.RLock() - mappedFPs, fpAlreadyMapped := m.mappings[fp] - m.mtx.RUnlock() - if fpAlreadyMapped { - // We indeed have mapped fp historically. - ms := metricToUniqueString(metric) - // fp is locked by the caller, so no further locking of - // 'collisions' required (it is specific to fp). - mappedFP, ok := mappedFPs[ms] - if ok { - // Historical mapping found, return the mapped FP. - return mappedFP - } - } - return fp -} - -// maybeAddMapping is only used internally. It takes a detected collision and -// adds it to the collisions map if not yet there. In any case, it returns the -// truly unique fingerprint for the colliding metric. -func (m *fpMapper) maybeAddMapping( - fp model.Fingerprint, - collidingMetric labelPairs, -) model.Fingerprint { - ms := metricToUniqueString(collidingMetric) - m.mtx.RLock() - mappedFPs, ok := m.mappings[fp] - m.mtx.RUnlock() - if ok { - // fp is locked by the caller, so no further locking required. - mappedFP, ok := mappedFPs[ms] - if ok { - return mappedFP // Existing mapping. - } - // A new mapping has to be created. - mappedFP = m.nextMappedFP() - mappedFPs[ms] = mappedFP - level.Debug(m.logger).Log( - "msg", "fingerprint collision detected, mapping to new fingerprint", - "old_fp", fp, - "new_fp", mappedFP, - "metric", collidingMetric, - ) - return mappedFP - } - // This is the first collision for fp. 
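-	// Unlike the fast path above, the inner map for this fingerprint does not
-	// exist yet and has to be allocated along with the first mapping.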
- mappedFP := m.nextMappedFP() - mappedFPs = map[string]model.Fingerprint{ms: mappedFP} - m.mtx.Lock() - m.mappings[fp] = mappedFPs - m.mtx.Unlock() - level.Debug(m.logger).Log( - "msg", "fingerprint collision detected, mapping to new fingerprint", - "old_fp", fp, - "new_fp", mappedFP, - "metric", collidingMetric, - ) - return mappedFP -} - -func (m *fpMapper) nextMappedFP() model.Fingerprint { - mappedFP := model.Fingerprint(m.highestMappedFP.Inc()) - if mappedFP > maxMappedFP { - panic(fmt.Errorf("more than %v fingerprints mapped in collision detection", maxMappedFP)) - } - return mappedFP -} - -// metricToUniqueString turns a metric into a string in a reproducible and -// unique way, i.e. the same metric will always create the same string, and -// different metrics will always create different strings. In a way, it is the -// "ideal" fingerprint function, only that it is more expensive than the -// FastFingerprint function, and its result is not suitable as a key for maps -// and indexes as it might become really large, causing a lot of hashing effort -// in maps and a lot of storage overhead in indexes. -func metricToUniqueString(m labelPairs) string { - parts := make([]string, 0, len(m)) - for _, pair := range m { - parts = append(parts, string(pair.Name)+separatorString+string(pair.Value)) - } - sort.Strings(parts) - return strings.Join(parts, separatorString) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go deleted file mode 100644 index 585cc45df..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go +++ /dev/null @@ -1,657 +0,0 @@ -package ingester - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" - util_math "github.com/cortexproject/cortex/pkg/util/math" -) - -const ( - memSeriesCreatedTotalName = "cortex_ingester_memory_series_created_total" - memSeriesCreatedTotalHelp = "The total number of series that were created per user." - - memSeriesRemovedTotalName = "cortex_ingester_memory_series_removed_total" - memSeriesRemovedTotalHelp = "The total number of series that were removed per user." -) - -type ingesterMetrics struct { - flushQueueLength prometheus.Gauge - ingestedSamples prometheus.Counter - ingestedExemplars prometheus.Counter - ingestedMetadata prometheus.Counter - ingestedSamplesFail prometheus.Counter - ingestedExemplarsFail prometheus.Counter - ingestedMetadataFail prometheus.Counter - queries prometheus.Counter - queriedSamples prometheus.Histogram - queriedExemplars prometheus.Histogram - queriedSeries prometheus.Histogram - queriedChunks prometheus.Histogram - memSeries prometheus.Gauge - memMetadata prometheus.Gauge - memUsers prometheus.Gauge - memSeriesCreatedTotal *prometheus.CounterVec - memMetadataCreatedTotal *prometheus.CounterVec - memSeriesRemovedTotal *prometheus.CounterVec - memMetadataRemovedTotal *prometheus.CounterVec - createdChunks prometheus.Counter - walReplayDuration prometheus.Gauge - walCorruptionsTotal prometheus.Counter - - // Chunks transfer. - sentChunks prometheus.Counter - receivedChunks prometheus.Counter - - // Chunks flushing. 
- flushSeriesInProgress prometheus.Gauge - chunkUtilization prometheus.Histogram - chunkLength prometheus.Histogram - chunkSize prometheus.Histogram - chunkAge prometheus.Histogram - memoryChunks prometheus.Gauge - seriesEnqueuedForFlush *prometheus.CounterVec - seriesDequeuedOutcome *prometheus.CounterVec - droppedChunks prometheus.Counter - oldestUnflushedChunkTimestamp prometheus.Gauge - - activeSeriesPerUser *prometheus.GaugeVec - - // Global limit metrics - maxUsersGauge prometheus.GaugeFunc - maxSeriesGauge prometheus.GaugeFunc - maxIngestionRate prometheus.GaugeFunc - ingestionRate prometheus.GaugeFunc - maxInflightPushRequests prometheus.GaugeFunc - inflightRequests prometheus.GaugeFunc -} - -func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSDB bool, activeSeriesEnabled bool, instanceLimitsFn func() *InstanceLimits, ingestionRate *util_math.EwmaRate, inflightRequests *atomic.Int64) *ingesterMetrics { - const ( - instanceLimits = "cortex_ingester_instance_limits" - instanceLimitsHelp = "Instance limits used by this ingester." // Must be same for all registrations. - limitLabel = "limit" - ) - - m := &ingesterMetrics{ - flushQueueLength: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_flush_queue_length", - Help: "The total number of series pending in the flush queue.", - }), - ingestedSamples: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_samples_total", - Help: "The total number of samples ingested.", - }), - ingestedExemplars: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_exemplars_total", - Help: "The total number of exemplars ingested.", - }), - ingestedMetadata: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_metadata_total", - Help: "The total number of metadata ingested.", - }), - ingestedSamplesFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_samples_failures_total", - Help: "The total number of samples that errored on ingestion.", - }), - ingestedExemplarsFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_exemplars_failures_total", - Help: "The total number of exemplars that errored on ingestion.", - }), - ingestedMetadataFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_metadata_failures_total", - Help: "The total number of metadata that errored on ingestion.", - }), - queries: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_queries_total", - Help: "The total number of queries the ingester has handled.", - }), - queriedSamples: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_queried_samples", - Help: "The total number of samples returned from queries.", - // Could easily return 10m samples per query - 10*(8^(8-1)) = 20.9m. - Buckets: prometheus.ExponentialBuckets(10, 8, 8), - }), - queriedExemplars: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_queried_exemplars", - Help: "The total number of exemplars returned from queries.", - // A reasonable upper bound is around 6k - 10*(5^(5-1)) = 6250. 
- Buckets: prometheus.ExponentialBuckets(10, 5, 5), - }), - queriedSeries: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_queried_series", - Help: "The total number of series returned from queries.", - // A reasonable upper bound is around 100k - 10*(8^(6-1)) = 327k. - Buckets: prometheus.ExponentialBuckets(10, 8, 6), - }), - queriedChunks: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_queried_chunks", - Help: "The total number of chunks returned from queries.", - // A small number of chunks per series - 10*(8^(7-1)) = 2.6m. - Buckets: prometheus.ExponentialBuckets(10, 8, 7), - }), - memSeries: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_memory_series", - Help: "The current number of series in memory.", - }), - memMetadata: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_memory_metadata", - Help: "The current number of metadata in memory.", - }), - memUsers: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_memory_users", - Help: "The current number of users in memory.", - }), - createdChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_chunks_created_total", - Help: "The total number of chunks the ingester has created.", - }), - walReplayDuration: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_wal_replay_duration_seconds", - Help: "Time taken to replay the checkpoint and the WAL.", - }), - walCorruptionsTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_wal_corruptions_total", - Help: "Total number of WAL corruptions encountered.", - }), - memMetadataCreatedTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ingester_memory_metadata_created_total", - Help: "The total number of metadata that were created per user", - }, []string{"user"}), - memMetadataRemovedTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ingester_memory_metadata_removed_total", - Help: "The total number of metadata that were removed per user.", - }, []string{"user"}), - - // Chunks / blocks transfer. - sentChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_sent_chunks", - Help: "The total number of chunks sent by this ingester whilst leaving.", - }), - receivedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_received_chunks", - Help: "The total number of chunks received by this ingester whilst joining", - }), - - // Chunks flushing. 
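The bucket arithmetic in the histogram comments above follows directly from prometheus.ExponentialBuckets(start, factor, count), which returns count upper bounds of the form start*factor^i; a quick check for the queried-samples histogram:

	buckets := prometheus.ExponentialBuckets(10, 8, 8)
	// [10 80 640 5120 40960 327680 2.62144e+06 2.097152e+07]
	// i.e. the largest bucket boundary is 10*8^(8-1) ≈ 20.9M samples.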
- flushSeriesInProgress: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_flush_series_in_progress", - Help: "Number of flush series operations in progress.", - }), - chunkUtilization: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_chunk_utilization", - Help: "Distribution of stored chunk utilization (when stored).", - Buckets: prometheus.LinearBuckets(0, 0.2, 6), - }), - chunkLength: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_chunk_length", - Help: "Distribution of stored chunk lengths (when stored).", - Buckets: prometheus.ExponentialBuckets(5, 2, 11), // biggest bucket is 5*2^(11-1) = 5120 - }), - chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_chunk_size_bytes", - Help: "Distribution of stored chunk sizes (when stored).", - Buckets: prometheus.ExponentialBuckets(500, 2, 7), // biggest bucket is 500*2^(7-1) = 32000 - }), - chunkAge: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_chunk_age_seconds", - Help: "Distribution of chunk ages (when stored).", - // with default settings chunks should flush between 5 min and 12 hours - // so buckets at 1min, 5min, 10min, 30min, 1hr, 2hr, 4hr, 10hr, 12hr, 16hr - Buckets: []float64{60, 300, 600, 1800, 3600, 7200, 14400, 36000, 43200, 57600}, - }), - memoryChunks: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_memory_chunks", - Help: "The total number of chunks in memory.", - }), - seriesEnqueuedForFlush: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ingester_flushing_enqueued_series_total", - Help: "Total number of series enqueued for flushing, with reasons.", - }, []string{"reason"}), - seriesDequeuedOutcome: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ingester_flushing_dequeued_series_total", - Help: "Total number of series dequeued for flushing, with outcome (superset of enqueue reasons)", - }, []string{"outcome"}), - droppedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_dropped_chunks_total", - Help: "Total number of chunks dropped from flushing because they have too few samples.", - }), - oldestUnflushedChunkTimestamp: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_oldest_unflushed_chunk_timestamp_seconds", - Help: "Unix timestamp of the oldest unflushed chunk in the memory", - }), - - maxUsersGauge: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ - Name: instanceLimits, - Help: instanceLimitsHelp, - ConstLabels: map[string]string{limitLabel: "max_tenants"}, - }, func() float64 { - if g := instanceLimitsFn(); g != nil { - return float64(g.MaxInMemoryTenants) - } - return 0 - }), - - maxSeriesGauge: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ - Name: instanceLimits, - Help: instanceLimitsHelp, - ConstLabels: map[string]string{limitLabel: "max_series"}, - }, func() float64 { - if g := instanceLimitsFn(); g != nil { - return float64(g.MaxInMemorySeries) - } - return 0 - }), - - maxIngestionRate: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ - Name: instanceLimits, - Help: instanceLimitsHelp, - ConstLabels: map[string]string{limitLabel: "max_ingestion_rate"}, - }, func() float64 { - if g := instanceLimitsFn(); g != nil { - return float64(g.MaxIngestionRate) - } - return 0 - }), - - maxInflightPushRequests: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ - Name: instanceLimits, - Help: instanceLimitsHelp, - ConstLabels: 
map[string]string{limitLabel: "max_inflight_push_requests"}, - }, func() float64 { - if g := instanceLimitsFn(); g != nil { - return float64(g.MaxInflightPushRequests) - } - return 0 - }), - - ingestionRate: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_ingester_ingestion_rate_samples_per_second", - Help: "Current ingestion rate in samples/sec that ingester is using to limit access.", - }, func() float64 { - if ingestionRate != nil { - return ingestionRate.Rate() - } - return 0 - }), - - inflightRequests: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_ingester_inflight_push_requests", - Help: "Current number of inflight push requests in ingester.", - }, func() float64 { - if inflightRequests != nil { - return float64(inflightRequests.Load()) - } - return 0 - }), - - // Not registered automatically, but only if activeSeriesEnabled is true. - activeSeriesPerUser: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "cortex_ingester_active_series", - Help: "Number of currently active series per user.", - }, []string{"user"}), - } - - if activeSeriesEnabled && r != nil { - r.MustRegister(m.activeSeriesPerUser) - } - - if createMetricsConflictingWithTSDB { - m.memSeriesCreatedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: memSeriesCreatedTotalName, - Help: memSeriesCreatedTotalHelp, - }, []string{"user"}) - - m.memSeriesRemovedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: memSeriesRemovedTotalName, - Help: memSeriesRemovedTotalHelp, - }, []string{"user"}) - } - - return m -} - -func (m *ingesterMetrics) deletePerUserMetrics(userID string) { - m.memMetadataCreatedTotal.DeleteLabelValues(userID) - m.memMetadataRemovedTotal.DeleteLabelValues(userID) - m.activeSeriesPerUser.DeleteLabelValues(userID) - - if m.memSeriesCreatedTotal != nil { - m.memSeriesCreatedTotal.DeleteLabelValues(userID) - } - - if m.memSeriesRemovedTotal != nil { - m.memSeriesRemovedTotal.DeleteLabelValues(userID) - } -} - -// TSDB metrics collector. Each tenant has its own registry, that TSDB code uses. -type tsdbMetrics struct { - // Metrics aggregated from Thanos shipper. - dirSyncs *prometheus.Desc // sum(thanos_shipper_dir_syncs_total) - dirSyncFailures *prometheus.Desc // sum(thanos_shipper_dir_sync_failures_total) - uploads *prometheus.Desc // sum(thanos_shipper_uploads_total) - uploadFailures *prometheus.Desc // sum(thanos_shipper_upload_failures_total) - - // Metrics aggregated from TSDB. 
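All four instance-limit gauges registered above share a single metric name and are distinguished only by their "limit" const label; the pattern in isolation, with an illustrative fixed limit value:

	promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{
		Name:        "cortex_ingester_instance_limits",
		Help:        "Instance limits used by this ingester.",
		ConstLabels: map[string]string{"limit": "max_series"},
	}, func() float64 { return 3_000_000 }) // hypothetical limit value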
- tsdbCompactionsTotal *prometheus.Desc - tsdbCompactionDuration *prometheus.Desc - tsdbFsyncDuration *prometheus.Desc - tsdbPageFlushes *prometheus.Desc - tsdbPageCompletions *prometheus.Desc - tsdbWALTruncateFail *prometheus.Desc - tsdbWALTruncateTotal *prometheus.Desc - tsdbWALTruncateDuration *prometheus.Desc - tsdbWALCorruptionsTotal *prometheus.Desc - tsdbWALWritesFailed *prometheus.Desc - tsdbHeadTruncateFail *prometheus.Desc - tsdbHeadTruncateTotal *prometheus.Desc - tsdbHeadGcDuration *prometheus.Desc - tsdbActiveAppenders *prometheus.Desc - tsdbSeriesNotFound *prometheus.Desc - tsdbChunks *prometheus.Desc - tsdbChunksCreatedTotal *prometheus.Desc - tsdbChunksRemovedTotal *prometheus.Desc - tsdbMmapChunkCorruptionTotal *prometheus.Desc - - tsdbExemplarsTotal *prometheus.Desc - tsdbExemplarsInStorage *prometheus.Desc - tsdbExemplarSeriesInStorage *prometheus.Desc - tsdbExemplarLastTs *prometheus.Desc - tsdbExemplarsOutOfOrder *prometheus.Desc - - // Follow metrics are from https://github.com/prometheus/prometheus/blob/fbe960f2c1ad9d6f5fe2f267d2559bf7ecfab6df/tsdb/db.go#L179 - tsdbLoadedBlocks *prometheus.Desc - tsdbSymbolTableSize *prometheus.Desc - tsdbReloads *prometheus.Desc - tsdbReloadsFailed *prometheus.Desc - tsdbTimeRetentionCount *prometheus.Desc - tsdbBlocksBytes *prometheus.Desc - - checkpointDeleteFail *prometheus.Desc - checkpointDeleteTotal *prometheus.Desc - checkpointCreationFail *prometheus.Desc - checkpointCreationTotal *prometheus.Desc - - // These two metrics replace metrics in ingesterMetrics, as we count them differently - memSeriesCreatedTotal *prometheus.Desc - memSeriesRemovedTotal *prometheus.Desc - - regs *util.UserRegistries -} - -func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { - m := &tsdbMetrics{ - regs: util.NewUserRegistries(), - - dirSyncs: prometheus.NewDesc( - "cortex_ingester_shipper_dir_syncs_total", - "Total number of TSDB dir syncs", - nil, nil), - dirSyncFailures: prometheus.NewDesc( - "cortex_ingester_shipper_dir_sync_failures_total", - "Total number of failed TSDB dir syncs", - nil, nil), - uploads: prometheus.NewDesc( - "cortex_ingester_shipper_uploads_total", - "Total number of uploaded TSDB blocks", - nil, nil), - uploadFailures: prometheus.NewDesc( - "cortex_ingester_shipper_upload_failures_total", - "Total number of TSDB block upload failures", - nil, nil), - tsdbCompactionsTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_compactions_total", - "Total number of TSDB compactions that were executed.", - nil, nil), - tsdbCompactionDuration: prometheus.NewDesc( - "cortex_ingester_tsdb_compaction_duration_seconds", - "Duration of TSDB compaction runs.", - nil, nil), - tsdbFsyncDuration: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_fsync_duration_seconds", - "Duration of TSDB WAL fsync.", - nil, nil), - tsdbPageFlushes: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_page_flushes_total", - "Total number of TSDB WAL page flushes.", - nil, nil), - tsdbPageCompletions: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_completed_pages_total", - "Total number of TSDB WAL completed pages.", - nil, nil), - tsdbWALTruncateFail: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_truncations_failed_total", - "Total number of TSDB WAL truncations that failed.", - nil, nil), - tsdbWALTruncateTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_truncations_total", - "Total number of TSDB WAL truncations attempted.", - nil, nil), - tsdbWALTruncateDuration: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_truncate_duration_seconds", - "Duration 
of TSDB WAL truncation.", - nil, nil), - tsdbWALCorruptionsTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_corruptions_total", - "Total number of TSDB WAL corruptions.", - nil, nil), - tsdbWALWritesFailed: prometheus.NewDesc( - "cortex_ingester_tsdb_wal_writes_failed_total", - "Total number of TSDB WAL writes that failed.", - nil, nil), - tsdbHeadTruncateFail: prometheus.NewDesc( - "cortex_ingester_tsdb_head_truncations_failed_total", - "Total number of TSDB head truncations that failed.", - nil, nil), - tsdbHeadTruncateTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_head_truncations_total", - "Total number of TSDB head truncations attempted.", - nil, nil), - tsdbHeadGcDuration: prometheus.NewDesc( - "cortex_ingester_tsdb_head_gc_duration_seconds", - "Runtime of garbage collection in the TSDB head.", - nil, nil), - tsdbActiveAppenders: prometheus.NewDesc( - "cortex_ingester_tsdb_head_active_appenders", - "Number of currently active TSDB appender transactions.", - nil, nil), - tsdbSeriesNotFound: prometheus.NewDesc( - "cortex_ingester_tsdb_head_series_not_found_total", - "Total number of TSDB requests for series that were not found.", - nil, nil), - tsdbChunks: prometheus.NewDesc( - "cortex_ingester_tsdb_head_chunks", - "Total number of chunks in the TSDB head block.", - nil, nil), - tsdbChunksCreatedTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_head_chunks_created_total", - "Total number of series created in the TSDB head.", - []string{"user"}, nil), - tsdbChunksRemovedTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_head_chunks_removed_total", - "Total number of series removed in the TSDB head.", - []string{"user"}, nil), - tsdbMmapChunkCorruptionTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_mmap_chunk_corruptions_total", - "Total number of memory-mapped TSDB chunk corruptions.", - nil, nil), - tsdbLoadedBlocks: prometheus.NewDesc( - "cortex_ingester_tsdb_blocks_loaded", - "Number of currently loaded data blocks", - nil, nil), - tsdbReloads: prometheus.NewDesc( - "cortex_ingester_tsdb_reloads_total", - "Number of times the database reloaded block data from disk.", - nil, nil), - tsdbReloadsFailed: prometheus.NewDesc( - "cortex_ingester_tsdb_reloads_failures_total", - "Number of times the database failed to reloadBlocks block data from disk.", - nil, nil), - tsdbSymbolTableSize: prometheus.NewDesc( - "cortex_ingester_tsdb_symbol_table_size_bytes", - "Size of symbol table in memory for loaded blocks", - []string{"user"}, nil), - tsdbBlocksBytes: prometheus.NewDesc( - "cortex_ingester_tsdb_storage_blocks_bytes", - "The number of bytes that are currently used for local storage by all blocks.", - []string{"user"}, nil), - tsdbTimeRetentionCount: prometheus.NewDesc( - "cortex_ingester_tsdb_time_retentions_total", - "The number of times that blocks were deleted because the maximum time limit was exceeded.", - nil, nil), - checkpointDeleteFail: prometheus.NewDesc( - "cortex_ingester_tsdb_checkpoint_deletions_failed_total", - "Total number of TSDB checkpoint deletions that failed.", - nil, nil), - checkpointDeleteTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_checkpoint_deletions_total", - "Total number of TSDB checkpoint deletions attempted.", - nil, nil), - checkpointCreationFail: prometheus.NewDesc( - "cortex_ingester_tsdb_checkpoint_creations_failed_total", - "Total number of TSDB checkpoint creations that failed.", - nil, nil), - checkpointCreationTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_checkpoint_creations_total", - "Total number of TSDB checkpoint 
creations attempted.", - nil, nil), - - // The most useful exemplar metrics are per-user. The rest - // are global to reduce metrics overhead. - tsdbExemplarsTotal: prometheus.NewDesc( - "cortex_ingester_tsdb_exemplar_exemplars_appended_total", - "Total number of TSDB exemplars appended.", - nil, nil), // see distributor_exemplars_in for per-user rate - tsdbExemplarsInStorage: prometheus.NewDesc( - "cortex_ingester_tsdb_exemplar_exemplars_in_storage", - "Number of TSDB exemplars currently in storage.", - nil, nil), - tsdbExemplarSeriesInStorage: prometheus.NewDesc( - "cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage", - "Number of TSDB series with exemplars currently in storage.", - []string{"user"}, nil), - tsdbExemplarLastTs: prometheus.NewDesc( - "cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds", - "The timestamp of the oldest exemplar stored in circular storage. Useful to check for what time "+ - "range the current exemplar buffer limit allows. This usually means the last timestamp "+ - "for all exemplars for a typical setup. This is not true though if one of the series timestamp is in future compared to rest series.", - []string{"user"}, nil), - tsdbExemplarsOutOfOrder: prometheus.NewDesc( - "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", - "Total number of out of order exemplar ingestion failed attempts.", - nil, nil), - - memSeriesCreatedTotal: prometheus.NewDesc(memSeriesCreatedTotalName, memSeriesCreatedTotalHelp, []string{"user"}, nil), - memSeriesRemovedTotal: prometheus.NewDesc(memSeriesRemovedTotalName, memSeriesRemovedTotalHelp, []string{"user"}, nil), - } - - if r != nil { - r.MustRegister(m) - } - return m -} - -func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { - out <- sm.dirSyncs - out <- sm.dirSyncFailures - out <- sm.uploads - out <- sm.uploadFailures - - out <- sm.tsdbCompactionsTotal - out <- sm.tsdbCompactionDuration - out <- sm.tsdbFsyncDuration - out <- sm.tsdbPageFlushes - out <- sm.tsdbPageCompletions - out <- sm.tsdbWALTruncateFail - out <- sm.tsdbWALTruncateTotal - out <- sm.tsdbWALTruncateDuration - out <- sm.tsdbWALCorruptionsTotal - out <- sm.tsdbWALWritesFailed - out <- sm.tsdbHeadTruncateFail - out <- sm.tsdbHeadTruncateTotal - out <- sm.tsdbHeadGcDuration - out <- sm.tsdbActiveAppenders - out <- sm.tsdbSeriesNotFound - out <- sm.tsdbChunks - out <- sm.tsdbChunksCreatedTotal - out <- sm.tsdbChunksRemovedTotal - out <- sm.tsdbMmapChunkCorruptionTotal - out <- sm.tsdbLoadedBlocks - out <- sm.tsdbSymbolTableSize - out <- sm.tsdbReloads - out <- sm.tsdbReloadsFailed - out <- sm.tsdbTimeRetentionCount - out <- sm.tsdbBlocksBytes - out <- sm.checkpointDeleteFail - out <- sm.checkpointDeleteTotal - out <- sm.checkpointCreationFail - out <- sm.checkpointCreationTotal - - out <- sm.tsdbExemplarsTotal - out <- sm.tsdbExemplarsInStorage - out <- sm.tsdbExemplarSeriesInStorage - out <- sm.tsdbExemplarLastTs - out <- sm.tsdbExemplarsOutOfOrder - - out <- sm.memSeriesCreatedTotal - out <- sm.memSeriesRemovedTotal -} - -func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { - data := sm.regs.BuildMetricFamiliesPerUser() - - // OK, we have it all. Let's build results. 
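tsdbMetrics is a hand-rolled prometheus.Collector: Describe advertises the descriptors, and Collect (below) re-aggregates the per-user registries on every scrape. The minimal shape of such a collector, sketched with a placeholder constant:

	type userCount struct{ desc *prometheus.Desc }

	func (c userCount) Describe(out chan<- *prometheus.Desc) { out <- c.desc }

	func (c userCount) Collect(out chan<- prometheus.Metric) {
		// A real collector would aggregate here; 42 is a placeholder value.
		out <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
	}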
- data.SendSumOfCounters(out, sm.dirSyncs, "thanos_shipper_dir_syncs_total") - data.SendSumOfCounters(out, sm.dirSyncFailures, "thanos_shipper_dir_sync_failures_total") - data.SendSumOfCounters(out, sm.uploads, "thanos_shipper_uploads_total") - data.SendSumOfCounters(out, sm.uploadFailures, "thanos_shipper_upload_failures_total") - - data.SendSumOfCounters(out, sm.tsdbCompactionsTotal, "prometheus_tsdb_compactions_total") - data.SendSumOfHistograms(out, sm.tsdbCompactionDuration, "prometheus_tsdb_compaction_duration_seconds") - data.SendSumOfSummaries(out, sm.tsdbFsyncDuration, "prometheus_tsdb_wal_fsync_duration_seconds") - data.SendSumOfCounters(out, sm.tsdbPageFlushes, "prometheus_tsdb_wal_page_flushes_total") - data.SendSumOfCounters(out, sm.tsdbPageCompletions, "prometheus_tsdb_wal_completed_pages_total") - data.SendSumOfCounters(out, sm.tsdbWALTruncateFail, "prometheus_tsdb_wal_truncations_failed_total") - data.SendSumOfCounters(out, sm.tsdbWALTruncateTotal, "prometheus_tsdb_wal_truncations_total") - data.SendSumOfSummaries(out, sm.tsdbWALTruncateDuration, "prometheus_tsdb_wal_truncate_duration_seconds") - data.SendSumOfCounters(out, sm.tsdbWALCorruptionsTotal, "prometheus_tsdb_wal_corruptions_total") - data.SendSumOfCounters(out, sm.tsdbWALWritesFailed, "prometheus_tsdb_wal_writes_failed_total") - data.SendSumOfCounters(out, sm.tsdbHeadTruncateFail, "prometheus_tsdb_head_truncations_failed_total") - data.SendSumOfCounters(out, sm.tsdbHeadTruncateTotal, "prometheus_tsdb_head_truncations_total") - data.SendSumOfSummaries(out, sm.tsdbHeadGcDuration, "prometheus_tsdb_head_gc_duration_seconds") - data.SendSumOfGauges(out, sm.tsdbActiveAppenders, "prometheus_tsdb_head_active_appenders") - data.SendSumOfCounters(out, sm.tsdbSeriesNotFound, "prometheus_tsdb_head_series_not_found_total") - data.SendSumOfGauges(out, sm.tsdbChunks, "prometheus_tsdb_head_chunks") - data.SendSumOfCountersPerUser(out, sm.tsdbChunksCreatedTotal, "prometheus_tsdb_head_chunks_created_total") - data.SendSumOfCountersPerUser(out, sm.tsdbChunksRemovedTotal, "prometheus_tsdb_head_chunks_removed_total") - data.SendSumOfCounters(out, sm.tsdbMmapChunkCorruptionTotal, "prometheus_tsdb_mmap_chunk_corruptions_total") - data.SendSumOfGauges(out, sm.tsdbLoadedBlocks, "prometheus_tsdb_blocks_loaded") - data.SendSumOfGaugesPerUser(out, sm.tsdbSymbolTableSize, "prometheus_tsdb_symbol_table_size_bytes") - data.SendSumOfCounters(out, sm.tsdbReloads, "prometheus_tsdb_reloads_total") - data.SendSumOfCounters(out, sm.tsdbReloadsFailed, "prometheus_tsdb_reloads_failures_total") - data.SendSumOfCounters(out, sm.tsdbTimeRetentionCount, "prometheus_tsdb_time_retentions_total") - data.SendSumOfGaugesPerUser(out, sm.tsdbBlocksBytes, "prometheus_tsdb_storage_blocks_bytes") - data.SendSumOfCounters(out, sm.checkpointDeleteFail, "prometheus_tsdb_checkpoint_deletions_failed_total") - data.SendSumOfCounters(out, sm.checkpointDeleteTotal, "prometheus_tsdb_checkpoint_deletions_total") - data.SendSumOfCounters(out, sm.checkpointCreationFail, "prometheus_tsdb_checkpoint_creations_failed_total") - data.SendSumOfCounters(out, sm.checkpointCreationTotal, "prometheus_tsdb_checkpoint_creations_total") - data.SendSumOfCounters(out, sm.tsdbExemplarsTotal, "prometheus_tsdb_exemplar_exemplars_appended_total") - data.SendSumOfGauges(out, sm.tsdbExemplarsInStorage, "prometheus_tsdb_exemplar_exemplars_in_storage") - data.SendSumOfGaugesPerUser(out, sm.tsdbExemplarSeriesInStorage, "prometheus_tsdb_exemplar_series_with_exemplars_in_storage") - 
data.SendSumOfGaugesPerUser(out, sm.tsdbExemplarLastTs, "prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds") - data.SendSumOfCounters(out, sm.tsdbExemplarsOutOfOrder, "prometheus_tsdb_exemplar_out_of_order_exemplars_total") - - data.SendSumOfCountersPerUser(out, sm.memSeriesCreatedTotal, "prometheus_tsdb_head_series_created_total") - data.SendSumOfCountersPerUser(out, sm.memSeriesRemovedTotal, "prometheus_tsdb_head_series_removed_total") -} - -func (sm *tsdbMetrics) setRegistryForUser(userID string, registry *prometheus.Registry) { - sm.regs.AddUserRegistry(userID, registry) -} - -func (sm *tsdbMetrics) removeRegistryForUser(userID string) { - sm.regs.RemoveUserRegistry(userID, false) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/series.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/series.go deleted file mode 100644 index a5dfcacde..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/series.go +++ /dev/null @@ -1,260 +0,0 @@ -package ingester - -import ( - "fmt" - "sort" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/value" - - "github.com/cortexproject/cortex/pkg/chunk/encoding" - "github.com/cortexproject/cortex/pkg/prom1/storage/metric" -) - -const ( - sampleOutOfOrder = "sample-out-of-order" - newValueForTimestamp = "new-value-for-timestamp" - sampleOutOfBounds = "sample-out-of-bounds" - duplicateSample = "duplicate-sample" - duplicateTimestamp = "duplicate-timestamp" -) - -type memorySeries struct { - metric labels.Labels - - // Sorted by start time, overlapping chunk ranges are forbidden. - chunkDescs []*desc - - // Whether the current head chunk has already been finished. If true, - // the current head chunk must not be modified anymore. - headChunkClosed bool - - // The timestamp & value of the last sample in this series. Needed to - // ensure timestamp monotonicity during ingestion. - lastSampleValueSet bool - lastTime model.Time - lastSampleValue model.SampleValue - - // Prometheus metrics. - createdChunks prometheus.Counter -} - -// newMemorySeries returns a pointer to a newly allocated memorySeries for the -// given metric. -func newMemorySeries(m labels.Labels, createdChunks prometheus.Counter) *memorySeries { - return &memorySeries{ - metric: m, - lastTime: model.Earliest, - createdChunks: createdChunks, - } -} - -// add adds a sample pair to the series, possibly creating a new chunk. -// The caller must have locked the fingerprint of the series. -func (s *memorySeries) add(v model.SamplePair) error { - // If sender has repeated the same timestamp, check more closely and perhaps return error. - if v.Timestamp == s.lastTime { - // If we don't know what the last sample value is, silently discard. - // This will mask some errors but better than complaining when we don't really know. - if !s.lastSampleValueSet { - return makeNoReportError(duplicateTimestamp) - } - // If both timestamp and sample value are the same as for the last append, - // ignore as they are a common occurrence when using client-side timestamps - // (e.g. Pushgateway or federation). 
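Taken together, the checks in add give the following contract for a freshly created series (a sketch; the outcomes named in the comments are the discard reasons defined above):

	s := newMemorySeries(lbls, createdChunks)          // names illustrative
	s.add(model.SamplePair{Timestamp: 1000, Value: 1}) // nil: sample appended
	s.add(model.SamplePair{Timestamp: 1000, Value: 1}) // no-report duplicate-sample error
	s.add(model.SamplePair{Timestamp: 1000, Value: 2}) // new-value-for-timestamp error
	s.add(model.SamplePair{Timestamp: 999, Value: 1})  // sample-out-of-order error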
- if v.Value.Equal(s.lastSampleValue) { - return makeNoReportError(duplicateSample) - } - return makeMetricValidationError(newValueForTimestamp, s.metric, - fmt.Errorf("sample with repeated timestamp but different value; last value: %v, incoming value: %v", s.lastSampleValue, v.Value)) - } - if v.Timestamp < s.lastTime { - return makeMetricValidationError(sampleOutOfOrder, s.metric, - fmt.Errorf("sample timestamp out of order; last timestamp: %v, incoming timestamp: %v", s.lastTime, v.Timestamp)) - } - - if len(s.chunkDescs) == 0 || s.headChunkClosed { - newHead := newDesc(encoding.New(), v.Timestamp, v.Timestamp) - s.chunkDescs = append(s.chunkDescs, newHead) - s.headChunkClosed = false - s.createdChunks.Inc() - } - - newChunk, err := s.head().add(v) - if err != nil { - return err - } - - // If we get a single chunk result, then just replace the head chunk with it - // (no need to update first/last time). Otherwise, we'll need to update first - // and last time. - if newChunk != nil { - first, last, err := firstAndLastTimes(newChunk) - if err != nil { - return err - } - s.chunkDescs = append(s.chunkDescs, newDesc(newChunk, first, last)) - s.createdChunks.Inc() - } - - s.lastTime = v.Timestamp - s.lastSampleValue = v.Value - s.lastSampleValueSet = true - - return nil -} - -func firstAndLastTimes(c encoding.Chunk) (model.Time, model.Time, error) { - var ( - first model.Time - last model.Time - firstSet bool - iter = c.NewIterator(nil) - ) - for iter.Scan() { - sample := iter.Value() - if !firstSet { - first = sample.Timestamp - firstSet = true - } - last = sample.Timestamp - } - return first, last, iter.Err() -} - -// closeHead marks the head chunk closed. The caller must have locked -// the fingerprint of the memorySeries. This method will panic if this -// series has no chunk descriptors. -func (s *memorySeries) closeHead(reason flushReason) { - s.chunkDescs[0].flushReason = reason - s.headChunkClosed = true -} - -// firstTime returns the earliest known time for the series. The caller must have -// locked the fingerprint of the memorySeries. This method will panic if this -// series has no chunk descriptors. -func (s *memorySeries) firstTime() model.Time { - return s.chunkDescs[0].FirstTime -} - -// Returns time of oldest chunk in the series, that isn't flushed. If there are -// no chunks, or all chunks are flushed, returns 0. -// The caller must have locked the fingerprint of the memorySeries. -func (s *memorySeries) firstUnflushedChunkTime() model.Time { - for _, c := range s.chunkDescs { - if !c.flushed { - return c.FirstTime - } - } - - return 0 -} - -// head returns a pointer to the head chunk descriptor. The caller must have -// locked the fingerprint of the memorySeries. This method will panic if this -// series has no chunk descriptors. -func (s *memorySeries) head() *desc { - return s.chunkDescs[len(s.chunkDescs)-1] -} - -func (s *memorySeries) samplesForRange(from, through model.Time) ([]model.SamplePair, error) { - // Find first chunk with start time after "from". - fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool { - return s.chunkDescs[i].FirstTime.After(from) - }) - // Find first chunk with start time after "through". - throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool { - return s.chunkDescs[i].FirstTime.After(through) - }) - if fromIdx == len(s.chunkDescs) { - // Even the last chunk starts before "from". Find out if the - // series ends before "from" and we don't need to do anything. 
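Both lookups in samplesForRange use sort.Search, which returns the smallest index whose predicate holds (or the length if none does); that is why fromIdx is decremented below, since a chunk that starts at or before "from" can still overlap the range. A worked example with plain ints:

	starts := []int{10, 20, 30} // chunk start times
	from := 25
	i := sort.Search(len(starts), func(i int) bool { return starts[i] > from })
	// i == 2: the chunk starting at 30 is the first to start after 25, but the
	// chunk starting at 20 may still hold samples in [25, ...], so back up one.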
- lt := s.chunkDescs[len(s.chunkDescs)-1].LastTime - if lt.Before(from) { - return nil, nil - } - } - if fromIdx > 0 { - fromIdx-- - } - if throughIdx == len(s.chunkDescs) { - throughIdx-- - } - var values []model.SamplePair - in := metric.Interval{ - OldestInclusive: from, - NewestInclusive: through, - } - var reuseIter encoding.Iterator - for idx := fromIdx; idx <= throughIdx; idx++ { - cd := s.chunkDescs[idx] - reuseIter = cd.C.NewIterator(reuseIter) - chValues, err := encoding.RangeValues(reuseIter, in) - if err != nil { - return nil, err - } - values = append(values, chValues...) - } - return values, nil -} - -func (s *memorySeries) setChunks(descs []*desc) error { - if len(s.chunkDescs) != 0 { - return fmt.Errorf("series already has chunks") - } - - s.chunkDescs = descs - if len(descs) > 0 { - s.lastTime = descs[len(descs)-1].LastTime - } - return nil -} - -func (s *memorySeries) isStale() bool { - return s.lastSampleValueSet && value.IsStaleNaN(float64(s.lastSampleValue)) -} - -type desc struct { - C encoding.Chunk // nil if chunk is evicted. - FirstTime model.Time // Timestamp of first sample. Populated at creation. Immutable. - LastTime model.Time // Timestamp of last sample. Populated at creation & on append. - LastUpdate model.Time // This server's local time on last change - flushReason flushReason // If chunk is closed, holds the reason why. - flushed bool // set to true when flush succeeds -} - -func newDesc(c encoding.Chunk, firstTime model.Time, lastTime model.Time) *desc { - return &desc{ - C: c, - FirstTime: firstTime, - LastTime: lastTime, - LastUpdate: model.Now(), - } -} - -// Add adds a sample pair to the underlying chunk. For safe concurrent access, -// The chunk must be pinned, and the caller must have locked the fingerprint of -// the series. -func (d *desc) add(s model.SamplePair) (encoding.Chunk, error) { - cs, err := d.C.Add(s) - if err != nil { - return nil, err - } - - if cs == nil { - d.LastTime = s.Timestamp // sample was added to this chunk - d.LastUpdate = model.Now() - } - - return cs, nil -} - -func (d *desc) slice(start, end model.Time) *desc { - return &desc{ - C: d.C.Slice(start, end), - FirstTime: start, - LastTime: end, - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go deleted file mode 100644 index 4d4a9a5b6..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go +++ /dev/null @@ -1,110 +0,0 @@ -package ingester - -import ( - "sync" - "unsafe" - - "github.com/prometheus/common/model" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" -) - -const seriesMapShards = 128 - -// seriesMap maps fingerprints to memory series. All its methods are -// goroutine-safe. A seriesMap is effectively a goroutine-safe version of -// map[model.Fingerprint]*memorySeries. -type seriesMap struct { - size atomic.Int32 - shards []shard -} - -type shard struct { - mtx sync.Mutex - m map[model.Fingerprint]*memorySeries - - // Align this struct. - _ [cacheLineSize - unsafe.Sizeof(sync.Mutex{}) - unsafe.Sizeof(map[model.Fingerprint]*memorySeries{})]byte -} - -// fingerprintSeriesPair pairs a fingerprint with a memorySeries pointer. -type fingerprintSeriesPair struct { - fp model.Fingerprint - series *memorySeries -} - -// newSeriesMap returns a newly allocated empty seriesMap. To create a seriesMap -// based on a prefilled map, use an explicit initializer. 
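A usage sketch of the seriesMap API that follows, assuming some fp and series are in scope; the one sharp edge is iter, whose channel must be drained or the producing goroutine leaks:

	sm := newSeriesMap()
	sm.put(fp, series)
	if ms, ok := sm.get(fp); ok {
		_ = ms // same semantics as a native map lookup
	}
	for pair := range sm.iter() { // drain fully to avoid leaking the goroutine
		_ = pair.series
	}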
-func newSeriesMap() *seriesMap { - shards := make([]shard, seriesMapShards) - for i := 0; i < seriesMapShards; i++ { - shards[i].m = map[model.Fingerprint]*memorySeries{} - } - return &seriesMap{ - shards: shards, - } -} - -// get returns a memorySeries for a fingerprint. Return values have the same -// semantics as the native Go map. -func (sm *seriesMap) get(fp model.Fingerprint) (*memorySeries, bool) { - shard := &sm.shards[util.HashFP(fp)%seriesMapShards] - shard.mtx.Lock() - ms, ok := shard.m[fp] - shard.mtx.Unlock() - return ms, ok -} - -// put adds a mapping to the seriesMap. -func (sm *seriesMap) put(fp model.Fingerprint, s *memorySeries) { - shard := &sm.shards[util.HashFP(fp)%seriesMapShards] - shard.mtx.Lock() - _, ok := shard.m[fp] - shard.m[fp] = s - shard.mtx.Unlock() - - if !ok { - sm.size.Inc() - } -} - -// del removes a mapping from the series Map. -func (sm *seriesMap) del(fp model.Fingerprint) { - shard := &sm.shards[util.HashFP(fp)%seriesMapShards] - shard.mtx.Lock() - _, ok := shard.m[fp] - delete(shard.m, fp) - shard.mtx.Unlock() - if ok { - sm.size.Dec() - } -} - -// iter returns a channel that produces all mappings in the seriesMap. The -// channel will be closed once all fingerprints have been received. Not -// consuming all fingerprints from the channel will leak a goroutine. The -// semantics of concurrent modification of seriesMap is the similar as the one -// for iterating over a map with a 'range' clause. However, if the next element -// in iteration order is removed after the current element has been received -// from the channel, it will still be produced by the channel. -func (sm *seriesMap) iter() <-chan fingerprintSeriesPair { - ch := make(chan fingerprintSeriesPair) - go func() { - for i := range sm.shards { - sm.shards[i].mtx.Lock() - for fp, ms := range sm.shards[i].m { - sm.shards[i].mtx.Unlock() - ch <- fingerprintSeriesPair{fp, ms} - sm.shards[i].mtx.Lock() - } - sm.shards[i].mtx.Unlock() - } - close(ch) - }() - return ch -} - -func (sm *seriesMap) length() int { - return int(sm.size.Load()) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go deleted file mode 100644 index ce31e9f42..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go +++ /dev/null @@ -1,390 +0,0 @@ -package ingester - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "time" - - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/weaveworks/common/user" - - "github.com/cortexproject/cortex/pkg/chunk/encoding" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/util/backoff" -) - -var ( - errTransferNoPendingIngesters = errors.New("no pending ingesters") -) - -// returns source ingesterID, number of received series, added chunks and error -func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream client.Ingester_TransferChunksServer) (fromIngesterID string, seriesReceived int, retErr error) { - chunksAdded := 0.0 - - defer func() { - if retErr != nil { - // Ensure the in memory chunks are updated to reflect the number of dropped chunks from the transfer - i.metrics.memoryChunks.Sub(chunksAdded) - - // If an error occurs during the transfer and the user state is to be discarded, - // ensure the metrics it exports reflect this. 
- userStates.teardown() - } - }() - - for { - wireSeries, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - retErr = errors.Wrap(err, "TransferChunks: Recv") - return - } - - // We can't send "extra" fields with a streaming call, so we repeat - // wireSeries.FromIngesterId and assume it is the same every time - // round this loop. - if fromIngesterID == "" { - fromIngesterID = wireSeries.FromIngesterId - level.Info(i.logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) - - // Before transfer, make sure 'from' ingester is in correct state to call ClaimTokensFor later - err := i.checkFromIngesterIsInLeavingState(stream.Context(), fromIngesterID) - if err != nil { - retErr = errors.Wrap(err, "TransferChunks: checkFromIngesterIsInLeavingState") - return - } - } - descs, err := fromWireChunks(wireSeries.Chunks) - if err != nil { - retErr = errors.Wrap(err, "TransferChunks: fromWireChunks") - return - } - - state, fp, series, err := userStates.getOrCreateSeries(stream.Context(), wireSeries.UserId, wireSeries.Labels, nil) - if err != nil { - retErr = errors.Wrapf(err, "TransferChunks: getOrCreateSeries: user %s series %s", wireSeries.UserId, wireSeries.Labels) - return - } - prevNumChunks := len(series.chunkDescs) - - err = series.setChunks(descs) - state.fpLocker.Unlock(fp) // acquired in getOrCreateSeries - if err != nil { - retErr = errors.Wrapf(err, "TransferChunks: setChunks: user %s series %s", wireSeries.UserId, wireSeries.Labels) - return - } - - seriesReceived++ - chunksDelta := float64(len(series.chunkDescs) - prevNumChunks) - chunksAdded += chunksDelta - i.metrics.memoryChunks.Add(chunksDelta) - i.metrics.receivedChunks.Add(float64(len(descs))) - } - - if seriesReceived == 0 { - level.Error(i.logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) - retErr = fmt.Errorf("TransferChunks: no series") - return - } - - if fromIngesterID == "" { - level.Error(i.logger).Log("msg", "received TransferChunks request with no ID from ingester") - retErr = fmt.Errorf("no ingester id") - return - } - - if err := i.lifecycler.ClaimTokensFor(stream.Context(), fromIngesterID); err != nil { - retErr = errors.Wrap(err, "TransferChunks: ClaimTokensFor") - return - } - - return -} - -// TransferChunks receives all the chunks from another ingester. -func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) error { - fromIngesterID := "" - seriesReceived := 0 - - xfer := func() error { - userStates := newUserStates(i.limiter, i.cfg, i.metrics, i.logger) - - var err error - fromIngesterID, seriesReceived, err = i.fillUserStatesFromStream(userStates, stream) - - if err != nil { - return err - } - - i.userStatesMtx.Lock() - defer i.userStatesMtx.Unlock() - - i.userStates = userStates - - return nil - } - - if err := i.transfer(stream.Context(), xfer); err != nil { - return err - } - - // Close the stream last, as this is what tells the "from" ingester that - // it's OK to shut down. - if err := stream.SendAndClose(&client.TransferChunksResponse{}); err != nil { - level.Error(i.logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) - return err - } - level.Info(i.logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) - - return nil -} - -// Ring gossiping: check if "from" ingester is in LEAVING state. It should be, but we may not see that yet -// when using gossip ring. 
If we cannot see ingester is the LEAVING state yet, we don't accept this -// transfer, as claiming tokens would possibly end up with this ingester owning no tokens, due to conflict -// resolution in ring merge function. Hopefully the leaving ingester will retry transfer again. -func (i *Ingester) checkFromIngesterIsInLeavingState(ctx context.Context, fromIngesterID string) error { - v, err := i.lifecycler.KVStore.Get(ctx, i.lifecycler.RingKey) - if err != nil { - return errors.Wrap(err, "get ring") - } - if v == nil { - return fmt.Errorf("ring not found when checking state of source ingester") - } - r, ok := v.(*ring.Desc) - if !ok || r == nil { - return fmt.Errorf("ring not found, got %T", v) - } - - if r.Ingesters == nil || r.Ingesters[fromIngesterID].State != ring.LEAVING { - return fmt.Errorf("source ingester is not in a LEAVING state, found state=%v", r.Ingesters[fromIngesterID].State) - } - - // all fine - return nil -} - -func (i *Ingester) transfer(ctx context.Context, xfer func() error) error { - // Enter JOINING state (only valid from PENDING) - if err := i.lifecycler.ChangeState(ctx, ring.JOINING); err != nil { - return err - } - - // The ingesters state effectively works as a giant mutex around this whole - // method, and as such we have to ensure we unlock the mutex. - defer func() { - state := i.lifecycler.GetState() - if i.lifecycler.GetState() == ring.ACTIVE { - return - } - - level.Error(i.logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state) - - // Enter PENDING state (only valid from JOINING) - if i.lifecycler.GetState() == ring.JOINING { - if err := i.lifecycler.ChangeState(ctx, ring.PENDING); err != nil { - level.Error(i.logger).Log("msg", "error rolling back failed TransferChunks", "err", err) - os.Exit(1) - } - } - }() - - if err := xfer(); err != nil { - return err - } - - if err := i.lifecycler.ChangeState(ctx, ring.ACTIVE); err != nil { - return errors.Wrap(err, "Transfer: ChangeState") - } - - return nil -} - -// The passed wireChunks slice is for re-use. -func toWireChunks(descs []*desc, wireChunks []client.Chunk) ([]client.Chunk, error) { - if cap(wireChunks) < len(descs) { - wireChunks = make([]client.Chunk, len(descs)) - } else { - wireChunks = wireChunks[:len(descs)] - } - for i, d := range descs { - wireChunk := client.Chunk{ - StartTimestampMs: int64(d.FirstTime), - EndTimestampMs: int64(d.LastTime), - Encoding: int32(d.C.Encoding()), - } - - slice := wireChunks[i].Data[:0] // try to re-use the memory from last time - if cap(slice) < d.C.Size() { - slice = make([]byte, 0, d.C.Size()) - } - buf := bytes.NewBuffer(slice) - - if err := d.C.Marshal(buf); err != nil { - return nil, err - } - - wireChunk.Data = buf.Bytes() - wireChunks[i] = wireChunk - } - return wireChunks, nil -} - -func fromWireChunks(wireChunks []client.Chunk) ([]*desc, error) { - descs := make([]*desc, 0, len(wireChunks)) - for _, c := range wireChunks { - desc := &desc{ - FirstTime: model.Time(c.StartTimestampMs), - LastTime: model.Time(c.EndTimestampMs), - LastUpdate: model.Now(), - } - - var err error - desc.C, err = encoding.NewForEncoding(encoding.Encoding(byte(c.Encoding))) - if err != nil { - return nil, err - } - - if err := desc.C.UnmarshalFromBuf(c.Data); err != nil { - return nil, err - } - - descs = append(descs, desc) - } - return descs, nil -} - -// TransferOut finds an ingester in PENDING state and transfers our chunks to it. -// Called as part of the ingester shutdown process. 
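toWireChunks and fromWireChunks above are inverses up to LastUpdate, which the receiving side resets to its own clock; a round-trip sketch:

	wire, _ := toWireChunks(series.chunkDescs, nil) // nil: no wire slice to reuse
	descs, _ := fromWireChunks(wire)                // errors elided in this sketch
	// descs[i].FirstTime and LastTime match the originals; LastUpdate is model.Now().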
-func (i *Ingester) TransferOut(ctx context.Context) error { - // The blocks storage doesn't support blocks transferring. - if i.cfg.BlocksStorageEnabled { - level.Info(i.logger).Log("msg", "transfer between a LEAVING ingester and a PENDING one is not supported for the blocks storage") - return ring.ErrTransferDisabled - } - - if i.cfg.MaxTransferRetries <= 0 { - return ring.ErrTransferDisabled - } - backoff := backoff.New(ctx, backoff.Config{ - MinBackoff: 100 * time.Millisecond, - MaxBackoff: 5 * time.Second, - MaxRetries: i.cfg.MaxTransferRetries, - }) - - // Keep track of the last error so that we can log it with the highest level - // once all retries have completed - var err error - - for backoff.Ongoing() { - err = i.transferOut(ctx) - if err == nil { - level.Info(i.logger).Log("msg", "transfer successfully completed") - return nil - } - - level.Warn(i.logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries) - - backoff.Wait() - } - - level.Error(i.logger).Log("msg", "all transfer attempts failed", "err", err) - return backoff.Err() -} - -func (i *Ingester) transferOut(ctx context.Context) error { - userStatesCopy := i.userStates.cp() - if len(userStatesCopy) == 0 { - level.Info(i.logger).Log("msg", "nothing to transfer") - return nil - } - - targetIngester, err := i.findTargetIngester(ctx) - if err != nil { - return fmt.Errorf("cannot find ingester to transfer chunks to: %w", err) - } - - level.Info(i.logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr) - c, err := i.cfg.ingesterClientFactory(targetIngester.Addr, i.clientConfig) - if err != nil { - return err - } - defer c.Close() - - ctx = user.InjectOrgID(ctx, "-1") - stream, err := c.TransferChunks(ctx) - if err != nil { - return errors.Wrap(err, "TransferChunks") - } - - var chunks []client.Chunk - for userID, state := range userStatesCopy { - for pair := range state.fpToSeries.iter() { - state.fpLocker.Lock(pair.fp) - - if len(pair.series.chunkDescs) == 0 { // Nothing to send? - state.fpLocker.Unlock(pair.fp) - continue - } - - chunks, err = toWireChunks(pair.series.chunkDescs, chunks) - if err != nil { - state.fpLocker.Unlock(pair.fp) - return errors.Wrap(err, "toWireChunks") - } - - err = client.SendTimeSeriesChunk(stream, &client.TimeSeriesChunk{ - FromIngesterId: i.lifecycler.ID, - UserId: userID, - Labels: cortexpb.FromLabelsToLabelAdapters(pair.series.metric), - Chunks: chunks, - }) - state.fpLocker.Unlock(pair.fp) - if err != nil { - return errors.Wrap(err, "Send") - } - - i.metrics.sentChunks.Add(float64(len(chunks))) - } - } - - _, err = stream.CloseAndRecv() - if err != nil { - return errors.Wrap(err, "CloseAndRecv") - } - - // Close & empty all the flush queues, to unblock waiting workers. - for _, flushQueue := range i.flushQueues { - flushQueue.DiscardAndClose() - } - i.flushQueuesDone.Wait() - - level.Info(i.logger).Log("msg", "successfully sent chunks", "to_ingester", targetIngester.Addr) - return nil -} - -// findTargetIngester finds an ingester in PENDING state. 
-func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.InstanceDesc, error) { - ringDesc, err := i.lifecycler.KVStore.Get(ctx, i.lifecycler.RingKey) - if err != nil { - return nil, err - } else if ringDesc == nil { - return nil, errTransferNoPendingIngesters - } - - ingesters := ringDesc.(*ring.Desc).FindIngestersByState(ring.PENDING) - if len(ingesters) <= 0 { - return nil, errTransferNoPendingIngesters - } - - return &ingesters[0], nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go deleted file mode 100644 index 8c4156f3b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go +++ /dev/null @@ -1,110 +0,0 @@ -package ingester - -import ( - "sync" - "time" - - "github.com/prometheus/prometheus/model/labels" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// userMetricsMetadata allows metric metadata of a tenant to be held by the ingester. -// Metadata is kept as a set as it can come from multiple targets that Prometheus scrapes -// with the same metric name. -type userMetricsMetadata struct { - limiter *Limiter - metrics *ingesterMetrics - userID string - - mtx sync.RWMutex - metricToMetadata map[string]metricMetadataSet -} - -func newMetadataMap(l *Limiter, m *ingesterMetrics, userID string) *userMetricsMetadata { - return &userMetricsMetadata{ - metricToMetadata: map[string]metricMetadataSet{}, - limiter: l, - metrics: m, - userID: userID, - } -} - -func (mm *userMetricsMetadata) add(metric string, metadata *cortexpb.MetricMetadata) error { - mm.mtx.Lock() - defer mm.mtx.Unlock() - - // As we get the set, we also validate two things: - // 1. The user is allowed to create new metrics to add metadata to. - // 2. If the metadata set is already present, it hasn't reached the limit of metadata we can append. - set, ok := mm.metricToMetadata[metric] - if !ok { - // Verify that the user can create more metric metadata given we don't have a set for that metric name. - if err := mm.limiter.AssertMaxMetricsWithMetadataPerUser(mm.userID, len(mm.metricToMetadata)); err != nil { - validation.DiscardedMetadata.WithLabelValues(mm.userID, perUserMetadataLimit).Inc() - return makeLimitError(perUserMetadataLimit, mm.limiter.FormatError(mm.userID, err)) - } - set = metricMetadataSet{} - mm.metricToMetadata[metric] = set - } - - if err := mm.limiter.AssertMaxMetadataPerMetric(mm.userID, len(set)); err != nil { - validation.DiscardedMetadata.WithLabelValues(mm.userID, perMetricMetadataLimit).Inc() - return makeMetricLimitError(perMetricMetadataLimit, labels.FromStrings(labels.MetricName, metric), mm.limiter.FormatError(mm.userID, err)) - } - - // if we have seen this metadata before, it is a no-op and we don't need to change our metrics. - _, ok = set[*metadata] - if !ok { - mm.metrics.memMetadata.Inc() - mm.metrics.memMetadataCreatedTotal.WithLabelValues(mm.userID).Inc() - } - - mm.metricToMetadata[metric][*metadata] = time.Now() - return nil -} - -// If deadline is zero, all metadata is purged. 
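add relies on cortexpb.MetricMetadata being a comparable struct, so the whole metadata value serves as the map key: re-adding identical metadata is a no-op apart from refreshing its timestamp. The same pattern with a hypothetical key type:

	type meta struct{ Type, Help, Unit string } // stand-in for cortexpb.MetricMetadata

	seen := map[meta]time.Time{}
	m := meta{Type: "counter", Help: "Total requests."}
	if _, ok := seen[m]; !ok {
		// first sighting: bump the in-memory metadata gauges here
	}
	seen[m] = time.Now() // always refresh, so a later purge keeps fresh entries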
-func (mm *userMetricsMetadata) purge(deadline time.Time) { - mm.mtx.Lock() - defer mm.mtx.Unlock() - var deleted int - for m, s := range mm.metricToMetadata { - deleted += s.purge(deadline) - - if len(s) <= 0 { - delete(mm.metricToMetadata, m) - } - } - - mm.metrics.memMetadata.Sub(float64(deleted)) - mm.metrics.memMetadataRemovedTotal.WithLabelValues(mm.userID).Add(float64(deleted)) -} - -func (mm *userMetricsMetadata) toClientMetadata() []*cortexpb.MetricMetadata { - mm.mtx.RLock() - defer mm.mtx.RUnlock() - r := make([]*cortexpb.MetricMetadata, 0, len(mm.metricToMetadata)) - for _, set := range mm.metricToMetadata { - for m := range set { - r = append(r, &m) - } - } - return r -} - -type metricMetadataSet map[cortexpb.MetricMetadata]time.Time - -// If deadline is zero time, all metrics are purged. -func (mms metricMetadataSet) purge(deadline time.Time) int { - var deleted int - for metadata, t := range mms { - if deadline.IsZero() || deadline.After(t) { - delete(mms, metadata) - deleted++ - } - } - - return deleted -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go deleted file mode 100644 index 685dd54f3..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go +++ /dev/null @@ -1,418 +0,0 @@ -package ingester - -import ( - "context" - "net/http" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/tsdb/chunks" - tsdb_record "github.com/prometheus/prometheus/tsdb/record" - "github.com/segmentio/fasthash/fnv1a" - "github.com/weaveworks/common/httpgrpc" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/ingester/index" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/extract" - util_math "github.com/cortexproject/cortex/pkg/util/math" - "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// userStates holds the userState object for all users (tenants), -// each one containing all the in-memory series for a given user. -type userStates struct { - states sync.Map - limiter *Limiter - cfg Config - metrics *ingesterMetrics - logger log.Logger -} - -type userState struct { - limiter *Limiter - userID string - fpLocker *fingerprintLocker - fpToSeries *seriesMap - mapper *fpMapper - index *index.InvertedIndex - ingestedAPISamples *util_math.EwmaRate - ingestedRuleSamples *util_math.EwmaRate - activeSeries *ActiveSeries - logger log.Logger - - seriesInMetric *metricCounter - - // Series metrics. 
- memSeries prometheus.Gauge - memSeriesCreatedTotal prometheus.Counter - memSeriesRemovedTotal prometheus.Counter - discardedSamples *prometheus.CounterVec - createdChunks prometheus.Counter - activeSeriesGauge prometheus.Gauge -} - -// DiscardedSamples metric labels -const ( - perUserSeriesLimit = "per_user_series_limit" - perMetricSeriesLimit = "per_metric_series_limit" -) - -func newUserStates(limiter *Limiter, cfg Config, metrics *ingesterMetrics, logger log.Logger) *userStates { - return &userStates{ - limiter: limiter, - cfg: cfg, - metrics: metrics, - logger: logger, - } -} - -func (us *userStates) cp() map[string]*userState { - states := map[string]*userState{} - us.states.Range(func(key, value interface{}) bool { - states[key.(string)] = value.(*userState) - return true - }) - return states -} - -//nolint:unused -func (us *userStates) gc() { - us.states.Range(func(key, value interface{}) bool { - state := value.(*userState) - if state.fpToSeries.length() == 0 { - us.states.Delete(key) - state.activeSeries.clear() - state.activeSeriesGauge.Set(0) - } - return true - }) -} - -func (us *userStates) updateRates() { - us.states.Range(func(key, value interface{}) bool { - state := value.(*userState) - state.ingestedAPISamples.Tick() - state.ingestedRuleSamples.Tick() - return true - }) -} - -// Labels will be copied if they are kept. -func (us *userStates) updateActiveSeriesForUser(userID string, now time.Time, lbls []labels.Label) { - if s, ok := us.get(userID); ok { - s.activeSeries.UpdateSeries(lbls, now, func(l labels.Labels) labels.Labels { return cortexpb.CopyLabels(l) }) - } -} - -func (us *userStates) purgeAndUpdateActiveSeries(purgeTime time.Time) { - us.states.Range(func(key, value interface{}) bool { - state := value.(*userState) - state.activeSeries.Purge(purgeTime) - state.activeSeriesGauge.Set(float64(state.activeSeries.Active())) - return true - }) -} - -func (us *userStates) get(userID string) (*userState, bool) { - state, ok := us.states.Load(userID) - if !ok { - return nil, ok - } - return state.(*userState), ok -} - -func (us *userStates) getOrCreate(userID string) *userState { - state, ok := us.get(userID) - if !ok { - - logger := log.With(us.logger, "user", userID) - // Speculatively create a userState object and try to store it - // in the map. 
Another goroutine may have got there before - // us, in which case this userState will be discarded - state = &userState{ - userID: userID, - limiter: us.limiter, - fpToSeries: newSeriesMap(), - fpLocker: newFingerprintLocker(16 * 1024), - index: index.New(), - ingestedAPISamples: util_math.NewEWMARate(0.2, us.cfg.RateUpdatePeriod), - ingestedRuleSamples: util_math.NewEWMARate(0.2, us.cfg.RateUpdatePeriod), - seriesInMetric: newMetricCounter(us.limiter, us.cfg.getIgnoreSeriesLimitForMetricNamesMap()), - logger: logger, - - memSeries: us.metrics.memSeries, - memSeriesCreatedTotal: us.metrics.memSeriesCreatedTotal.WithLabelValues(userID), - memSeriesRemovedTotal: us.metrics.memSeriesRemovedTotal.WithLabelValues(userID), - discardedSamples: validation.DiscardedSamples.MustCurryWith(prometheus.Labels{"user": userID}), - createdChunks: us.metrics.createdChunks, - - activeSeries: NewActiveSeries(), - activeSeriesGauge: us.metrics.activeSeriesPerUser.WithLabelValues(userID), - } - state.mapper = newFPMapper(state.fpToSeries, logger) - stored, ok := us.states.LoadOrStore(userID, state) - if !ok { - us.metrics.memUsers.Inc() - } - state = stored.(*userState) - } - - return state -} - -// teardown ensures metrics are accurately updated if a userStates struct is discarded -func (us *userStates) teardown() { - for _, u := range us.cp() { - u.memSeriesRemovedTotal.Add(float64(u.fpToSeries.length())) - u.memSeries.Sub(float64(u.fpToSeries.length())) - u.activeSeriesGauge.Set(0) - us.metrics.memUsers.Dec() - } -} - -func (us *userStates) getViaContext(ctx context.Context) (*userState, bool, error) { - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, false, err - } - state, ok := us.get(userID) - return state, ok, nil -} - -// NOTE: memory for `labels` is unsafe; anything retained beyond the -// life of this function must be copied -func (us *userStates) getOrCreateSeries(ctx context.Context, userID string, labels []cortexpb.LabelAdapter, record *WALRecord) (*userState, model.Fingerprint, *memorySeries, error) { - state := us.getOrCreate(userID) - // WARNING: `err` may have a reference to unsafe memory in `labels` - fp, series, err := state.getSeries(labels, record) - return state, fp, series, err -} - -// NOTE: memory for `metric` is unsafe; anything retained beyond the -// life of this function must be copied -func (u *userState) getSeries(metric labelPairs, record *WALRecord) (model.Fingerprint, *memorySeries, error) { - rawFP := client.FastFingerprint(metric) - u.fpLocker.Lock(rawFP) - fp := u.mapper.mapFP(rawFP, metric) - if fp != rawFP { - u.fpLocker.Unlock(rawFP) - u.fpLocker.Lock(fp) - } - - series, ok := u.fpToSeries.get(fp) - if ok { - return fp, series, nil - } - - series, err := u.createSeriesWithFingerprint(fp, metric, record, false) - if err != nil { - u.fpLocker.Unlock(fp) - return 0, nil, err - } - - return fp, series, nil -} - -func (u *userState) createSeriesWithFingerprint(fp model.Fingerprint, metric labelPairs, record *WALRecord, recovery bool) (*memorySeries, error) { - // There's theoretically a relatively harmless race here if multiple - // goroutines get the length of the series map at the same time, then - // all proceed to add a new series. This is likely not worth addressing, - // as this should happen rarely (all samples from one push are added - // serially), and the overshoot in allowed series would be minimal. 
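
A minimal standalone sketch of the speculative create-then-LoadOrStore idiom used by getOrCreate above (the userState here is a hypothetical stand-in with everything except the ID elided; this is not the removed code itself):

    package main

    import (
        "fmt"
        "sync"
    )

    // userState stands in for the real struct; only the ID matters here.
    type userState struct{ userID string }

    var states sync.Map // effectively map[string]*userState

    // getOrCreate allocates a candidate and lets LoadOrStore pick the winner:
    // if another goroutine stored a value first, the candidate is discarded
    // and the stored value is returned, so exactly one state survives per key.
    func getOrCreate(userID string) *userState {
        if v, ok := states.Load(userID); ok {
            return v.(*userState)
        }
        candidate := &userState{userID: userID}
        actual, loaded := states.LoadOrStore(userID, candidate)
        if !loaded {
            // This goroutine won the race; the removed code increments
            // its memUsers gauge at exactly this point.
        }
        return actual.(*userState)
    }

    func main() {
        fmt.Println(getOrCreate("tenant-1") == getOrCreate("tenant-1")) // true
    }

The losing candidate is simply garbage collected; the same tolerance for benign races shows up again in the series-limit check that follows.
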
- - if !recovery { - if err := u.limiter.AssertMaxSeriesPerUser(u.userID, u.fpToSeries.length()); err != nil { - return nil, makeLimitError(perUserSeriesLimit, u.limiter.FormatError(u.userID, err)) - } - } - - // MetricNameFromLabelAdapters returns a copy of the string in `metric` - metricName, err := extract.MetricNameFromLabelAdapters(metric) - if err != nil { - return nil, err - } - - if !recovery { - // Check if the per-metric limit has been exceeded - if err = u.seriesInMetric.canAddSeriesFor(u.userID, metricName); err != nil { - // WARNING: returns a reference to `metric` - return nil, makeMetricLimitError(perMetricSeriesLimit, cortexpb.FromLabelAdaptersToLabels(metric), u.limiter.FormatError(u.userID, err)) - } - } - - u.memSeriesCreatedTotal.Inc() - u.memSeries.Inc() - u.seriesInMetric.increaseSeriesForMetric(metricName) - - if record != nil { - lbls := make(labels.Labels, 0, len(metric)) - for _, m := range metric { - lbls = append(lbls, labels.Label(m)) - } - record.Series = append(record.Series, tsdb_record.RefSeries{ - Ref: chunks.HeadSeriesRef(fp), - Labels: lbls, - }) - } - - labels := u.index.Add(metric, fp) // Add() returns 'interned' values so the original labels are not retained - series := newMemorySeries(labels, u.createdChunks) - u.fpToSeries.put(fp, series) - - return series, nil -} - -func (u *userState) removeSeries(fp model.Fingerprint, metric labels.Labels) { - u.fpToSeries.del(fp) - u.index.Delete(metric, fp) - - metricName := metric.Get(model.MetricNameLabel) - if metricName == "" { - // Series without a metric name should never be able to make it into - // the ingester's memory storage. - panic("No metric name label") - } - - u.seriesInMetric.decreaseSeriesForMetric(metricName) - - u.memSeriesRemovedTotal.Inc() - u.memSeries.Dec() -} - -// forSeriesMatching passes all series matching the given matchers to the -// provided callback. Deals with locking and the quirks of zero-length matcher -// values. There are 2 callbacks: -// - The `add` callback is called for each series while the lock is held, and -// is intend to be used by the caller to build a batch. -// - The `send` callback is called at certain intervals specified by batchSize -// with no locks held, and is intended to be used by the caller to send the -// built batches. -func (u *userState) forSeriesMatching(ctx context.Context, allMatchers []*labels.Matcher, - add func(context.Context, model.Fingerprint, *memorySeries) error, - send func(context.Context) error, batchSize int, -) error { - log, ctx := spanlogger.New(ctx, "forSeriesMatching") - defer log.Finish() - - filters, matchers := util.SplitFiltersAndMatchers(allMatchers) - fps := u.index.Lookup(matchers) - if len(fps) > u.limiter.MaxSeriesPerQuery(u.userID) { - return httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "exceeded maximum number of series in a query") - } - - level.Debug(log).Log("series", len(fps)) - - // We only hold one FP lock at once here, so no opportunity to deadlock. 
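
The loop that follows implements that contract. As a minimal sketch of the add/send batching shape, with hypothetical names and ints standing in for series:

    package main

    import "fmt"

    // forEachBatched calls add for every item and send after every batchSize
    // items, plus a final send for any remainder, mirroring how
    // forSeriesMatching drives its two callbacks.
    func forEachBatched(items []int, batchSize int, add func(int), send func()) {
        sent := 0
        for _, it := range items {
            add(it)
            sent++
            if batchSize > 0 && sent%batchSize == 0 {
                send()
            }
        }
        if batchSize > 0 && sent%batchSize > 0 {
            send()
        }
    }

    func main() {
        var batch []int
        forEachBatched([]int{1, 2, 3, 4, 5}, 2,
            func(i int) { batch = append(batch, i) },
            func() { fmt.Println("flush", batch); batch = batch[:0] })
        // Output: flush [1 2], flush [3 4], flush [5]
    }
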
- sent := 0 -outer: - for _, fp := range fps { - if err := ctx.Err(); err != nil { - return err - } - - u.fpLocker.Lock(fp) - series, ok := u.fpToSeries.get(fp) - if !ok { - u.fpLocker.Unlock(fp) - continue - } - - for _, filter := range filters { - if !filter.Matches(series.metric.Get(filter.Name)) { - u.fpLocker.Unlock(fp) - continue outer - } - } - - err := add(ctx, fp, series) - u.fpLocker.Unlock(fp) - if err != nil { - return err - } - - sent++ - if batchSize > 0 && sent%batchSize == 0 && send != nil { - if err = send(ctx); err != nil { - return nil - } - } - } - - if batchSize > 0 && sent%batchSize > 0 && send != nil { - return send(ctx) - } - return nil -} - -const numMetricCounterShards = 128 - -type metricCounterShard struct { - mtx sync.Mutex - m map[string]int -} - -type metricCounter struct { - limiter *Limiter - shards []metricCounterShard - - ignoredMetrics map[string]struct{} -} - -func newMetricCounter(limiter *Limiter, ignoredMetricsForSeriesCount map[string]struct{}) *metricCounter { - shards := make([]metricCounterShard, 0, numMetricCounterShards) - for i := 0; i < numMetricCounterShards; i++ { - shards = append(shards, metricCounterShard{ - m: map[string]int{}, - }) - } - return &metricCounter{ - limiter: limiter, - shards: shards, - - ignoredMetrics: ignoredMetricsForSeriesCount, - } -} - -func (m *metricCounter) decreaseSeriesForMetric(metricName string) { - shard := m.getShard(metricName) - shard.mtx.Lock() - defer shard.mtx.Unlock() - - shard.m[metricName]-- - if shard.m[metricName] == 0 { - delete(shard.m, metricName) - } -} - -func (m *metricCounter) getShard(metricName string) *metricCounterShard { - shard := &m.shards[util.HashFP(model.Fingerprint(fnv1a.HashString64(metricName)))%numMetricCounterShards] - return shard -} - -func (m *metricCounter) canAddSeriesFor(userID, metric string) error { - if _, ok := m.ignoredMetrics[metric]; ok { - return nil - } - - shard := m.getShard(metric) - shard.mtx.Lock() - defer shard.mtx.Unlock() - - return m.limiter.AssertMaxSeriesPerMetric(userID, shard.m[metric]) -} - -func (m *metricCounter) increaseSeriesForMetric(metric string) { - shard := m.getShard(metric) - shard.mtx.Lock() - shard.m[metric]++ - shard.mtx.Unlock() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go deleted file mode 100644 index d714294be..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go +++ /dev/null @@ -1,1134 +0,0 @@ -package ingester - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/tsdb/encoding" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/prometheus/prometheus/tsdb/fileutil" - tsdb_record "github.com/prometheus/prometheus/tsdb/record" - "github.com/prometheus/prometheus/tsdb/wal" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" -) - -// WALConfig is config for the Write Ahead Log. 
-type WALConfig struct { - WALEnabled bool `yaml:"wal_enabled"` - CheckpointEnabled bool `yaml:"checkpoint_enabled"` - Recover bool `yaml:"recover_from_wal"` - Dir string `yaml:"wal_dir"` - CheckpointDuration time.Duration `yaml:"checkpoint_duration"` - FlushOnShutdown bool `yaml:"flush_on_shutdown_with_wal_enabled"` - // We always checkpoint during shutdown. This option exists for the tests. - checkpointDuringShutdown bool -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *WALConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Dir, "ingester.wal-dir", "wal", "Directory to store the WAL and/or recover from WAL.") - f.BoolVar(&cfg.Recover, "ingester.recover-from-wal", false, "Recover data from existing WAL irrespective of WAL enabled/disabled.") - f.BoolVar(&cfg.WALEnabled, "ingester.wal-enabled", false, "Enable writing of ingested data into WAL.") - f.BoolVar(&cfg.CheckpointEnabled, "ingester.checkpoint-enabled", true, "Enable checkpointing of in-memory chunks. It should always be true when using normally. Set it to false iff you are doing some small tests as there is no mechanism to delete the old WAL yet if checkpoint is disabled.") - f.DurationVar(&cfg.CheckpointDuration, "ingester.checkpoint-duration", 30*time.Minute, "Interval at which checkpoints should be created.") - f.BoolVar(&cfg.FlushOnShutdown, "ingester.flush-on-shutdown-with-wal-enabled", false, "When WAL is enabled, should chunks be flushed to long-term storage on shutdown. Useful eg. for migration to blocks engine.") - cfg.checkpointDuringShutdown = true -} - -// WAL interface allows us to have a no-op WAL when the WAL is disabled. -type WAL interface { - // Log marshalls the records and writes it into the WAL. - Log(*WALRecord) error - // Stop stops all the WAL operations. - Stop() -} - -// RecordType represents the type of the WAL/Checkpoint record. -type RecordType byte - -const ( - // WALRecordSeries is the type for the WAL record on Prometheus TSDB record for series. - WALRecordSeries RecordType = 1 - // WALRecordSamples is the type for the WAL record based on Prometheus TSDB record for samples. - WALRecordSamples RecordType = 2 - - // CheckpointRecord is the type for the Checkpoint record based on protos. - CheckpointRecord RecordType = 3 -) - -type noopWAL struct{} - -func (noopWAL) Log(*WALRecord) error { return nil } -func (noopWAL) Stop() {} - -type walWrapper struct { - cfg WALConfig - quit chan struct{} - wait sync.WaitGroup - - wal *wal.WAL - getUserStates func() map[string]*userState - checkpointMtx sync.Mutex - bytesPool sync.Pool - - logger log.Logger - - // Metrics. - checkpointDeleteFail prometheus.Counter - checkpointDeleteTotal prometheus.Counter - checkpointCreationFail prometheus.Counter - checkpointCreationTotal prometheus.Counter - checkpointDuration prometheus.Summary - checkpointLoggedBytesTotal prometheus.Counter - walLoggedBytesTotal prometheus.Counter - walRecordsLogged prometheus.Counter -} - -// newWAL creates a WAL object. If the WAL is disabled, then the returned WAL is a no-op WAL. 
-func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, registerer prometheus.Registerer, logger log.Logger) (WAL, error) { - if !cfg.WALEnabled { - return &noopWAL{}, nil - } - - var walRegistry prometheus.Registerer - if registerer != nil { - walRegistry = prometheus.WrapRegistererWith(prometheus.Labels{"kind": "wal"}, registerer) - } - tsdbWAL, err := wal.NewSize(logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false) - if err != nil { - return nil, err - } - - w := &walWrapper{ - cfg: cfg, - quit: make(chan struct{}), - wal: tsdbWAL, - getUserStates: userStatesFunc, - bytesPool: sync.Pool{ - New: func() interface{} { - return make([]byte, 0, 512) - }, - }, - logger: logger, - } - - w.checkpointDeleteFail = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_checkpoint_deletions_failed_total", - Help: "Total number of checkpoint deletions that failed.", - }) - w.checkpointDeleteTotal = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_checkpoint_deletions_total", - Help: "Total number of checkpoint deletions attempted.", - }) - w.checkpointCreationFail = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_checkpoint_creations_failed_total", - Help: "Total number of checkpoint creations that failed.", - }) - w.checkpointCreationTotal = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_checkpoint_creations_total", - Help: "Total number of checkpoint creations attempted.", - }) - w.checkpointDuration = promauto.With(registerer).NewSummary(prometheus.SummaryOpts{ - Name: "cortex_ingester_checkpoint_duration_seconds", - Help: "Time taken to create a checkpoint.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }) - w.walRecordsLogged = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_wal_records_logged_total", - Help: "Total number of WAL records logged.", - }) - w.checkpointLoggedBytesTotal = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_checkpoint_logged_bytes_total", - Help: "Total number of bytes written to disk for checkpointing.", - }) - w.walLoggedBytesTotal = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_wal_logged_bytes_total", - Help: "Total number of bytes written to disk for WAL records.", - }) - - w.wait.Add(1) - go w.run() - return w, nil -} - -func (w *walWrapper) Stop() { - close(w.quit) - w.wait.Wait() - w.wal.Close() -} - -func (w *walWrapper) Log(record *WALRecord) error { - if record == nil || (len(record.Series) == 0 && len(record.Samples) == 0) { - return nil - } - select { - case <-w.quit: - return nil - default: - buf := w.bytesPool.Get().([]byte)[:0] - defer func() { - w.bytesPool.Put(buf) // nolint:staticcheck - }() - - if len(record.Series) > 0 { - buf = record.encodeSeries(buf) - if err := w.wal.Log(buf); err != nil { - return err - } - w.walRecordsLogged.Inc() - w.walLoggedBytesTotal.Add(float64(len(buf))) - buf = buf[:0] - } - if len(record.Samples) > 0 { - buf = record.encodeSamples(buf) - if err := w.wal.Log(buf); err != nil { - return err - } - w.walRecordsLogged.Inc() - w.walLoggedBytesTotal.Add(float64(len(buf))) - } - return nil - } -} - -func (w *walWrapper) run() { - defer w.wait.Done() - - if !w.cfg.CheckpointEnabled { - return - } - - ticker := time.NewTicker(w.cfg.CheckpointDuration) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - start 
:= time.Now() - level.Info(w.logger).Log("msg", "starting checkpoint") - if err := w.performCheckpoint(false); err != nil { - level.Error(w.logger).Log("msg", "error checkpointing series", "err", err) - continue - } - elapsed := time.Since(start) - level.Info(w.logger).Log("msg", "checkpoint done", "time", elapsed.String()) - w.checkpointDuration.Observe(elapsed.Seconds()) - case <-w.quit: - if w.cfg.checkpointDuringShutdown { - level.Info(w.logger).Log("msg", "creating checkpoint before shutdown") - if err := w.performCheckpoint(true); err != nil { - level.Error(w.logger).Log("msg", "error checkpointing series during shutdown", "err", err) - } - } - return - } - } -} - -const checkpointPrefix = "checkpoint." - -func (w *walWrapper) performCheckpoint(immediate bool) (err error) { - if !w.cfg.CheckpointEnabled { - return nil - } - - // This method is called during shutdown which can interfere with ongoing checkpointing. - // Hence to avoid any race between file creation and WAL truncation, we hold this lock here. - w.checkpointMtx.Lock() - defer w.checkpointMtx.Unlock() - - w.checkpointCreationTotal.Inc() - defer func() { - if err != nil { - w.checkpointCreationFail.Inc() - } - }() - - if w.getUserStates == nil { - return errors.New("function to get user states not initialised") - } - - _, lastSegment, err := wal.Segments(w.wal.Dir()) - if err != nil { - return err - } - if lastSegment < 0 { - // There are no WAL segments. No need of checkpoint yet. - return nil - } - - _, lastCh, err := lastCheckpoint(w.wal.Dir()) - if err != nil { - return err - } - - if lastCh == lastSegment { - // As the checkpoint name is taken from last WAL segment, we need to ensure - // a new segment for every checkpoint so that the old checkpoint is not overwritten. - if err := w.wal.NextSegment(); err != nil { - return err - } - - _, lastSegment, err = wal.Segments(w.wal.Dir()) - if err != nil { - return err - } - } - - // Checkpoint is named after the last WAL segment present so that when replaying the WAL - // we can start from that particular WAL segment. - checkpointDir := filepath.Join(w.wal.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", lastSegment)) - level.Info(w.logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) - checkpointDirTemp := checkpointDir + ".tmp" - - if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil { - return errors.Wrap(err, "create checkpoint dir") - } - checkpoint, err := wal.New(nil, nil, checkpointDirTemp, false) - if err != nil { - return errors.Wrap(err, "open checkpoint") - } - defer func() { - checkpoint.Close() - os.RemoveAll(checkpointDirTemp) - }() - - // Count number of series - we'll use this to rate limit checkpoints. 
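
For intuition about the pacing computed below: with the default 30-minute checkpoint interval and an assumed one million in-memory series, the budget works out to 1.71ms per series, so a full pass takes about 28.5 minutes, 95% of the interval. A sketch of the arithmetic (example values only, not measured figures):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        checkpointDuration := 30 * time.Minute // default of -ingester.checkpoint-duration
        numSeries := 1_000_000                 // assumed for the example

        // Same formula as performCheckpoint below: spend ~95% of the
        // interval writing, leaving headroom before the next checkpoint.
        perSeries := (95 * checkpointDuration) / (100 * time.Duration(numSeries))
        fmt.Println(perSeries)                            // 1.71ms
        fmt.Println(perSeries * time.Duration(numSeries)) // 28m30s
    }
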
- numSeries := 0 - us := w.getUserStates() - for _, state := range us { - numSeries += state.fpToSeries.length() - } - if numSeries == 0 { - return nil - } - - perSeriesDuration := (95 * w.cfg.CheckpointDuration) / (100 * time.Duration(numSeries)) - - var wireChunkBuf []client.Chunk - var b []byte - bytePool := sync.Pool{ - New: func() interface{} { - return make([]byte, 0, 1024) - }, - } - records := [][]byte{} - totalSize := 0 - ticker := time.NewTicker(perSeriesDuration) - defer ticker.Stop() - start := time.Now() - for userID, state := range us { - for pair := range state.fpToSeries.iter() { - state.fpLocker.Lock(pair.fp) - wireChunkBuf, b, err = w.checkpointSeries(userID, pair.fp, pair.series, wireChunkBuf, bytePool.Get().([]byte)) - state.fpLocker.Unlock(pair.fp) - if err != nil { - return err - } - - records = append(records, b) - totalSize += len(b) - if totalSize >= 1*1024*1024 { // 1 MiB. - if err := checkpoint.Log(records...); err != nil { - return err - } - w.checkpointLoggedBytesTotal.Add(float64(totalSize)) - totalSize = 0 - for i := range records { - bytePool.Put(records[i]) // nolint:staticcheck - } - records = records[:0] - } - - if !immediate { - if time.Since(start) > 2*w.cfg.CheckpointDuration { - // This could indicate a surge in number of series and continuing with - // the old estimation of ticker can make checkpointing run indefinitely in worst case - // and disk running out of space. Re-adjust the ticker might not solve the problem - // as there can be another surge again. Hence let's checkpoint this one immediately. - immediate = true - continue - } - - select { - case <-ticker.C: - case <-w.quit: // When we're trying to shutdown, finish the checkpoint as fast as possible. - } - } - } - } - - if err := checkpoint.Log(records...); err != nil { - return err - } - - if err := checkpoint.Close(); err != nil { - return errors.Wrap(err, "close checkpoint") - } - if err := fileutil.Replace(checkpointDirTemp, checkpointDir); err != nil { - return errors.Wrap(err, "rename checkpoint directory") - } - - // We delete the WAL segments which are before the previous checkpoint and not before the - // current checkpoint created. This is because if the latest checkpoint is corrupted for any reason, we - // should be able to recover from the older checkpoint which would need the older WAL segments. - if err := w.wal.Truncate(lastCh); err != nil { - // It is fine to have old WAL segments hanging around if deletion failed. - // We can try again next time. - level.Error(w.logger).Log("msg", "error deleting old WAL segments", "err", err) - } - - if lastCh >= 0 { - if err := w.deleteCheckpoints(lastCh); err != nil { - // It is fine to have old checkpoints hanging around if deletion failed. - // We can try again next time. - level.Error(w.logger).Log("msg", "error deleting old checkpoint", "err", err) - } - } - - return nil -} - -// lastCheckpoint returns the directory name and index of the most recent checkpoint. -// If dir does not contain any checkpoints, -1 is returned as index. -func lastCheckpoint(dir string) (string, int, error) { - dirs, err := ioutil.ReadDir(dir) - if err != nil { - return "", -1, err - } - var ( - maxIdx = -1 - checkpointDir string - ) - // There may be multiple checkpoints left, so select the one with max index. 
- for i := 0; i < len(dirs); i++ { - di := dirs[i] - - idx, err := checkpointIndex(di.Name(), false) - if err != nil { - continue - } - if !di.IsDir() { - return "", -1, fmt.Errorf("checkpoint %s is not a directory", di.Name()) - } - if idx > maxIdx { - checkpointDir = di.Name() - maxIdx = idx - } - } - if maxIdx >= 0 { - return filepath.Join(dir, checkpointDir), maxIdx, nil - } - return "", -1, nil -} - -// deleteCheckpoints deletes all checkpoints in a directory which is < maxIndex. -func (w *walWrapper) deleteCheckpoints(maxIndex int) (err error) { - w.checkpointDeleteTotal.Inc() - defer func() { - if err != nil { - w.checkpointDeleteFail.Inc() - } - }() - - errs := tsdb_errors.NewMulti() - - files, err := ioutil.ReadDir(w.wal.Dir()) - if err != nil { - return err - } - for _, fi := range files { - index, err := checkpointIndex(fi.Name(), true) - if err != nil || index >= maxIndex { - continue - } - if err := os.RemoveAll(filepath.Join(w.wal.Dir(), fi.Name())); err != nil { - errs.Add(err) - } - } - return errs.Err() -} - -var checkpointRe = regexp.MustCompile("^" + regexp.QuoteMeta(checkpointPrefix) + "(\\d+)(\\.tmp)?$") - -// checkpointIndex returns the index of a given checkpoint file. It handles -// both regular and temporary checkpoints according to the includeTmp flag. If -// the file is not a checkpoint it returns an error. -func checkpointIndex(filename string, includeTmp bool) (int, error) { - result := checkpointRe.FindStringSubmatch(filename) - if len(result) < 2 { - return 0, errors.New("file is not a checkpoint") - } - // Filter out temporary checkpoints if desired. - if !includeTmp && len(result) == 3 && result[2] != "" { - return 0, errors.New("temporary checkpoint") - } - return strconv.Atoi(result[1]) -} - -// checkpointSeries write the chunks of the series to the checkpoint. 
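
Checkpoint directories follow the "checkpoint.%06d" naming used above, with a ".tmp" suffix while one is still being written. A small self-contained version of the index parsing (the regexp mirrors checkpointRe; the function and file names are otherwise made up):

    package main

    import (
        "fmt"
        "regexp"
        "strconv"
    )

    var re = regexp.MustCompile(`^checkpoint\.(\d+)(\.tmp)?$`)

    // index extracts the numeric suffix, optionally rejecting in-progress
    // ".tmp" checkpoints, just as checkpointIndex does.
    func index(name string, includeTmp bool) (int, error) {
        m := re.FindStringSubmatch(name)
        if len(m) < 2 {
            return 0, fmt.Errorf("%q is not a checkpoint", name)
        }
        if !includeTmp && m[2] != "" {
            return 0, fmt.Errorf("%q is a temporary checkpoint", name)
        }
        return strconv.Atoi(m[1])
    }

    func main() {
        fmt.Println(index("checkpoint.000123", false))     // 123 <nil>
        fmt.Println(index("checkpoint.000124.tmp", false)) // 0 and an error
    }
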
-func (w *walWrapper) checkpointSeries(userID string, fp model.Fingerprint, series *memorySeries, wireChunks []client.Chunk, b []byte) ([]client.Chunk, []byte, error) { - var err error - wireChunks, err = toWireChunks(series.chunkDescs, wireChunks) - if err != nil { - return wireChunks, b, err - } - - b, err = encodeWithTypeHeader(&Series{ - UserId: userID, - Fingerprint: uint64(fp), - Labels: cortexpb.FromLabelsToLabelAdapters(series.metric), - Chunks: wireChunks, - }, CheckpointRecord, b) - - return wireChunks, b, err -} - -type walRecoveryParameters struct { - walDir string - ingester *Ingester - numWorkers int - stateCache []map[string]*userState - seriesCache []map[string]map[uint64]*memorySeries -} - -func recoverFromWAL(ingester *Ingester) error { - params := walRecoveryParameters{ - walDir: ingester.cfg.WALConfig.Dir, - numWorkers: runtime.GOMAXPROCS(0), - ingester: ingester, - } - - params.stateCache = make([]map[string]*userState, params.numWorkers) - params.seriesCache = make([]map[string]map[uint64]*memorySeries, params.numWorkers) - for i := 0; i < params.numWorkers; i++ { - params.stateCache[i] = make(map[string]*userState) - params.seriesCache[i] = make(map[string]map[uint64]*memorySeries) - } - - level.Info(ingester.logger).Log("msg", "recovering from checkpoint") - start := time.Now() - userStates, idx, err := processCheckpointWithRepair(params) - if err != nil { - return err - } - elapsed := time.Since(start) - level.Info(ingester.logger).Log("msg", "recovered from checkpoint", "time", elapsed.String()) - - if segExists, err := segmentsExist(params.walDir); err == nil && !segExists { - level.Info(ingester.logger).Log("msg", "no segments found, skipping recover from segments") - ingester.userStatesMtx.Lock() - ingester.userStates = userStates - ingester.userStatesMtx.Unlock() - return nil - } else if err != nil { - return err - } - - level.Info(ingester.logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx) - start = time.Now() - if err := processWALWithRepair(idx, userStates, params); err != nil { - return err - } - elapsed = time.Since(start) - level.Info(ingester.logger).Log("msg", "recovered from WAL", "time", elapsed.String()) - - ingester.userStatesMtx.Lock() - ingester.userStates = userStates - ingester.userStatesMtx.Unlock() - return nil -} - -func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int, error) { - logger := params.ingester.logger - - // Use a local userStates, so we don't need to worry about locking. - userStates := newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics, params.ingester.logger) - - lastCheckpointDir, idx, err := lastCheckpoint(params.walDir) - if err != nil { - return nil, -1, err - } - if idx < 0 { - level.Info(logger).Log("msg", "no checkpoint found") - return userStates, -1, nil - } - - level.Info(logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir)) - - err = processCheckpoint(lastCheckpointDir, userStates, params) - if err == nil { - return userStates, idx, nil - } - - // We don't call repair on checkpoint as losing even a single record is like losing the entire data of a series. - // We try recovering from the older checkpoint instead. - params.ingester.metrics.walCorruptionsTotal.Inc() - level.Error(logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err) - - // Deleting this checkpoint to try the previous checkpoint. 
- if err := os.RemoveAll(lastCheckpointDir); err != nil { - return nil, -1, errors.Wrapf(err, "unable to delete checkpoint directory %s", lastCheckpointDir) - } - - // If we have reached this point, it means the last checkpoint was deleted. - // Now the last checkpoint will be the one before the deleted checkpoint. - lastCheckpointDir, idx, err = lastCheckpoint(params.walDir) - if err != nil { - return nil, -1, err - } - - // Creating new userStates to discard the old chunks. - userStates = newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics, params.ingester.logger) - if idx < 0 { - // There was only 1 checkpoint. We don't error in this case - // as for the first checkpoint entire WAL will/should be present. - return userStates, -1, nil - } - - level.Info(logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir)) - if err := processCheckpoint(lastCheckpointDir, userStates, params); err != nil { - // We won't attempt the repair again even if its the old checkpoint. - params.ingester.metrics.walCorruptionsTotal.Inc() - return nil, -1, err - } - - return userStates, idx, nil -} - -// segmentsExist is a stripped down version of -// https://github.com/prometheus/prometheus/blob/4c648eddf47d7e07fbc74d0b18244402200dca9e/tsdb/wal/wal.go#L739-L760. -func segmentsExist(dir string) (bool, error) { - files, err := ioutil.ReadDir(dir) - if err != nil { - return false, err - } - for _, f := range files { - if _, err := strconv.Atoi(f.Name()); err == nil { - // First filename which is a number. - // This is how Prometheus stores and this - // is how it checks too. - return true, nil - } - } - return false, nil -} - -// processCheckpoint loads the chunks of the series present in the last checkpoint. -func processCheckpoint(name string, userStates *userStates, params walRecoveryParameters) error { - - reader, closer, err := newWalReader(name, -1) - if err != nil { - return err - } - defer closer.Close() - - var ( - inputs = make([]chan *Series, params.numWorkers) - // errChan is to capture the errors from goroutine. - // The channel size is nWorkers+1 to not block any worker if all of them error out. - errChan = make(chan error, params.numWorkers) - wg = sync.WaitGroup{} - seriesPool = &sync.Pool{ - New: func() interface{} { - return &Series{} - }, - } - ) - - wg.Add(params.numWorkers) - for i := 0; i < params.numWorkers; i++ { - inputs[i] = make(chan *Series, 300) - go func(input <-chan *Series, stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries) { - processCheckpointRecord(userStates, seriesPool, stateCache, seriesCache, input, errChan, params.ingester.metrics.memoryChunks) - wg.Done() - }(inputs[i], params.stateCache[i], params.seriesCache[i]) - } - - var capturedErr error -Loop: - for reader.Next() { - s := seriesPool.Get().(*Series) - m, err := decodeCheckpointRecord(reader.Record(), s) - if err != nil { - // We don't return here in order to close/drain all the channels and - // make sure all goroutines exit. - capturedErr = err - break Loop - } - s = m.(*Series) - - // The yoloString from the unmarshal of LabelAdapter gets corrupted - // when travelling through the channel. Hence making a copy of that. - // This extra alloc during the read path is fine as it's only 1 time - // and saves extra allocs during write path by having LabelAdapter. - s.Labels = copyLabelAdapters(s.Labels) - - select { - case capturedErr = <-errChan: - // Exit early on an error. - // Only acts upon the first error received. 
- break Loop - default: - mod := s.Fingerprint % uint64(params.numWorkers) - inputs[mod] <- s - } - } - - for i := 0; i < params.numWorkers; i++ { - close(inputs[i]) - } - wg.Wait() - // If any worker errored out, some input channels might not be empty. - // Hence drain them. - for i := 0; i < params.numWorkers; i++ { - for range inputs[i] { - } - } - - if capturedErr != nil { - return capturedErr - } - select { - case capturedErr = <-errChan: - return capturedErr - default: - return reader.Err() - } -} - -func copyLabelAdapters(las []cortexpb.LabelAdapter) []cortexpb.LabelAdapter { - for i := range las { - n, v := make([]byte, len(las[i].Name)), make([]byte, len(las[i].Value)) - copy(n, las[i].Name) - copy(v, las[i].Value) - las[i].Name = string(n) - las[i].Value = string(v) - } - return las -} - -func processCheckpointRecord( - userStates *userStates, - seriesPool *sync.Pool, - stateCache map[string]*userState, - seriesCache map[string]map[uint64]*memorySeries, - seriesChan <-chan *Series, - errChan chan error, - memoryChunks prometheus.Counter, -) { - var la []cortexpb.LabelAdapter - for s := range seriesChan { - state, ok := stateCache[s.UserId] - if !ok { - state = userStates.getOrCreate(s.UserId) - stateCache[s.UserId] = state - seriesCache[s.UserId] = make(map[uint64]*memorySeries) - } - - la = la[:0] - for _, l := range s.Labels { - la = append(la, cortexpb.LabelAdapter{ - Name: string(l.Name), - Value: string(l.Value), - }) - } - series, err := state.createSeriesWithFingerprint(model.Fingerprint(s.Fingerprint), la, nil, true) - if err != nil { - errChan <- err - return - } - - descs, err := fromWireChunks(s.Chunks) - if err != nil { - errChan <- err - return - } - - if err := series.setChunks(descs); err != nil { - errChan <- err - return - } - memoryChunks.Add(float64(len(descs))) - - seriesCache[s.UserId][s.Fingerprint] = series - seriesPool.Put(s) - } -} - -type samplesWithUserID struct { - samples []tsdb_record.RefSample - userID string -} - -func processWALWithRepair(startSegment int, userStates *userStates, params walRecoveryParameters) error { - logger := params.ingester.logger - - corruptErr := processWAL(startSegment, userStates, params) - if corruptErr == nil { - return nil - } - - params.ingester.metrics.walCorruptionsTotal.Inc() - level.Error(logger).Log("msg", "error in replaying from WAL", "err", corruptErr) - - // Attempt repair. - level.Info(logger).Log("msg", "attempting repair of the WAL") - w, err := wal.New(logger, nil, params.walDir, true) - if err != nil { - return err - } - - err = w.Repair(corruptErr) - if err != nil { - level.Error(logger).Log("msg", "error in repairing WAL", "err", err) - } - - return tsdb_errors.NewMulti(err, w.Close()).Err() -} - -// processWAL processes the records in the WAL concurrently. -func processWAL(startSegment int, userStates *userStates, params walRecoveryParameters) error { - - reader, closer, err := newWalReader(params.walDir, startSegment) - if err != nil { - return err - } - defer closer.Close() - - var ( - wg sync.WaitGroup - inputs = make([]chan *samplesWithUserID, params.numWorkers) - outputs = make([]chan *samplesWithUserID, params.numWorkers) - // errChan is to capture the errors from goroutine. - // The channel size is nWorkers to not block any worker if all of them error out. 
- errChan = make(chan error, params.numWorkers) - shards = make([]*samplesWithUserID, params.numWorkers) - ) - - wg.Add(params.numWorkers) - for i := 0; i < params.numWorkers; i++ { - outputs[i] = make(chan *samplesWithUserID, 300) - inputs[i] = make(chan *samplesWithUserID, 300) - shards[i] = &samplesWithUserID{} - - go func(input <-chan *samplesWithUserID, output chan<- *samplesWithUserID, - stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries) { - processWALSamples(userStates, stateCache, seriesCache, input, output, errChan, params.ingester.logger) - wg.Done() - }(inputs[i], outputs[i], params.stateCache[i], params.seriesCache[i]) - } - - var ( - capturedErr error - walRecord = &WALRecord{} - lp labelPairs - ) -Loop: - for reader.Next() { - select { - case capturedErr = <-errChan: - // Exit early on an error. - // Only acts upon the first error received. - break Loop - default: - } - - if err := decodeWALRecord(reader.Record(), walRecord); err != nil { - // We don't return here in order to close/drain all the channels and - // make sure all goroutines exit. - capturedErr = err - break Loop - } - - if len(walRecord.Series) > 0 { - userID := walRecord.UserID - - state := userStates.getOrCreate(userID) - - for _, s := range walRecord.Series { - fp := model.Fingerprint(s.Ref) - _, ok := state.fpToSeries.get(fp) - if ok { - continue - } - - lp = lp[:0] - for _, l := range s.Labels { - lp = append(lp, cortexpb.LabelAdapter(l)) - } - if _, err := state.createSeriesWithFingerprint(fp, lp, nil, true); err != nil { - // We don't return here in order to close/drain all the channels and - // make sure all goroutines exit. - capturedErr = err - break Loop - } - } - } - - // We split up the samples into chunks of 5000 samples or less. - // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise - // cause thousands of very large in flight buffers occupying large amounts - // of unused memory. - walRecordSamples := walRecord.Samples - for len(walRecordSamples) > 0 { - m := 5000 - userID := walRecord.UserID - if len(walRecordSamples) < m { - m = len(walRecordSamples) - } - - for i := 0; i < params.numWorkers; i++ { - if len(shards[i].samples) == 0 { - // It is possible that the previous iteration did not put - // anything in this shard. In that case no need to get a new buffer. - shards[i].userID = userID - continue - } - select { - case buf := <-outputs[i]: - buf.samples = buf.samples[:0] - buf.userID = userID - shards[i] = buf - default: - shards[i] = &samplesWithUserID{ - userID: userID, - } - } - } - - for _, sam := range walRecordSamples[:m] { - mod := uint64(sam.Ref) % uint64(params.numWorkers) - shards[mod].samples = append(shards[mod].samples, sam) - } - - for i := 0; i < params.numWorkers; i++ { - if len(shards[i].samples) > 0 { - inputs[i] <- shards[i] - } - } - - walRecordSamples = walRecordSamples[m:] - } - } - - for i := 0; i < params.numWorkers; i++ { - close(inputs[i]) - for range outputs[i] { - } - } - wg.Wait() - // If any worker errored out, some input channels might not be empty. - // Hence drain them. 
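
Replay parallelism in processWAL above hinges on routing every sample by fingerprint modulo the worker count, so all samples of one series are handled by a single worker and stay in order. A toy sketch of that routing (types invented for the example):

    package main

    import "fmt"

    const numWorkers = 4

    type sample struct {
        ref uint64 // series fingerprint
        v   float64
    }

    func main() {
        samples := []sample{{ref: 10}, {ref: 11}, {ref: 14}, {ref: 7}}
        shards := make([][]sample, numWorkers)
        // Same routing rule as processWAL: ref % numWorkers picks the shard.
        for _, s := range samples {
            shards[s.ref%numWorkers] = append(shards[s.ref%numWorkers], s)
        }
        for w, sh := range shards {
            fmt.Println("worker", w, "got", len(sh), "samples")
        }
    }
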
- for i := 0; i < params.numWorkers; i++ { - for range inputs[i] { - } - } - - if capturedErr != nil { - return capturedErr - } - select { - case capturedErr = <-errChan: - return capturedErr - default: - return reader.Err() - } -} - -func processWALSamples(userStates *userStates, stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries, - input <-chan *samplesWithUserID, output chan<- *samplesWithUserID, errChan chan error, logger log.Logger) { - defer close(output) - - sp := model.SamplePair{} - for samples := range input { - state, ok := stateCache[samples.userID] - if !ok { - state = userStates.getOrCreate(samples.userID) - stateCache[samples.userID] = state - seriesCache[samples.userID] = make(map[uint64]*memorySeries) - } - sc := seriesCache[samples.userID] - for i := range samples.samples { - series, ok := sc[uint64(samples.samples[i].Ref)] - if !ok { - series, ok = state.fpToSeries.get(model.Fingerprint(samples.samples[i].Ref)) - if !ok { - // This should ideally not happen. - // If the series was not created in recovering checkpoint or - // from the labels of any records previous to this, there - // is no way to get the labels for this fingerprint. - level.Warn(logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String()) - continue - } - } - sp.Timestamp = model.Time(samples.samples[i].T) - sp.Value = model.SampleValue(samples.samples[i].V) - // There can be many out of order samples because of checkpoint and WAL overlap. - // Checking this beforehand avoids the allocation of lots of error messages. - if sp.Timestamp.After(series.lastTime) { - if err := series.add(sp); err != nil { - errChan <- err - return - } - } - } - output <- samples - } -} - -// If startSegment is <0, it means all the segments. -func newWalReader(name string, startSegment int) (*wal.Reader, io.Closer, error) { - var ( - segmentReader io.ReadCloser - err error - ) - if startSegment < 0 { - segmentReader, err = wal.NewSegmentsReader(name) - if err != nil { - return nil, nil, err - } - } else { - first, last, err := wal.Segments(name) - if err != nil { - return nil, nil, err - } - if startSegment > last { - return nil, nil, errors.New("start segment is beyond the last WAL segment") - } - if first > startSegment { - startSegment = first - } - segmentReader, err = wal.NewSegmentsRangeReader(wal.SegmentRange{ - Dir: name, - First: startSegment, - Last: -1, // Till the end. - }) - if err != nil { - return nil, nil, err - } - } - return wal.NewReader(segmentReader), segmentReader, nil -} - -func decodeCheckpointRecord(rec []byte, m proto.Message) (_ proto.Message, err error) { - switch RecordType(rec[0]) { - case CheckpointRecord: - if err := proto.Unmarshal(rec[1:], m); err != nil { - return m, err - } - default: - // The legacy proto record will have it's first byte >7. - // Hence it does not match any of the existing record types. - err := proto.Unmarshal(rec, m) - if err != nil { - return m, err - } - } - - return m, err -} - -func encodeWithTypeHeader(m proto.Message, typ RecordType, b []byte) ([]byte, error) { - buf, err := proto.Marshal(m) - if err != nil { - return b, err - } - - b = append(b[:0], byte(typ)) - b = append(b, buf...) - return b, nil -} - -// WALRecord is a struct combining the series and samples record. 
-type WALRecord struct { - UserID string - Series []tsdb_record.RefSeries - Samples []tsdb_record.RefSample -} - -func (record *WALRecord) encodeSeries(b []byte) []byte { - buf := encoding.Encbuf{B: b} - buf.PutByte(byte(WALRecordSeries)) - buf.PutUvarintStr(record.UserID) - - var enc tsdb_record.Encoder - // The 'encoded' already has the type header and userID here, hence re-using - // the remaining part of the slice (i.e. encoded[len(encoded):])) to encode the series. - encoded := buf.Get() - encoded = append(encoded, enc.Series(record.Series, encoded[len(encoded):])...) - - return encoded -} - -func (record *WALRecord) encodeSamples(b []byte) []byte { - buf := encoding.Encbuf{B: b} - buf.PutByte(byte(WALRecordSamples)) - buf.PutUvarintStr(record.UserID) - - var enc tsdb_record.Encoder - // The 'encoded' already has the type header and userID here, hence re-using - // the remaining part of the slice (i.e. encoded[len(encoded):]))to encode the samples. - encoded := buf.Get() - encoded = append(encoded, enc.Samples(record.Samples, encoded[len(encoded):])...) - - return encoded -} - -func decodeWALRecord(b []byte, walRec *WALRecord) (err error) { - var ( - userID string - dec tsdb_record.Decoder - rseries []tsdb_record.RefSeries - rsamples []tsdb_record.RefSample - - decbuf = encoding.Decbuf{B: b} - t = RecordType(decbuf.Byte()) - ) - - walRec.Series = walRec.Series[:0] - walRec.Samples = walRec.Samples[:0] - switch t { - case WALRecordSamples: - userID = decbuf.UvarintStr() - rsamples, err = dec.Samples(decbuf.B, walRec.Samples) - case WALRecordSeries: - userID = decbuf.UvarintStr() - rseries, err = dec.Series(decbuf.B, walRec.Series) - default: - return errors.New("unknown record type") - } - - // We reach here only if its a record with type header. - if decbuf.Err() != nil { - return decbuf.Err() - } - - if err != nil { - return err - } - - walRec.UserID = userID - walRec.Samples = rsamples - walRec.Series = rseries - - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.pb.go deleted file mode 100644 index 65f042147..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.pb.go +++ /dev/null @@ -1,607 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: wal.proto - -package ingester - -import ( - fmt "fmt" - _ "github.com/cortexproject/cortex/pkg/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - client "github.com/cortexproject/cortex/pkg/ingester/client" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
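
The byte layout that encodeSeries/encodeSamples and decodeWALRecord above agree on is: one record-type byte, then the user ID as a uvarint length followed by its bytes (the layout PutUvarintStr produces), then the TSDB-encoded payload. A hand-rolled sketch of that framing using only the standard library (the payload bytes are arbitrary placeholders):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    const walRecordSamples = byte(2) // mirrors the WALRecordSamples constant

    // frame lays out [type byte][uvarint length][userID bytes][payload].
    func frame(userID string, payload []byte) []byte {
        b := []byte{walRecordSamples}
        b = binary.AppendUvarint(b, uint64(len(userID)))
        b = append(b, userID...)
        return append(b, payload...)
    }

    func main() {
        rec := frame("tenant-1", []byte{0xde, 0xad})
        typ := rec[0]
        n, read := binary.Uvarint(rec[1:])
        userID := string(rec[1+read : 1+read+int(n)])
        fmt.Println(typ, userID, rec[1+read+int(n):]) // 2 tenant-1 [222 173]
    }
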
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Series struct { - UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Fingerprint uint64 `protobuf:"varint,2,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"labels"` - Chunks []client.Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` -} - -func (m *Series) Reset() { *m = Series{} } -func (*Series) ProtoMessage() {} -func (*Series) Descriptor() ([]byte, []int) { - return fileDescriptor_ae6364fc8077884f, []int{0} -} -func (m *Series) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Series.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Series) XXX_Merge(src proto.Message) { - xxx_messageInfo_Series.Merge(m, src) -} -func (m *Series) XXX_Size() int { - return m.Size() -} -func (m *Series) XXX_DiscardUnknown() { - xxx_messageInfo_Series.DiscardUnknown(m) -} - -var xxx_messageInfo_Series proto.InternalMessageInfo - -func (m *Series) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *Series) GetFingerprint() uint64 { - if m != nil { - return m.Fingerprint - } - return 0 -} - -func (m *Series) GetChunks() []client.Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -func init() { - proto.RegisterType((*Series)(nil), "ingester.Series") -} - -func init() { proto.RegisterFile("wal.proto", fileDescriptor_ae6364fc8077884f) } - -var fileDescriptor_ae6364fc8077884f = []byte{ - // 323 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x4e, 0xc3, 0x30, - 0x18, 0x85, 0x6d, 0x5a, 0x05, 0xea, 0x8a, 0x25, 0x0c, 0x44, 0x1d, 0xfe, 0x46, 0x4c, 0x95, 0x10, - 0x89, 0x04, 0x13, 0x0b, 0x52, 0xc3, 0x84, 0xc4, 0x80, 0xc2, 0xc6, 0x82, 0x92, 0xd4, 0x4d, 0x4d, - 0x43, 0x1c, 0x39, 0x8e, 0x60, 0xe4, 0x08, 0x1c, 0x83, 0xa3, 0x74, 0xec, 0x58, 0x31, 0x54, 0xad, - 0xbb, 0x30, 0xf6, 0x08, 0x28, 0xae, 0x5b, 0x75, 0x84, 0xed, 0x7f, 0x2f, 0xef, 0xcb, 0xfb, 0x6d, - 0x93, 0xd6, 0x5b, 0x94, 0x79, 0x85, 0xe0, 0x92, 0xdb, 0x47, 0x2c, 0x4f, 0x69, 0x29, 0xa9, 0xe8, - 0x5c, 0xa4, 0x4c, 0x8e, 0xaa, 0xd8, 0x4b, 0xf8, 0xab, 0x9f, 0xf2, 0x94, 0xfb, 0x3a, 0x10, 0x57, - 0x43, 0xad, 0xb4, 0xd0, 0xd3, 0x06, 0xec, 0x5c, 0xef, 0xc5, 0x13, 0x2e, 0x24, 0x7d, 0x2f, 0x04, - 0x7f, 0xa1, 0x89, 0x34, 0xca, 0x2f, 0xc6, 0xe9, 0xf6, 0x43, 0x6c, 0x06, 0x83, 0x06, 0x7f, 0x41, - 0xb7, 0x7b, 0xf9, 0x49, 0xc6, 0x68, 0x2e, 0x77, 0x7a, 0xf3, 0x8f, 0xb3, 0x05, 0x26, 0xd6, 0x23, - 0x15, 0x8c, 0x96, 0xf6, 0x29, 0x39, 0xac, 0x4a, 0x2a, 0x9e, 0xd9, 0xc0, 0xc1, 0x2e, 0xee, 0xb5, - 0x42, 0xab, 0x96, 0x77, 0x03, 0xdb, 0x25, 0xed, 0x61, 0x8d, 0x89, 0x42, 0xb0, 0x5c, 0x3a, 0x07, - 0x2e, 0xee, 0x35, 0xc3, 0x7d, 0xcb, 0xce, 0x89, 0x95, 0x45, 0x31, 0xcd, 0x4a, 0xa7, 0xe1, 0x36, - 0x7a, 0xed, 0xcb, 0x13, 0x6f, 0xbb, 0xb1, 0x77, 0x5f, 0xfb, 0x0f, 0x11, 0x13, 0x41, 0x7f, 0x32, - 0xef, 0xa2, 0xef, 0x79, 0xf7, 0x5f, 0x27, 0xde, 0xf0, 0xfd, 0x41, 0x54, 0x48, 0x2a, 0x42, 0xd3, - 0x62, 0x9f, 0x13, 0x2b, 0x19, 0x55, 0xf9, 0xb8, 0x74, 0x9a, 0xba, 0xef, 0xd8, 0xf4, 0x79, 0xb7, - 
0xb5, 0x1b, 0x34, 0xeb, 0xa6, 0xd0, 0x44, 0x82, 0x9b, 0xe9, 0x12, 0xd0, 0x6c, 0x09, 0x68, 0xbd, - 0x04, 0xfc, 0xa1, 0x00, 0x7f, 0x29, 0xc0, 0x13, 0x05, 0x78, 0xaa, 0x00, 0x2f, 0x14, 0xe0, 0x1f, - 0x05, 0x68, 0xad, 0x00, 0x7f, 0xae, 0x00, 0x4d, 0x57, 0x80, 0x66, 0x2b, 0x40, 0x4f, 0xbb, 0x07, - 0x8d, 0x2d, 0x7d, 0x53, 0x57, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x67, 0x44, 0x9d, 0xee, - 0x01, 0x00, 0x00, -} - -func (this *Series) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Series) - if !ok { - that2, ok := that.(Series) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.UserId != that1.UserId { - return false - } - if this.Fingerprint != that1.Fingerprint { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(&that1.Chunks[i]) { - return false - } - } - return true -} -func (this *Series) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&ingester.Series{") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - s = append(s, "Fingerprint: "+fmt.Sprintf("%#v", this.Fingerprint)+",\n") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Chunks != nil { - vs := make([]*client.Chunk, len(this.Chunks)) - for i := range vs { - vs[i] = &this.Chunks[i] - } - s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringWal(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Series) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Series) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Series) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintWal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintWal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Fingerprint != 0 { - i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) - i-- - dAtA[i] = 0x10 - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintWal(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintWal(dAtA []byte, offset int, v uint64) int { - offset -= sovWal(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = 
uint8(v) - return base -} -func (m *Series) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovWal(uint64(l)) - } - if m.Fingerprint != 0 { - n += 1 + sovWal(uint64(m.Fingerprint)) - } - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovWal(uint64(l)) - } - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovWal(uint64(l)) - } - } - return n -} - -func sovWal(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozWal(x uint64) (n int) { - return sovWal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Series) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunks := "[]Chunk{" - for _, f := range this.Chunks { - repeatedStringForChunks += fmt.Sprintf("%v", f) + "," - } - repeatedStringForChunks += "}" - s := strings.Join([]string{`&Series{`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Fingerprint:` + fmt.Sprintf("%v", this.Fingerprint) + `,`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + repeatedStringForChunks + `,`, - `}`, - }, "") - return s -} -func valueToStringWal(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Series) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Series: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Series: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWal - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Fingerprint", wireType) - } - m.Fingerprint = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Fingerprint |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthWal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, client.Chunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthWal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipWal(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthWal - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthWal - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipWal(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthWal - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthWal = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowWal = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.proto deleted file mode 
100644 index 1cd86f13c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package ingester; - -option go_package = "ingester"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; -import "github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto"; - -message Series { - string user_id = 1; - uint64 fingerprint = 2; - repeated cortexpb.LabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"]; - repeated cortex.Chunk chunks = 4 [(gogoproto.nullable) = false]; -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/batch.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/batch.go deleted file mode 100644 index 051dd9bc9..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/batch.go +++ /dev/null @@ -1,133 +0,0 @@ -package batch - -import ( - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/encoding" - promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" -) - -// GenericChunk is a generic chunk used by the batch iterator, in order to make the batch -// iterator general purpose. -type GenericChunk struct { - MinTime int64 - MaxTime int64 - - iterator func(reuse encoding.Iterator) encoding.Iterator -} - -func NewGenericChunk(minTime, maxTime int64, iterator func(reuse encoding.Iterator) encoding.Iterator) GenericChunk { - return GenericChunk{ - MinTime: minTime, - MaxTime: maxTime, - iterator: iterator, - } -} - -func (c GenericChunk) Iterator(reuse encoding.Iterator) encoding.Iterator { - return c.iterator(reuse) -} - -// iterator iterates over batches. -type iterator interface { - // Seek to the batch at (or after) time t. - Seek(t int64, size int) bool - - // Next moves to the next batch. - Next(size int) bool - - // AtTime returns the start time of the next batch. Must only be called after - // Seek or Next have returned true. - AtTime() int64 - - // Batch returns the current batch. Must only be called after Seek or Next - // have returned true. - Batch() promchunk.Batch - - Err() error -} - -// NewChunkMergeIterator returns a chunkenc.Iterator that merges Cortex chunks together. -func NewChunkMergeIterator(chunks []chunk.Chunk, _, _ model.Time) chunkenc.Iterator { - converted := make([]GenericChunk, len(chunks)) - for i, c := range chunks { - converted[i] = NewGenericChunk(int64(c.From), int64(c.Through), c.Data.NewIterator) - } - - return NewGenericChunkMergeIterator(converted) -} - -// NewGenericChunkMergeIterator returns a chunkenc.Iterator that merges generic chunks together. -func NewGenericChunkMergeIterator(chunks []GenericChunk) chunkenc.Iterator { - iter := newMergeIterator(chunks) - return newIteratorAdapter(iter) -} - -// iteratorAdapter turns a batchIterator into a chunkenc.Iterator. -// It fetches ever increasing batchSizes (up to promchunk.BatchSize) on each -// call to Next; on calls to Seek, resets batch size to 1. -type iteratorAdapter struct { - batchSize int - curr promchunk.Batch - underlying iterator -} - -func newIteratorAdapter(underlying iterator) chunkenc.Iterator { - return &iteratorAdapter{ - batchSize: 1, - underlying: underlying, - } -} - -// Seek implements chunkenc.Iterator. 
-func (a *iteratorAdapter) Seek(t int64) bool {
-
-	// Optimisation: fulfill the seek using current batch if possible.
-	if a.curr.Length > 0 && a.curr.Index < a.curr.Length {
-		if t <= a.curr.Timestamps[a.curr.Index] {
-			//In this case, the interface's requirement is met, so state of this
-			//iterator does not need any change.
-			return true
-		} else if t <= a.curr.Timestamps[a.curr.Length-1] {
-			//In this case, some timestamp between current sample and end of batch can fulfill
-			//the seek. Let's find it.
-			for a.curr.Index < a.curr.Length && t > a.curr.Timestamps[a.curr.Index] {
-				a.curr.Index++
-			}
-			return true
-		}
-	}
-
-	a.curr.Length = -1
-	a.batchSize = 1
-	if a.underlying.Seek(t, a.batchSize) {
-		a.curr = a.underlying.Batch()
-		return a.curr.Index < a.curr.Length
-	}
-	return false
-}
-
-// Next implements chunkenc.Iterator.
-func (a *iteratorAdapter) Next() bool {
-	a.curr.Index++
-	for a.curr.Index >= a.curr.Length && a.underlying.Next(a.batchSize) {
-		a.curr = a.underlying.Batch()
-		a.batchSize = a.batchSize * 2
-		if a.batchSize > promchunk.BatchSize {
-			a.batchSize = promchunk.BatchSize
-		}
-	}
-	return a.curr.Index < a.curr.Length
-}
-
-// At implements chunkenc.Iterator.
-func (a *iteratorAdapter) At() (int64, float64) {
-	return a.curr.Timestamps[a.curr.Index], a.curr.Values[a.curr.Index]
-}
-
-// Err implements chunkenc.Iterator.
-func (a *iteratorAdapter) Err() error {
-	return nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/chunk.go
deleted file mode 100644
index 5f45e8e13..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/chunk.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package batch
-
-import (
-	"github.com/prometheus/common/model"
-
-	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
-)
-
-// chunkIterator implements batchIterator over a chunk. It is designed to be
-// reused by calling reset() with a fresh chunk.
-type chunkIterator struct {
-	chunk GenericChunk
-	it    promchunk.Iterator
-	batch promchunk.Batch
-}
-
-func (i *chunkIterator) reset(chunk GenericChunk) {
-	i.chunk = chunk
-	i.it = chunk.Iterator(i.it)
-	i.batch.Length = 0
-	i.batch.Index = 0
-}
-
-// Seek advances the iterator forward to the value at or after
-// the given timestamp.
-func (i *chunkIterator) Seek(t int64, size int) bool {
-	// We assume seeks only care about a specific window; if this chunk doesn't
-	// contain samples in that window, we can shortcut.
-	if i.chunk.MaxTime < t {
-		return false
-	}
-
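Both the adapter's Seek above and chunkIterator.Seek just below rely on the same in-batch shortcut: if the target timestamp falls inside the batch already in hand, only the index moves and no new data is fetched. A minimal, self-contained sketch of that shortcut, with a plain int64 slice standing in for promchunk.Batch (names are illustrative, not from the original code):

```go
package main

import "fmt"

// seekInBatch advances idx within an ascending batch to the first timestamp
// >= t. The boolean reports whether this batch could satisfy the seek at all;
// on false the caller has to fetch a fresh batch from the underlying iterator.
func seekInBatch(timestamps []int64, idx int, t int64) (int, bool) {
	if len(timestamps) == 0 || t > timestamps[len(timestamps)-1] {
		return idx, false
	}
	for idx < len(timestamps) && timestamps[idx] < t {
		idx++
	}
	return idx, true
}

func main() {
	idx, ok := seekInBatch([]int64{10, 20, 30, 40}, 0, 25)
	fmt.Println(idx, ok) // 2 true: timestamps[2] == 30 is the first sample >= 25
}
```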
-	// If the seek is to the middle of the current batch, and size fits, we can
-	// shortcut.
-	if i.batch.Length > 0 && t >= i.batch.Timestamps[0] && t <= i.batch.Timestamps[i.batch.Length-1] {
-		i.batch.Index = 0
-		for i.batch.Index < i.batch.Length && t > i.batch.Timestamps[i.batch.Index] {
-			i.batch.Index++
-		}
-		if i.batch.Index+size < i.batch.Length {
-			return true
-		}
-	}
-
-	if i.it.FindAtOrAfter(model.Time(t)) {
-		i.batch = i.it.Batch(size)
-		return i.batch.Length > 0
-	}
-	return false
-}
-
-func (i *chunkIterator) Next(size int) bool {
-	if i.it.Scan() {
-		i.batch = i.it.Batch(size)
-		return i.batch.Length > 0
-	}
-	return false
-}
-
-func (i *chunkIterator) AtTime() int64 {
-	return i.batch.Timestamps[0]
-}
-
-func (i *chunkIterator) Batch() promchunk.Batch {
-	return i.batch
-}
-
-func (i *chunkIterator) Err() error {
-	return i.it.Err()
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/merge.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/merge.go
deleted file mode 100644
index 7764b3746..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/merge.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package batch
-
-import (
-	"container/heap"
-	"sort"
-
-	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
-)
-
-type mergeIterator struct {
-	its []*nonOverlappingIterator
-	h   iteratorHeap
-
-	// Store the current sorted batchStream
-	batches batchStream
-
-	// Buffers to merge in.
-	batchesBuf   batchStream
-	nextBatchBuf [1]promchunk.Batch
-
-	currErr error
-}
-
-func newMergeIterator(cs []GenericChunk) *mergeIterator {
-	css := partitionChunks(cs)
-	its := make([]*nonOverlappingIterator, 0, len(css))
-	for _, cs := range css {
-		its = append(its, newNonOverlappingIterator(cs))
-	}
-
-	c := &mergeIterator{
-		its:        its,
-		h:          make(iteratorHeap, 0, len(its)),
-		batches:    make(batchStream, 0, len(its)),
-		batchesBuf: make(batchStream, len(its)),
-	}
-
-	for _, iter := range c.its {
-		if iter.Next(1) {
-			c.h = append(c.h, iter)
-			continue
-		}
-
-		if err := iter.Err(); err != nil {
-			c.currErr = err
-		}
-	}
-
-	heap.Init(&c.h)
-	return c
-}
-
-func (c *mergeIterator) Seek(t int64, size int) bool {
-
-	// Optimisation to see if the seek is within our current cached batches.
-found:
-	for len(c.batches) > 0 {
-		batch := &c.batches[0]
-		if t >= batch.Timestamps[0] && t <= batch.Timestamps[batch.Length-1] {
-			batch.Index = 0
-			for batch.Index < batch.Length && t > batch.Timestamps[batch.Index] {
-				batch.Index++
-			}
-			break found
-		}
-		copy(c.batches, c.batches[1:])
-		c.batches = c.batches[:len(c.batches)-1]
-	}
-
-	// If we didn't find anything in the current set of batches, reset the heap
-	// and seek.
-	if len(c.batches) == 0 {
-		c.h = c.h[:0]
-		c.batches = c.batches[:0]
-
-		for _, iter := range c.its {
-			if iter.Seek(t, size) {
-				c.h = append(c.h, iter)
-				continue
-			}
-
-			if err := iter.Err(); err != nil {
-				c.currErr = err
-				return false
-			}
-		}
-
-		heap.Init(&c.h)
-	}
-
-	return c.buildNextBatch(size)
-}
-
-func (c *mergeIterator) Next(size int) bool {
-	// Pop the last built batch in a way that doesn't extend the slice.
-	if len(c.batches) > 0 {
-		copy(c.batches, c.batches[1:])
-		c.batches = c.batches[:len(c.batches)-1]
-	}
-
-	return c.buildNextBatch(size)
-}
-
-func (c *mergeIterator) nextBatchEndTime() int64 {
-	batch := &c.batches[0]
-	return batch.Timestamps[batch.Length-1]
-}
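buildNextBatch, which follows, repeatedly folds the batch at the top of the heap into the running stream via mergeStreams (defined in stream.go further down). Stripped of batch reuse and fixed-size output batches, that merge is a two-pointer walk over two sorted streams which keeps a single sample when timestamps collide. A self-contained sketch, with plain slices standing in for batchStream:

```go
package main

import "fmt"

// mergeSorted merges two ascending timestamp slices, keeping one sample when
// both streams carry the same timestamp (the left stream wins, as in
// mergeStreams).
func mergeSorted(left, right []int64) []int64 {
	out := make([]int64, 0, len(left)+len(right))
	for len(left) > 0 && len(right) > 0 {
		switch {
		case left[0] < right[0]:
			out = append(out, left[0])
			left = left[1:]
		case left[0] > right[0]:
			out = append(out, right[0])
			right = right[1:]
		default: // duplicate timestamp: emit once, advance both streams
			out = append(out, left[0])
			left, right = left[1:], right[1:]
		}
	}
	out = append(out, left...)
	return append(out, right...)
}

func main() {
	fmt.Println(mergeSorted([]int64{1, 3, 5}, []int64{2, 3, 6})) // [1 2 3 5 6]
}
```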
-func (c *mergeIterator) buildNextBatch(size int) bool {
-	// All we need to do is get enough batches that our first batch's last entry
-	// is before all iterators' next entries.
-	for len(c.h) > 0 && (len(c.batches) == 0 || c.nextBatchEndTime() >= c.h[0].AtTime()) {
-		c.nextBatchBuf[0] = c.h[0].Batch()
-		c.batchesBuf = mergeStreams(c.batches, c.nextBatchBuf[:], c.batchesBuf, size)
-		c.batches = append(c.batches[:0], c.batchesBuf...)
-
-		if c.h[0].Next(size) {
-			heap.Fix(&c.h, 0)
-		} else {
-			heap.Pop(&c.h)
-		}
-	}
-
-	return len(c.batches) > 0
-}
-
-func (c *mergeIterator) AtTime() int64 {
-	return c.batches[0].Timestamps[0]
-}
-
-func (c *mergeIterator) Batch() promchunk.Batch {
-	return c.batches[0]
-}
-
-func (c *mergeIterator) Err() error {
-	return c.currErr
-}
-
-type iteratorHeap []iterator
-
-func (h *iteratorHeap) Len() int      { return len(*h) }
-func (h *iteratorHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
-
-func (h *iteratorHeap) Less(i, j int) bool {
-	iT := (*h)[i].AtTime()
-	jT := (*h)[j].AtTime()
-	return iT < jT
-}
-
-func (h *iteratorHeap) Push(x interface{}) {
-	*h = append(*h, x.(iterator))
-}
-
-func (h *iteratorHeap) Pop() interface{} {
-	old := *h
-	n := len(old)
-	x := old[n-1]
-	*h = old[0 : n-1]
-	return x
-}
-
-// Build a list of lists of non-overlapping chunks.
-func partitionChunks(cs []GenericChunk) [][]GenericChunk {
-	sort.Sort(byMinTime(cs))
-
-	css := [][]GenericChunk{}
-outer:
-	for _, c := range cs {
-		for i, cs := range css {
-			if cs[len(cs)-1].MaxTime < c.MinTime {
-				css[i] = append(css[i], c)
-				continue outer
-			}
-		}
-		cs := make([]GenericChunk, 0, len(cs)/(len(css)+1))
-		cs = append(cs, c)
-		css = append(css, cs)
-	}
-
-	return css
-}
-
-type byMinTime []GenericChunk
-
-func (b byMinTime) Len() int           { return len(b) }
-func (b byMinTime) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byMinTime) Less(i, j int) bool { return b[i].MinTime < b[j].MinTime }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/non_overlapping.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/non_overlapping.go
deleted file mode 100644
index a1c6bf010..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/non_overlapping.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package batch
-
-import (
-	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
-)
-
-type nonOverlappingIterator struct {
-	curr   int
-	chunks []GenericChunk
-	iter   chunkIterator
-}
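partitionChunks above greedily assigns each chunk (sorted by MinTime) to the first list whose last chunk ends before the new one starts, so every list is internally non-overlapping and can be driven by the nonOverlappingIterator defined in this file. The same greedy partitioning in a small standalone form (the span type and values are made up for illustration):

```go
package main

import (
	"fmt"
	"sort"
)

type span struct{ min, max int64 }

// partition groups spans into lists whose members do not overlap, appending
// each span to the first list whose tail ends early enough, exactly as
// partitionChunks does for GenericChunk.
func partition(spans []span) [][]span {
	sort.Slice(spans, func(i, j int) bool { return spans[i].min < spans[j].min })
	var out [][]span
outer:
	for _, s := range spans {
		for i, list := range out {
			if list[len(list)-1].max < s.min {
				out[i] = append(out[i], s)
				continue outer
			}
		}
		out = append(out, []span{s})
	}
	return out
}

func main() {
	fmt.Println(partition([]span{{0, 5}, {3, 8}, {6, 9}, {10, 12}}))
	// [[{0 5} {6 9} {10 12}] [{3 8}]]
}
```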
-// newNonOverlappingIterator returns a single iterator over a slice of sorted,
-// non-overlapping chunks.
-func newNonOverlappingIterator(chunks []GenericChunk) *nonOverlappingIterator {
-	it := &nonOverlappingIterator{
-		chunks: chunks,
-	}
-	it.iter.reset(it.chunks[0])
-	return it
-}
-
-func (it *nonOverlappingIterator) Seek(t int64, size int) bool {
-	for {
-		if it.iter.Seek(t, size) {
-			return true
-		} else if it.iter.Err() != nil {
-			return false
-		} else if !it.next() {
-			return false
-		}
-	}
-}
-
-func (it *nonOverlappingIterator) Next(size int) bool {
-	for {
-		if it.iter.Next(size) {
-			return true
-		} else if it.iter.Err() != nil {
-			return false
-		} else if !it.next() {
-			return false
-		}
-	}
-}
-
-func (it *nonOverlappingIterator) next() bool {
-	it.curr++
-	if it.curr < len(it.chunks) {
-		it.iter.reset(it.chunks[it.curr])
-	}
-	return it.curr < len(it.chunks)
-}
-
-func (it *nonOverlappingIterator) AtTime() int64 {
-	return it.iter.AtTime()
-}
-
-func (it *nonOverlappingIterator) Batch() promchunk.Batch {
-	return it.iter.Batch()
-}
-
-func (it *nonOverlappingIterator) Err() error {
-	if it.curr < len(it.chunks) {
-		return it.iter.Err()
-	}
-	return nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/stream.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/stream.go
deleted file mode 100644
index 66343b424..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/stream.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package batch
-
-import (
-	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
-)
-
-// batchStream deals with iterating through multiple, non-overlapping batches,
-// and building new slices of non-overlapping batches. Designed to be used
-// without allocations.
-type batchStream []promchunk.Batch
-
-// reset, hasNext, next, atTime etc are all inlined in go1.11.
-
-func (bs *batchStream) reset() {
-	for i := range *bs {
-		(*bs)[i].Index = 0
-	}
-}
-
-func (bs *batchStream) hasNext() bool {
-	return len(*bs) > 0
-}
-
-func (bs *batchStream) next() {
-	(*bs)[0].Index++
-	if (*bs)[0].Index >= (*bs)[0].Length {
-		*bs = (*bs)[1:]
-	}
-}
-
-func (bs *batchStream) atTime() int64 {
-	return (*bs)[0].Timestamps[(*bs)[0].Index]
-}
-
-func (bs *batchStream) at() (int64, float64) {
-	b := &(*bs)[0]
-	return b.Timestamps[b.Index], b.Values[b.Index]
-}
-
-func mergeStreams(left, right batchStream, result batchStream, size int) batchStream {
-	// Reset the Index and Length of existing batches.
-	for i := range result {
-		result[i].Index = 0
-		result[i].Length = 0
-	}
-	resultLen := 1 // Number of batches in the final result.
-	b := &result[0]
-
-	// This function adds a new batch to the result
-	// if the current batch being appended is full.
-	checkForFullBatch := func() {
-		if b.Index == size {
-			// The batch reached its intended size.
-			// Add another batch to the result
-			// and use it for further appending.
-
-			// The Index is the place at which new sample
-			// has to be appended, hence it tells the length.
-			b.Length = b.Index
-			resultLen++
-			if resultLen > len(result) {
-				// It is possible that result can grow longer
-				// than the one provided.
- result = append(result, promchunk.Batch{}) - } - b = &result[resultLen-1] - } - } - - for left.hasNext() && right.hasNext() { - checkForFullBatch() - t1, t2 := left.atTime(), right.atTime() - if t1 < t2 { - b.Timestamps[b.Index], b.Values[b.Index] = left.at() - left.next() - } else if t1 > t2 { - b.Timestamps[b.Index], b.Values[b.Index] = right.at() - right.next() - } else { - b.Timestamps[b.Index], b.Values[b.Index] = left.at() - left.next() - right.next() - } - b.Index++ - } - - // This function adds all the samples from the provided - // batchStream into the result in the same order. - addToResult := func(bs batchStream) { - for ; bs.hasNext(); bs.next() { - checkForFullBatch() - b.Timestamps[b.Index], b.Values[b.Index] = bs.at() - b.Index++ - b.Length++ - } - } - - addToResult(left) - addToResult(right) - - // The Index is the place at which new sample - // has to be appended, hence it tells the length. - b.Length = b.Index - - // The provided 'result' slice might be bigger - // than the actual result, hence return the subslice. - result = result[:resultLen] - result.reset() - return result -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go deleted file mode 100644 index ec7c4c141..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go +++ /dev/null @@ -1,213 +0,0 @@ -package querier - -import ( - "math" - "sort" - - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/thanos-io/thanos/pkg/store/labelpb" - "github.com/thanos-io/thanos/pkg/store/storepb" - - "github.com/cortexproject/cortex/pkg/querier/series" -) - -func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMatcher { - var converted []storepb.LabelMatcher - for _, m := range matchers { - var t storepb.LabelMatcher_Type - switch m.Type { - case labels.MatchEqual: - t = storepb.LabelMatcher_EQ - case labels.MatchNotEqual: - t = storepb.LabelMatcher_NEQ - case labels.MatchRegexp: - t = storepb.LabelMatcher_RE - case labels.MatchNotRegexp: - t = storepb.LabelMatcher_NRE - } - - converted = append(converted, storepb.LabelMatcher{ - Type: t, - Name: m.Name, - Value: m.Value, - }) - } - return converted -} - -// Implementation of storage.SeriesSet, based on individual responses from store client. -type blockQuerierSeriesSet struct { - series []*storepb.Series - warnings storage.Warnings - - // next response to process - next int - - currSeries storage.Series -} - -func (bqss *blockQuerierSeriesSet) Next() bool { - bqss.currSeries = nil - - if bqss.next >= len(bqss.series) { - return false - } - - currLabels := labelpb.ZLabelsToPromLabels(bqss.series[bqss.next].Labels) - currChunks := bqss.series[bqss.next].Chunks - - bqss.next++ - - // Merge chunks for current series. Chunks may come in multiple responses, but as soon - // as the response has chunks for a new series, we can stop searching. Series are sorted. - // See documentation for StoreClient.Series call for details. - for bqss.next < len(bqss.series) && labels.Compare(currLabels, labelpb.ZLabelsToPromLabels(bqss.series[bqss.next].Labels)) == 0 { - currChunks = append(currChunks, bqss.series[bqss.next].Chunks...) 
- bqss.next++ - } - - bqss.currSeries = newBlockQuerierSeries(currLabels, currChunks) - return true -} - -func (bqss *blockQuerierSeriesSet) At() storage.Series { - return bqss.currSeries -} - -func (bqss *blockQuerierSeriesSet) Err() error { - return nil -} - -func (bqss *blockQuerierSeriesSet) Warnings() storage.Warnings { - return bqss.warnings -} - -// newBlockQuerierSeries makes a new blockQuerierSeries. Input labels must be already sorted by name. -func newBlockQuerierSeries(lbls []labels.Label, chunks []storepb.AggrChunk) *blockQuerierSeries { - sort.Slice(chunks, func(i, j int) bool { - return chunks[i].MinTime < chunks[j].MinTime - }) - - return &blockQuerierSeries{labels: lbls, chunks: chunks} -} - -type blockQuerierSeries struct { - labels labels.Labels - chunks []storepb.AggrChunk -} - -func (bqs *blockQuerierSeries) Labels() labels.Labels { - return bqs.labels -} - -func (bqs *blockQuerierSeries) Iterator() chunkenc.Iterator { - if len(bqs.chunks) == 0 { - // should not happen in practice, but we have a unit test for it - return series.NewErrIterator(errors.New("no chunks")) - } - - its := make([]chunkenc.Iterator, 0, len(bqs.chunks)) - - for _, c := range bqs.chunks { - ch, err := chunkenc.FromData(chunkenc.EncXOR, c.Raw.Data) - if err != nil { - return series.NewErrIterator(errors.Wrapf(err, "failed to initialize chunk from XOR encoded raw data (series: %v min time: %d max time: %d)", bqs.Labels(), c.MinTime, c.MaxTime)) - } - - it := ch.Iterator(nil) - its = append(its, it) - } - - return newBlockQuerierSeriesIterator(bqs.Labels(), its) -} - -func newBlockQuerierSeriesIterator(labels labels.Labels, its []chunkenc.Iterator) *blockQuerierSeriesIterator { - return &blockQuerierSeriesIterator{labels: labels, iterators: its, lastT: math.MinInt64} -} - -// blockQuerierSeriesIterator implements a series iterator on top -// of a list of time-sorted, non-overlapping chunks. -type blockQuerierSeriesIterator struct { - // only used for error reporting - labels labels.Labels - - iterators []chunkenc.Iterator - i int - lastT int64 -} - -func (it *blockQuerierSeriesIterator) Seek(t int64) bool { - // We generally expect the chunks already to be cut down - // to the range we are interested in. There's not much to be gained from - // hopping across chunks so we just call next until we reach t. - for { - ct, _ := it.At() - if ct >= t { - return true - } - if !it.Next() { - return false - } - } -} - -func (it *blockQuerierSeriesIterator) At() (int64, float64) { - if it.i >= len(it.iterators) { - return 0, 0 - } - - t, v := it.iterators[it.i].At() - it.lastT = t - return t, v -} - -func (it *blockQuerierSeriesIterator) Next() bool { - if it.i >= len(it.iterators) { - return false - } - - if it.iterators[it.i].Next() { - return true - } - if it.iterators[it.i].Err() != nil { - return false - } - - for { - it.i++ - - if it.i >= len(it.iterators) { - return false - } - - // we must advance iterator first, to see if it has any samples. - // Seek will call At() as its first operation. - if !it.iterators[it.i].Next() { - if it.iterators[it.i].Err() != nil { - return false - } - - // Found empty iterator without error, skip it. - continue - } - - // Chunks are guaranteed to be ordered but not generally guaranteed to not overlap. - // We must ensure to skip any overlapping range between adjacent chunks. 
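The `return it.Seek(it.lastT + 1)` that follows realises this: after switching to the next chunk, the iterator re-seeks just past the last returned timestamp, so samples duplicated at chunk boundaries are emitted only once. The resulting behaviour, in miniature:

```go
package main

import (
	"fmt"
	"math"
)

// concatSkipOverlap walks time-sorted chunks and drops any sample a previous
// chunk already produced, mimicking the Seek(lastT+1) step.
func concatSkipOverlap(chunks [][]int64) []int64 {
	var out []int64
	lastT := int64(math.MinInt64)
	for _, c := range chunks {
		for _, t := range c {
			if t <= lastT {
				continue // overlap with the previous chunk
			}
			out = append(out, t)
			lastT = t
		}
	}
	return out
}

func main() {
	fmt.Println(concatSkipOverlap([][]int64{{1, 2, 3}, {3, 4, 5}})) // [1 2 3 4 5]
}
```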
- return it.Seek(it.lastT + 1) - } -} - -func (it *blockQuerierSeriesIterator) Err() error { - if it.i >= len(it.iterators) { - return nil - } - - err := it.iterators[it.i].Err() - if err != nil { - return errors.Wrapf(err, "cannot iterate chunk for series: %v", it.labels) - } - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go deleted file mode 100644 index cbba178fc..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go +++ /dev/null @@ -1,86 +0,0 @@ -package querier - -import ( - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" -) - -type BlocksConsistencyChecker struct { - uploadGracePeriod time.Duration - deletionGracePeriod time.Duration - logger log.Logger - - checksTotal prometheus.Counter - checksFailed prometheus.Counter -} - -func NewBlocksConsistencyChecker(uploadGracePeriod, deletionGracePeriod time.Duration, logger log.Logger, reg prometheus.Registerer) *BlocksConsistencyChecker { - return &BlocksConsistencyChecker{ - uploadGracePeriod: uploadGracePeriod, - deletionGracePeriod: deletionGracePeriod, - logger: logger, - checksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_querier_blocks_consistency_checks_total", - Help: "Total number of consistency checks run on queried blocks.", - }), - checksFailed: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_querier_blocks_consistency_checks_failed_total", - Help: "Total number of consistency checks failed on queried blocks.", - }), - } -} - -func (c *BlocksConsistencyChecker) Check(knownBlocks bucketindex.Blocks, knownDeletionMarks map[ulid.ULID]*bucketindex.BlockDeletionMark, queriedBlocks []ulid.ULID) (missingBlocks []ulid.ULID) { - c.checksTotal.Inc() - - // Reverse the map of queried blocks, so that we can easily look for missing ones. - actualBlocks := map[ulid.ULID]struct{}{} - for _, blockID := range queriedBlocks { - actualBlocks[blockID] = struct{}{} - } - - // Look for any missing block. - for _, block := range knownBlocks { - // Some recently uploaded blocks, already discovered by the querier, may not have been discovered - // and loaded by the store-gateway yet. In order to avoid false positives, we grant some time - // to the store-gateway to discover them. It's safe to exclude recently uploaded blocks because: - // - Blocks uploaded by ingesters: we will continue querying them from ingesters for a while (depends - // on the configured retention period). - // - Blocks uploaded by compactor: the source blocks are marked for deletion but will continue to be - // queried by queriers for a while (depends on the configured deletion marks delay). - if c.uploadGracePeriod > 0 && time.Since(block.GetUploadedAt()) < c.uploadGracePeriod { - level.Debug(c.logger).Log("msg", "block skipped from consistency check because it was uploaded recently", "block", block.ID.String(), "uploadedAt", block.GetUploadedAt().String()) - continue - } - - // The store-gateway may offload blocks before the querier. If that happens, the querier will run a consistency check - // on blocks that can't be queried because they were offloaded. 
For this reason, we don't run the consistency check on any block
-		// which has been marked for deletion more than "grace period" time ago. Basically, the grace period is the time
-		// we still expect a block marked for deletion to still be queried.
-		if mark := knownDeletionMarks[block.ID]; mark != nil {
-			deletionTime := time.Unix(mark.DeletionTime, 0)
-
-			if c.deletionGracePeriod > 0 && time.Since(deletionTime) > c.deletionGracePeriod {
-				level.Debug(c.logger).Log("msg", "block skipped from consistency check because it is marked for deletion", "block", block.ID.String(), "deletionTime", deletionTime.String())
-				continue
-			}
-		}
-
-		if _, ok := actualBlocks[block.ID]; !ok {
-			missingBlocks = append(missingBlocks, block.ID)
-		}
-	}
-
-	if len(missingBlocks) > 0 {
-		c.checksFailed.Inc()
-	}
-
-	return missingBlocks
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go
deleted file mode 100644
index ab760eabb..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package querier
-
-import (
-	"context"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/oklog/ulid"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/thanos-io/thanos/pkg/objstore"
-
-	"github.com/cortexproject/cortex/pkg/storage/bucket"
-	"github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex"
-	"github.com/cortexproject/cortex/pkg/util/services"
-)
-
-var (
-	errBucketIndexBlocksFinderNotRunning = errors.New("bucket index blocks finder is not running")
-	errBucketIndexTooOld                 = errors.New("bucket index is too old and the last time it was updated exceeds the allowed max staleness")
-)
-
-type BucketIndexBlocksFinderConfig struct {
-	IndexLoader              bucketindex.LoaderConfig
-	MaxStalePeriod           time.Duration
-	IgnoreDeletionMarksDelay time.Duration
-}
-
-// BucketIndexBlocksFinder implements the BlocksFinder interface and finds blocks in the bucket
-// by looking up the bucket index.
-type BucketIndexBlocksFinder struct {
-	services.Service
-
-	cfg    BucketIndexBlocksFinderConfig
-	loader *bucketindex.Loader
-}
-
-func NewBucketIndexBlocksFinder(cfg BucketIndexBlocksFinderConfig, bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) *BucketIndexBlocksFinder {
-	loader := bucketindex.NewLoader(cfg.IndexLoader, bkt, cfgProvider, logger, reg)
-
-	return &BucketIndexBlocksFinder{
-		cfg:     cfg,
-		loader:  loader,
-		Service: loader,
-	}
-}
-
-// GetBlocks implements BlocksFinder.
-func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) {
-	if f.State() != services.Running {
-		return nil, nil, errBucketIndexBlocksFinderNotRunning
-	}
-	if maxT < minT {
-		return nil, nil, errInvalidBlocksRange
-	}
-
-	// Get the bucket index for this user.
-	idx, err := f.loader.GetIndex(ctx, userID)
-	if errors.Is(err, bucketindex.ErrIndexNotFound) {
-		// This is a legit edge case, happening when a new tenant has not shipped blocks to the storage yet
-		// so the bucket index hasn't been created yet.
-		return nil, nil, nil
-	}
-	if err != nil {
-		return nil, nil, err
-	}
-
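To make the two grace periods in BlocksConsistencyChecker.Check (above) concrete: a block is exempt from the check if it was uploaded too recently for store-gateways to have loaded it, or if it was marked for deletion long enough ago that it may legitimately have been offloaded. A hedged, self-contained sketch; the function and parameter names are illustrative, not from the original code:

```go
package main

import (
	"fmt"
	"time"
)

// exemptFromCheck mirrors the two time.Since guards in
// BlocksConsistencyChecker.Check; a nil deletedAt means no deletion mark.
func exemptFromCheck(uploadedAt time.Time, deletedAt *time.Time, uploadGrace, deletionGrace time.Duration) bool {
	if uploadGrace > 0 && time.Since(uploadedAt) < uploadGrace {
		return true // store-gateways may not have discovered the block yet
	}
	if deletedAt != nil && deletionGrace > 0 && time.Since(*deletedAt) > deletionGrace {
		return true // block may already have been offloaded
	}
	return false
}

func main() {
	fresh := time.Now().Add(-time.Minute)
	old := time.Now().Add(-24 * time.Hour)
	fmt.Println(exemptFromCheck(fresh, nil, 15*time.Minute, time.Hour)) // true
	fmt.Println(exemptFromCheck(old, nil, 15*time.Minute, time.Hour))  // false
}
```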
-	// Ensure the bucket index is not too old.
-	if time.Since(idx.GetUpdatedAt()) > f.cfg.MaxStalePeriod {
-		return nil, nil, errBucketIndexTooOld
-	}
-
-	var (
-		matchingBlocks        = map[ulid.ULID]*bucketindex.Block{}
-		matchingDeletionMarks = map[ulid.ULID]*bucketindex.BlockDeletionMark{}
-	)
-
-	// Filter blocks containing samples within the range.
-	for _, block := range idx.Blocks {
-		if !block.Within(minT, maxT) {
-			continue
-		}
-
-		matchingBlocks[block.ID] = block
-	}
-
-	for _, mark := range idx.BlockDeletionMarks {
-		// Filter deletion marks by matching blocks only.
-		if _, ok := matchingBlocks[mark.ID]; !ok {
-			continue
-		}
-
-		// Exclude blocks marked for deletion. This is the same logic as Thanos IgnoreDeletionMarkFilter.
-		if time.Since(time.Unix(mark.DeletionTime, 0)).Seconds() > f.cfg.IgnoreDeletionMarksDelay.Seconds() {
-			delete(matchingBlocks, mark.ID)
-			continue
-		}
-
-		matchingDeletionMarks[mark.ID] = mark
-	}
-
-	// Convert matching blocks into a list.
-	blocks := make(bucketindex.Blocks, 0, len(matchingBlocks))
-	for _, b := range matchingBlocks {
-		blocks = append(blocks, b)
-	}
-
-	return blocks, matchingDeletionMarks, nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_scan.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_scan.go
deleted file mode 100644
index 259d9b16b..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_scan.go
+++ /dev/null
@@ -1,433 +0,0 @@
-package querier
-
-import (
-	"context"
-	"path"
-	"path/filepath"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/oklog/ulid"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
-	"github.com/thanos-io/thanos/pkg/block"
-	"github.com/thanos-io/thanos/pkg/block/metadata"
-	"github.com/thanos-io/thanos/pkg/objstore"
-
-	"github.com/cortexproject/cortex/pkg/storage/bucket"
-	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
-	"github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex"
-	"github.com/cortexproject/cortex/pkg/storegateway"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/cortexproject/cortex/pkg/util/backoff"
-	util_log "github.com/cortexproject/cortex/pkg/util/log"
-	"github.com/cortexproject/cortex/pkg/util/services"
-)
-
-var (
-	errBucketScanBlocksFinderNotRunning = errors.New("bucket scan blocks finder is not running")
-	errInvalidBlocksRange               = errors.New("invalid blocks time range")
-)
-
-type BucketScanBlocksFinderConfig struct {
-	ScanInterval             time.Duration
-	TenantsConcurrency       int
-	MetasConcurrency         int
-	CacheDir                 string
-	ConsistencyDelay         time.Duration
-	IgnoreDeletionMarksDelay time.Duration
-}
-
-// BucketScanBlocksFinder is a BlocksFinder implementation periodically scanning the bucket to discover blocks.
-type BucketScanBlocksFinder struct {
-	services.Service
-
-	cfg             BucketScanBlocksFinderConfig
-	cfgProvider     bucket.TenantConfigProvider
-	logger          log.Logger
-	bucketClient    objstore.Bucket
-	fetchersMetrics *storegateway.MetadataFetcherMetrics
-	usersScanner    *cortex_tsdb.UsersScanner
-
-	// We reuse the metadata fetcher instance for a given tenant both because of performance
-	// reasons (the fetcher keeps an in-memory cache) and being able to collect and group metrics.
-	fetchersMx sync.Mutex
-	fetchers   map[string]userFetcher
-
-	// Keep the per-tenant/user metas found during the last run.
-	userMx            sync.RWMutex
-	userMetas         map[string]bucketindex.Blocks
-	userMetasLookup   map[string]map[ulid.ULID]*bucketindex.Block
-	userDeletionMarks map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark
-
-	scanDuration    prometheus.Histogram
-	scanLastSuccess prometheus.Gauge
-}
-
-func NewBucketScanBlocksFinder(cfg BucketScanBlocksFinderConfig, bucketClient objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) *BucketScanBlocksFinder {
-	d := &BucketScanBlocksFinder{
-		cfg:               cfg,
-		cfgProvider:       cfgProvider,
-		logger:            logger,
-		bucketClient:      bucketClient,
-		fetchers:          make(map[string]userFetcher),
-		usersScanner:      cortex_tsdb.NewUsersScanner(bucketClient, cortex_tsdb.AllUsers, logger),
-		userMetas:         make(map[string]bucketindex.Blocks),
-		userMetasLookup:   make(map[string]map[ulid.ULID]*bucketindex.Block),
-		userDeletionMarks: map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark{},
-		fetchersMetrics:   storegateway.NewMetadataFetcherMetrics(),
-		scanDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
-			Name:    "cortex_querier_blocks_scan_duration_seconds",
-			Help:    "The total time it takes to run a full blocks scan across the storage.",
-			Buckets: []float64{1, 10, 20, 30, 60, 120, 180, 240, 300, 600},
-		}),
-		scanLastSuccess: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
-			Name: "cortex_querier_blocks_last_successful_scan_timestamp_seconds",
-			Help: "Unix timestamp of the last successful blocks scan.",
-		}),
-	}
-
-	if reg != nil {
-		prometheus.WrapRegistererWith(prometheus.Labels{"component": "querier"}, reg).MustRegister(d.fetchersMetrics)
-	}
-
-	// Apply a jitter to the sync frequency in order to increase the probability
-	// of hitting the shared cache (if any).
-	scanInterval := util.DurationWithJitter(cfg.ScanInterval, 0.2)
-	d.Service = services.NewTimerService(scanInterval, d.starting, d.scan, nil)
-
-	return d
-}
-
-// GetBlocks returns known blocks for userID containing samples within the range minT
-// and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending.
-func (d *BucketScanBlocksFinder) GetBlocks(_ context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) {
-	// We need to ensure the initial full bucket scan succeeded.
-	if d.State() != services.Running {
-		return nil, nil, errBucketScanBlocksFinderNotRunning
-	}
-	if maxT < minT {
-		return nil, nil, errInvalidBlocksRange
-	}
-
-	d.userMx.RLock()
-	defer d.userMx.RUnlock()
-
-	userMetas, ok := d.userMetas[userID]
-	if !ok {
-		return nil, nil, nil
-	}
-
-	// Given we do expect the large majority of queries to have a time range close
-	// to "now", we're going to find matching blocks by iterating the list in reverse order.
-	var matchingMetas bucketindex.Blocks
-	for i := len(userMetas) - 1; i >= 0; i-- {
-		if userMetas[i].Within(minT, maxT) {
-			matchingMetas = append(matchingMetas, userMetas[i])
-		}
-
-		// We can safely break the loop because metas are sorted by MaxTime.
-		if userMetas[i].MaxTime <= minT {
-			break
-		}
-	}
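The reverse scan above works because metas are kept sorted by MaxTime: once a block's MaxTime drops to or below minT, no earlier block can overlap the range, so the loop stops early. An isolated illustration, assuming Within means the block's [MinTime, MaxTime] overlaps the query range:

```go
package main

import "fmt"

type meta struct{ minT, maxT int64 }

// matching walks metas (sorted by maxT ascending) from the newest and stops
// as soon as a block ends at or before the query start.
func matching(metas []meta, minT, maxT int64) []meta {
	var out []meta
	for i := len(metas) - 1; i >= 0; i-- {
		if metas[i].minT <= maxT && metas[i].maxT >= minT {
			out = append(out, metas[i])
		}
		if metas[i].maxT <= minT {
			break // all earlier blocks end before the requested range
		}
	}
	return out
}

func main() {
	metas := []meta{{0, 100}, {100, 200}, {200, 300}}
	fmt.Println(matching(metas, 150, 250)) // [{200 300} {100 200}]
}
```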
-
-	// Filter deletion marks by matching blocks only.
-	matchingDeletionMarks := map[ulid.ULID]*bucketindex.BlockDeletionMark{}
-	if userDeletionMarks, ok := d.userDeletionMarks[userID]; ok {
-		for _, m := range matchingMetas {
-			if d := userDeletionMarks[m.ID]; d != nil {
-				matchingDeletionMarks[m.ID] = d
-			}
-		}
-	}
-
-	return matchingMetas, matchingDeletionMarks, nil
-}
-
-func (d *BucketScanBlocksFinder) starting(ctx context.Context) error {
-	// Before the service is in the running state it must have successfully
-	// completed the initial scan.
-	if err := d.scanBucket(ctx); err != nil {
-		level.Error(d.logger).Log("msg", "unable to run the initial blocks scan", "err", err)
-		return err
-	}
-
-	return nil
-}
-
-func (d *BucketScanBlocksFinder) scan(ctx context.Context) error {
-	if err := d.scanBucket(ctx); err != nil {
-		level.Error(d.logger).Log("msg", "failed to scan bucket storage to find blocks", "err", err)
-	}
-
-	// Never return error, otherwise the service terminates.
-	return nil
-}
-
-func (d *BucketScanBlocksFinder) scanBucket(ctx context.Context) (returnErr error) {
-	defer func(start time.Time) {
-		d.scanDuration.Observe(time.Since(start).Seconds())
-		if returnErr == nil {
-			d.scanLastSuccess.SetToCurrentTime()
-		}
-	}(time.Now())
-
-	// Discover all users first. This helps cacheability of the object store call.
-	userIDs, _, err := d.usersScanner.ScanUsers(ctx)
-	if err != nil {
-		return err
-	}
-
-	jobsChan := make(chan string)
-	resMx := sync.Mutex{}
-	resMetas := map[string]bucketindex.Blocks{}
-	resMetasLookup := map[string]map[ulid.ULID]*bucketindex.Block{}
-	resDeletionMarks := map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark{}
-	resErrs := tsdb_errors.NewMulti()
-
-	// Create a pool of workers which will synchronize metas. The pool size
-	// is limited in order to avoid concurrently syncing a lot of tenants in
-	// a large cluster.
-	wg := &sync.WaitGroup{}
-	wg.Add(d.cfg.TenantsConcurrency)
-
-	for i := 0; i < d.cfg.TenantsConcurrency; i++ {
-		go func() {
-			defer wg.Done()
-
-			for userID := range jobsChan {
-				metas, deletionMarks, err := d.scanUserBlocksWithRetries(ctx, userID)
-
-				// Build the lookup map.
-				lookup := map[ulid.ULID]*bucketindex.Block{}
-				for _, m := range metas {
-					lookup[m.ID] = m
-				}
-
-				resMx.Lock()
-				if err != nil {
-					resErrs.Add(err)
-				} else {
-					resMetas[userID] = metas
-					resMetasLookup[userID] = lookup
-					resDeletionMarks[userID] = deletionMarks
-				}
-				resMx.Unlock()
-			}
-		}()
-	}
-
-	// Push a job for each user whose blocks need to be discovered.
-pushJobsLoop:
-	for _, userID := range userIDs {
-		select {
-		case jobsChan <- userID:
-			// Nothing to do.
-		case <-ctx.Done():
-			resMx.Lock()
-			resErrs.Add(ctx.Err())
-			resMx.Unlock()
-			break pushJobsLoop
-		}
-	}
-
-	// Wait until all workers completed.
-	close(jobsChan)
-	wg.Wait()
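The scan above fans per-tenant work out to TenantsConcurrency goroutines over an unbuffered channel and joins them with a WaitGroup, folding results in under a mutex; result handling continues below. The bare concurrency skeleton, runnable on its own (tenant names and the per-tenant work are stand-ins):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan string)
	var (
		mx      sync.Mutex
		results = map[string]int{}
	)

	const workers = 4 // stand-in for cfg.TenantsConcurrency
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for tenant := range jobs {
				n := len(tenant) // stand-in for scanUserBlocksWithRetries
				mx.Lock()
				results[tenant] = n
				mx.Unlock()
			}
		}()
	}

	for _, t := range []string{"tenant-a", "tenant-b", "tenant-c"} {
		jobs <- t
	}
	close(jobs) // lets the worker range loops finish
	wg.Wait()
	fmt.Println(results)
}
```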
-
-	d.userMx.Lock()
-	if len(resErrs) == 0 {
-		// Replace the map, so that we discard tenants fully deleted from storage.
-		d.userMetas = resMetas
-		d.userMetasLookup = resMetasLookup
-		d.userDeletionMarks = resDeletionMarks
-	} else {
-		// If an error occurred, we prefer to partially update the metas map instead of
-		// not updating it at all. At least we'll update blocks for the successful tenants.
-		for userID, metas := range resMetas {
-			d.userMetas[userID] = metas
-		}
-
-		for userID, metas := range resMetasLookup {
-			d.userMetasLookup[userID] = metas
-		}
-
-		for userID, deletionMarks := range resDeletionMarks {
-			d.userDeletionMarks[userID] = deletionMarks
-		}
-	}
-	d.userMx.Unlock()
-
-	return resErrs.Err()
-}
-
-// scanUserBlocksWithRetries runs scanUserBlocks() retrying multiple times
-// in case of error.
-func (d *BucketScanBlocksFinder) scanUserBlocksWithRetries(ctx context.Context, userID string) (metas bucketindex.Blocks, deletionMarks map[ulid.ULID]*bucketindex.BlockDeletionMark, err error) {
-	retries := backoff.New(ctx, backoff.Config{
-		MinBackoff: time.Second,
-		MaxBackoff: 30 * time.Second,
-		MaxRetries: 3,
-	})
-
-	for retries.Ongoing() {
-		metas, deletionMarks, err = d.scanUserBlocks(ctx, userID)
-		if err == nil {
-			return
-		}
-
-		retries.Wait()
-	}
-
-	return
-}
-
-func (d *BucketScanBlocksFinder) scanUserBlocks(ctx context.Context, userID string) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) {
-	fetcher, userBucket, deletionMarkFilter, err := d.getOrCreateMetaFetcher(userID)
-	if err != nil {
-		return nil, nil, errors.Wrapf(err, "create meta fetcher for user %s", userID)
-	}
-
-	metas, partials, err := fetcher.Fetch(ctx)
-	if err != nil {
-		return nil, nil, errors.Wrapf(err, "scan blocks for user %s", userID)
-	}
-
-	// In case we've found any partial block we log about it but continue because we don't want
-	// to break the scanner just because there's a spurious block.
-	if len(partials) > 0 {
-		logPartialBlocks(userID, partials, d.logger)
-	}
-
-	res := make(bucketindex.Blocks, 0, len(metas))
-	for _, m := range metas {
-		blockMeta := bucketindex.BlockFromThanosMeta(*m)
-
-		// If the block is already known, we can get the remaining attributes from there
-		// because a block is immutable.
-		prevMeta := d.getBlockMeta(userID, m.ULID)
-		if prevMeta != nil {
-			blockMeta.UploadedAt = prevMeta.UploadedAt
-		} else {
-			attrs, err := userBucket.Attributes(ctx, path.Join(m.ULID.String(), metadata.MetaFilename))
-			if err != nil {
-				return nil, nil, errors.Wrapf(err, "read %s attributes of block %s for user %s", metadata.MetaFilename, m.ULID.String(), userID)
-			}
-
-			// Since the meta.json file is the last file of a block being uploaded and it's immutable
-			// we can safely assume that the last modified timestamp of the meta.json is the time when
-			// the block upload has completed.
-			blockMeta.UploadedAt = attrs.LastModified.Unix()
-		}
-
-		res = append(res, blockMeta)
-	}
-
-	// The blocks scanner expects all blocks to be sorted by max time.
-	sortBlocksByMaxTime(res)
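scanUserBlocksWithRetries above leans on Cortex's util/backoff package; its control flow reduces to a bounded loop with sleeps doubling from a minimum up to a cap. A simplified stand-in for that loop (not the real backoff API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// withRetries runs fn up to maxRetries times, sleeping between attempts with
// a capped exponential backoff, mirroring the Ongoing/Wait loop above.
func withRetries(maxRetries int, min, max time.Duration, fn func() error) error {
	sleep := min
	var err error
	for attempt := 0; attempt < maxRetries; attempt++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(sleep)
		if sleep *= 2; sleep > max {
			sleep = max
		}
	}
	return err
}

func main() {
	calls := 0
	err := withRetries(3, time.Millisecond, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```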
-
-	// Convert deletion marks to our own data type.
-	marks := map[ulid.ULID]*bucketindex.BlockDeletionMark{}
-	for id, m := range deletionMarkFilter.DeletionMarkBlocks() {
-		marks[id] = bucketindex.BlockDeletionMarkFromThanosMarker(m)
-	}
-
-	return res, marks, nil
-}
-
-func (d *BucketScanBlocksFinder) getOrCreateMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) {
-	d.fetchersMx.Lock()
-	defer d.fetchersMx.Unlock()
-
-	if f, ok := d.fetchers[userID]; ok {
-		return f.metadataFetcher, f.userBucket, f.deletionMarkFilter, nil
-	}
-
-	fetcher, userBucket, deletionMarkFilter, err := d.createMetaFetcher(userID)
-	if err != nil {
-		return nil, nil, nil, err
-	}
-
-	d.fetchers[userID] = userFetcher{
-		metadataFetcher:    fetcher,
-		deletionMarkFilter: deletionMarkFilter,
-		userBucket:         userBucket,
-	}
-
-	return fetcher, userBucket, deletionMarkFilter, nil
-}
-
-func (d *BucketScanBlocksFinder) createMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) {
-	userLogger := util_log.WithUserID(userID, d.logger)
-	userBucket := bucket.NewUserBucketClient(userID, d.bucketClient, d.cfgProvider)
-	userReg := prometheus.NewRegistry()
-
-	// The following filters have been intentionally omitted:
-	// - Consistency delay filter: omitted because we should discover all uploaded blocks.
-	//   The consistency delay is taken into account when running the consistency check at query time.
-	// - Deduplicate filter: omitted because it could cause troubles with the consistency check if
-	//   we "hide" source blocks because recently compacted by the compactor before the store-gateway instances
-	//   discover and load the compacted ones.
-	deletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, d.cfg.IgnoreDeletionMarksDelay, d.cfg.MetasConcurrency)
-	filters := []block.MetadataFilter{deletionMarkFilter}
-
-	f, err := block.NewMetaFetcher(
-		userLogger,
-		d.cfg.MetasConcurrency,
-		userBucket,
-		// The fetcher stores cached metas in the "meta-syncer/" sub directory.
- filepath.Join(d.cfg.CacheDir, userID), - userReg, - filters, - nil, - ) - if err != nil { - return nil, nil, nil, err - } - - d.fetchersMetrics.AddUserRegistry(userID, userReg) - return f, userBucket, deletionMarkFilter, nil -} - -func (d *BucketScanBlocksFinder) getBlockMeta(userID string, blockID ulid.ULID) *bucketindex.Block { - d.userMx.RLock() - defer d.userMx.RUnlock() - - metas, ok := d.userMetasLookup[userID] - if !ok { - return nil - } - - return metas[blockID] -} - -func sortBlocksByMaxTime(blocks bucketindex.Blocks) { - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].MaxTime < blocks[j].MaxTime - }) -} - -func logPartialBlocks(userID string, partials map[ulid.ULID]error, logger log.Logger) { - ids := make([]string, 0, len(partials)) - errs := make([]string, 0, len(partials)) - - for id, err := range partials { - ids = append(ids, id.String()) - errs = append(errs, err.Error()) - } - - level.Warn(logger).Log("msg", "found partial blocks", "user", userID, "blocks", strings.Join(ids, ","), "err", strings.Join(errs, ",")) -} - -type userFetcher struct { - metadataFetcher block.MetadataFetcher - deletionMarkFilter *block.IgnoreDeletionMarkFilter - userBucket objstore.Bucket -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go deleted file mode 100644 index 73278b061..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go +++ /dev/null @@ -1,103 +0,0 @@ -package querier - -import ( - "context" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/discovery/dns" - "github.com/thanos-io/thanos/pkg/extprom" - - "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// BlocksStoreSet implementation used when the blocks are not sharded in the store-gateway -// and so requests are balanced across the set of store-gateway instances. -type blocksStoreBalancedSet struct { - services.Service - - serviceAddresses []string - clientsPool *client.Pool - dnsProvider *dns.Provider - - logger log.Logger -} - -func newBlocksStoreBalancedSet(serviceAddresses []string, clientConfig ClientConfig, logger log.Logger, reg prometheus.Registerer) *blocksStoreBalancedSet { - const dnsResolveInterval = 10 * time.Second - - dnsProviderReg := extprom.WrapRegistererWithPrefix("cortex_storegateway_client_", reg) - - s := &blocksStoreBalancedSet{ - serviceAddresses: serviceAddresses, - dnsProvider: dns.NewProvider(logger, dnsProviderReg, dns.GolangResolverType), - clientsPool: newStoreGatewayClientPool(nil, clientConfig, logger, reg), - logger: logger, - } - - s.Service = services.NewTimerService(dnsResolveInterval, s.starting, s.resolve, nil) - return s -} - -func (s *blocksStoreBalancedSet) starting(ctx context.Context) error { - // Initial DNS resolution. 
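GetClientsFor below pairs this periodic DNS resolution with a per-query rand.Shuffle of the resolved addresses, then assigns each block the first address not excluded for it. The selection step in isolation (addresses are made up for illustration):

```go
package main

import (
	"fmt"
	"math/rand"
)

// firstNonExcluded returns the first address not present in exclude, or ""
// when every address has been ruled out, mirroring getFirstNonExcludedAddr.
func firstNonExcluded(addresses, exclude []string) string {
	for _, addr := range addresses {
		skip := false
		for _, e := range exclude {
			if addr == e {
				skip = true
				break
			}
		}
		if !skip {
			return addr
		}
	}
	return ""
}

func main() {
	addrs := []string{"sg-1:9095", "sg-2:9095", "sg-3:9095"}
	rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
	fmt.Println(firstNonExcluded(addrs, []string{"sg-2:9095"})) // sg-1 or sg-3, never sg-2
}
```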
- return s.resolve(ctx) -} - -func (s *blocksStoreBalancedSet) resolve(ctx context.Context) error { - if err := s.dnsProvider.Resolve(ctx, s.serviceAddresses); err != nil { - level.Error(s.logger).Log("msg", "failed to resolve store-gateway addresses", "err", err, "addresses", s.serviceAddresses) - } - return nil -} - -func (s *blocksStoreBalancedSet) GetClientsFor(_ string, blockIDs []ulid.ULID, exclude map[ulid.ULID][]string) (map[BlocksStoreClient][]ulid.ULID, error) { - addresses := s.dnsProvider.Addresses() - if len(addresses) == 0 { - return nil, fmt.Errorf("no address resolved for the store-gateway service addresses %s", strings.Join(s.serviceAddresses, ",")) - } - - // Randomize the list of addresses to not always query the same address. - rand.Shuffle(len(addresses), func(i, j int) { - addresses[i], addresses[j] = addresses[j], addresses[i] - }) - - // Pick a non excluded client for each block. - clients := map[BlocksStoreClient][]ulid.ULID{} - - for _, blockID := range blockIDs { - // Pick the first non excluded store-gateway instance. - addr := getFirstNonExcludedAddr(addresses, exclude[blockID]) - if addr == "" { - return nil, fmt.Errorf("no store-gateway instance left after filtering out excluded instances for block %s", blockID.String()) - } - - c, err := s.clientsPool.GetClientFor(addr) - if err != nil { - return nil, errors.Wrapf(err, "failed to get store-gateway client for %s", addr) - } - - clients[c.(BlocksStoreClient)] = append(clients[c.(BlocksStoreClient)], blockID) - } - - return clients, nil -} - -func getFirstNonExcludedAddr(addresses, exclude []string) string { - for _, addr := range addresses { - if !util.StringsContain(exclude, addr) { - return addr - } - } - - return "" -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go deleted file mode 100644 index 7526a4f4b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go +++ /dev/null @@ -1,969 +0,0 @@ -package querier - -import ( - "context" - "fmt" - "io" - "sort" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/types" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/store/hintspb" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/strutil" - "go.uber.org/atomic" - "golang.org/x/sync/errgroup" - grpc_metadata "google.golang.org/grpc/metadata" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/limiter" - 
util_log "github.com/cortexproject/cortex/pkg/util/log"
-	"github.com/cortexproject/cortex/pkg/util/math"
-	"github.com/cortexproject/cortex/pkg/util/services"
-	"github.com/cortexproject/cortex/pkg/util/spanlogger"
-	"github.com/cortexproject/cortex/pkg/util/validation"
-)
-
-const (
-	// The maximum number of times we attempt fetching missing blocks from different
-	// store-gateways. If no more store-gateways are left (i.e. due to a lower replication
-	// factor) then we'll end the retries earlier.
-	maxFetchSeriesAttempts = 3
-)
-
-var (
-	errNoStoreGatewayAddress  = errors.New("no store-gateway address configured")
-	errMaxChunksPerQueryLimit = "the query hit the max number of chunks limit while fetching chunks from store-gateways for %s (limit: %d)"
-)
-
-// BlocksStoreSet is the interface used to get the clients to query series on a set of blocks.
-type BlocksStoreSet interface {
-	services.Service
-
-	// GetClientsFor returns the store gateway clients that should be used to
-	// query the set of blocks in input. The exclude parameter is the map of
-	// blocks -> store-gateway addresses that should be excluded.
-	GetClientsFor(userID string, blockIDs []ulid.ULID, exclude map[ulid.ULID][]string) (map[BlocksStoreClient][]ulid.ULID, error)
-}
-
-// BlocksFinder is the interface used to find blocks for a given user and time range.
-type BlocksFinder interface {
-	services.Service
-
-	// GetBlocks returns known blocks for userID containing samples within the range minT
-	// and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending.
-	GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error)
-}
-
-// BlocksStoreClient is the interface that should be implemented by any client used
-// to query a backend store-gateway.
-type BlocksStoreClient interface {
-	storegatewaypb.StoreGatewayClient
-
-	// RemoteAddress returns the address of the remote store-gateway and is used to uniquely
-	// identify a store-gateway backend instance.
-	RemoteAddress() string
-}
-
-// BlocksStoreLimits is the interface that should be implemented by the limits provider.
-type BlocksStoreLimits interface {
-	bucket.TenantConfigProvider
-
-	MaxChunksPerQueryFromStore(userID string) int
-	StoreGatewayTenantShardSize(userID string) int
-}
-
-type blocksStoreQueryableMetrics struct {
-	storesHit prometheus.Histogram
-	refetches prometheus.Histogram
-}
-
-func newBlocksStoreQueryableMetrics(reg prometheus.Registerer) *blocksStoreQueryableMetrics {
-	return &blocksStoreQueryableMetrics{
-		storesHit: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
-			Namespace: "cortex",
-			Name:      "querier_storegateway_instances_hit_per_query",
-			Help:      "Number of store-gateway instances hit for a single query.",
-			Buckets:   []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
-		}),
-		refetches: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
-			Namespace: "cortex",
-			Name:      "querier_storegateway_refetches_per_query",
-			Help:      "Number of re-fetches attempted while querying store-gateway instances due to missing blocks.",
-			Buckets:   []float64{0, 1, 2},
-		}),
-	}
-}
-
-// BlocksStoreQueryable is a queryable which queries blocks storage via
-// the store-gateway.
-type BlocksStoreQueryable struct { - services.Service - - stores BlocksStoreSet - finder BlocksFinder - consistency *BlocksConsistencyChecker - logger log.Logger - queryStoreAfter time.Duration - metrics *blocksStoreQueryableMetrics - limits BlocksStoreLimits - - // Subservices manager. - subservices *services.Manager - subservicesWatcher *services.FailureWatcher -} - -func NewBlocksStoreQueryable( - stores BlocksStoreSet, - finder BlocksFinder, - consistency *BlocksConsistencyChecker, - limits BlocksStoreLimits, - queryStoreAfter time.Duration, - logger log.Logger, - reg prometheus.Registerer, -) (*BlocksStoreQueryable, error) { - manager, err := services.NewManager(stores, finder) - if err != nil { - return nil, errors.Wrap(err, "register blocks storage queryable subservices") - } - - q := &BlocksStoreQueryable{ - stores: stores, - finder: finder, - consistency: consistency, - queryStoreAfter: queryStoreAfter, - logger: logger, - subservices: manager, - subservicesWatcher: services.NewFailureWatcher(), - metrics: newBlocksStoreQueryableMetrics(reg), - limits: limits, - } - - q.Service = services.NewBasicService(q.starting, q.running, q.stopping) - - return q, nil -} - -func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits BlocksStoreLimits, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { - var stores BlocksStoreSet - - bucketClient, err := bucket.NewClient(context.Background(), storageCfg.Bucket, "querier", logger, reg) - if err != nil { - return nil, errors.Wrap(err, "failed to create bucket client") - } - - // Blocks finder doesn't use chunks, but we pass config for consistency. - cachingBucket, err := cortex_tsdb.CreateCachingBucket(storageCfg.BucketStore.ChunksCache, storageCfg.BucketStore.MetadataCache, bucketClient, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "querier"}, reg)) - if err != nil { - return nil, errors.Wrap(err, "create caching bucket") - } - bucketClient = cachingBucket - - // Create the blocks finder. 
-	var finder BlocksFinder
-	if storageCfg.BucketStore.BucketIndex.Enabled {
-		finder = NewBucketIndexBlocksFinder(BucketIndexBlocksFinderConfig{
-			IndexLoader: bucketindex.LoaderConfig{
-				CheckInterval:         time.Minute,
-				UpdateOnStaleInterval: storageCfg.BucketStore.SyncInterval,
-				UpdateOnErrorInterval: storageCfg.BucketStore.BucketIndex.UpdateOnErrorInterval,
-				IdleTimeout:           storageCfg.BucketStore.BucketIndex.IdleTimeout,
-			},
-			MaxStalePeriod:           storageCfg.BucketStore.BucketIndex.MaxStalePeriod,
-			IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay,
-		}, bucketClient, limits, logger, reg)
-	} else {
-		finder = NewBucketScanBlocksFinder(BucketScanBlocksFinderConfig{
-			ScanInterval:             storageCfg.BucketStore.SyncInterval,
-			TenantsConcurrency:       storageCfg.BucketStore.TenantSyncConcurrency,
-			MetasConcurrency:         storageCfg.BucketStore.MetaSyncConcurrency,
-			CacheDir:                 storageCfg.BucketStore.SyncDir,
-			IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay,
-		}, bucketClient, limits, logger, reg)
-	}
-
-	if gatewayCfg.ShardingEnabled {
-		storesRingCfg := gatewayCfg.ShardingRing.ToRingConfig()
-		storesRingBackend, err := kv.NewClient(
-			storesRingCfg.KVStore,
-			ring.GetCodec(),
-			kv.RegistererWithKVName(prometheus.WrapRegistererWithPrefix("cortex_", reg), "querier-store-gateway"),
-			logger,
-		)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to create store-gateway ring backend")
-		}
-
-		storesRing, err := ring.NewWithStoreClientAndStrategy(storesRingCfg, storegateway.RingNameForClient, storegateway.RingKey, storesRingBackend, ring.NewIgnoreUnhealthyInstancesReplicationStrategy(), prometheus.WrapRegistererWithPrefix("cortex_", reg), logger)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to create store-gateway ring client")
-		}
-
-		stores, err = newBlocksStoreReplicationSet(storesRing, gatewayCfg.ShardingStrategy, randomLoadBalancing, limits, querierCfg.StoreGatewayClient, logger, reg)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to create store set")
-		}
-	} else {
-		if len(querierCfg.GetStoreGatewayAddresses()) == 0 {
-			return nil, errNoStoreGatewayAddress
-		}
-
-		stores = newBlocksStoreBalancedSet(querierCfg.GetStoreGatewayAddresses(), querierCfg.StoreGatewayClient, logger, reg)
-	}
-
-	consistency := NewBlocksConsistencyChecker(
-		// Exclude blocks which have been recently uploaded, in order to give enough time to store-gateways
-		// to discover and load them (3 times the sync interval).
-		storageCfg.BucketStore.ConsistencyDelay+(3*storageCfg.BucketStore.SyncInterval),
-		// To avoid any false positive in the consistency check, we do exclude blocks which have been
-		// recently marked for deletion, until the "ignore delay / 2". This means the consistency checker
-		// excludes such blocks about 50% of the time before queriers and store-gateways stop querying them.
- storageCfg.BucketStore.IgnoreDeletionMarksDelay/2, - logger, - reg, - ) - - return NewBlocksStoreQueryable(stores, finder, consistency, limits, querierCfg.QueryStoreAfter, logger, reg) -} - -func (q *BlocksStoreQueryable) starting(ctx context.Context) error { - q.subservicesWatcher.WatchManager(q.subservices) - - if err := services.StartManagerAndAwaitHealthy(ctx, q.subservices); err != nil { - return errors.Wrap(err, "unable to start blocks storage queryable subservices") - } - - return nil -} - -func (q *BlocksStoreQueryable) running(ctx context.Context) error { - for { - select { - case <-ctx.Done(): - return nil - case err := <-q.subservicesWatcher.Chan(): - return errors.Wrap(err, "block storage queryable subservice failed") - } - } -} - -func (q *BlocksStoreQueryable) stopping(_ error) error { - return services.StopManagerAndAwaitStopped(context.Background(), q.subservices) -} - -// Querier returns a new Querier on the storage. -func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - if s := q.State(); s != services.Running { - return nil, errors.Errorf("BlocksStoreQueryable is not running: %v", s) - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - return &blocksStoreQuerier{ - ctx: ctx, - minT: mint, - maxT: maxt, - userID: userID, - finder: q.finder, - stores: q.stores, - metrics: q.metrics, - limits: q.limits, - consistency: q.consistency, - logger: q.logger, - queryStoreAfter: q.queryStoreAfter, - }, nil -} - -type blocksStoreQuerier struct { - ctx context.Context - minT, maxT int64 - userID string - finder BlocksFinder - stores BlocksStoreSet - metrics *blocksStoreQueryableMetrics - consistency *BlocksConsistencyChecker - limits BlocksStoreLimits - logger log.Logger - - // If set, the querier manipulates the max time to not be greater than - // "now - queryStoreAfter" so that most recent blocks are not queried. - queryStoreAfter time.Duration -} - -// Select implements storage.Querier interface. -// The bool passed is ignored because the series is always sorted. -func (q *blocksStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - return q.selectSorted(sp, matchers...) -} - -func (q *blocksStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.LabelNames") - defer spanLog.Span.Finish() - - minT, maxT := q.minT, q.maxT - - var ( - resMtx sync.Mutex - resNameSets = [][]string{} - resWarnings = storage.Warnings(nil) - convertedMatchers = convertMatchersToLabelMatcher(matchers) - ) - - queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - nameSets, warnings, queriedBlocks, err := q.fetchLabelNamesFromStore(spanCtx, clients, minT, maxT, convertedMatchers) - if err != nil { - return nil, err - } - - resMtx.Lock() - resNameSets = append(resNameSets, nameSets...) - resWarnings = append(resWarnings, warnings...) 
-		resMtx.Unlock()
-
-		return queriedBlocks, nil
-	}
-
-	err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return strutil.MergeSlices(resNameSets...), resWarnings, nil
-}
-
-func (q *blocksStoreQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.LabelValues")
-	defer spanLog.Span.Finish()
-
-	minT, maxT := q.minT, q.maxT
-
-	var (
-		resValueSets = [][]string{}
-		resWarnings  = storage.Warnings(nil)
-
-		resultMtx sync.Mutex
-	)
-
-	queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) {
-		valueSets, warnings, queriedBlocks, err := q.fetchLabelValuesFromStore(spanCtx, name, clients, minT, maxT, matchers...)
-		if err != nil {
-			return nil, err
-		}
-
-		resultMtx.Lock()
-		resValueSets = append(resValueSets, valueSets...)
-		resWarnings = append(resWarnings, warnings...)
-		resultMtx.Unlock()
-
-		return queriedBlocks, nil
-	}
-
-	err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return strutil.MergeSlices(resValueSets...), resWarnings, nil
-}
-
-func (q *blocksStoreQuerier) Close() error {
-	return nil
-}
-
-func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.selectSorted")
-	defer spanLog.Span.Finish()
-
-	minT, maxT := q.minT, q.maxT
-	if sp != nil {
-		minT, maxT = sp.Start, sp.End
-	}
-
-	var (
-		convertedMatchers = convertMatchersToLabelMatcher(matchers)
-		resSeriesSets     = []storage.SeriesSet(nil)
-		resWarnings       = storage.Warnings(nil)
-
-		maxChunksLimit  = q.limits.MaxChunksPerQueryFromStore(q.userID)
-		leftChunksLimit = maxChunksLimit
-
-		resultMtx sync.Mutex
-	)
-
-	queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) {
-		seriesSets, queriedBlocks, warnings, numChunks, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, matchers, convertedMatchers, maxChunksLimit, leftChunksLimit)
-		if err != nil {
-			return nil, err
-		}
-
-		resultMtx.Lock()
-
-		resSeriesSets = append(resSeriesSets, seriesSets...)
-		resWarnings = append(resWarnings, warnings...)
-
-		// Given a single block is guaranteed to not be queried twice, we can safely decrease the number of
-		// chunks we can still read before hitting the limit (max == 0 means disabled).
-		if maxChunksLimit > 0 {
-			leftChunksLimit -= numChunks
-		}
-		resultMtx.Unlock()
-
-		return queriedBlocks, nil
-	}
-
-	err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc)
-	if err != nil {
-		return storage.ErrSeriesSet(err)
-	}
-
-	if len(resSeriesSets) == 0 {
-		return storage.EmptySeriesSet()
-	}
-
-	return series.NewSeriesSetWithWarnings(
-		storage.NewMergeSeriesSet(resSeriesSets, storage.ChainedSeriesMerge),
-		resWarnings)
-}
-
-func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64,
-	queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error)) error {
-	// If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until
-	// now - queryStoreAfter, because the most recent time range is covered by ingesters. This
-	// optimization is particularly important for the blocks storage because it can be used to skip
-	// querying the most recent, not-yet-compacted blocks from the storage.
-	if q.queryStoreAfter > 0 {
-		now := time.Now()
-		origMaxT := maxT
-		maxT = math.Min64(maxT, util.TimeToMillis(now.Add(-q.queryStoreAfter)))
-
-		if origMaxT != maxT {
-			level.Debug(logger).Log("msg", "the max time of the query to blocks storage has been manipulated", "original", origMaxT, "updated", maxT)
-		}
-
-		if maxT < minT {
-			q.metrics.storesHit.Observe(0)
-			level.Debug(logger).Log("msg", "empty query time range after max time manipulation")
-			return nil
-		}
-	}
-
-	// Find the list of blocks we need to query given the time range.
-	knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, q.userID, minT, maxT)
-	if err != nil {
-		return err
-	}
-
-	if len(knownBlocks) == 0 {
-		q.metrics.storesHit.Observe(0)
-		level.Debug(logger).Log("msg", "no blocks found")
-		return nil
-	}
-
-	level.Debug(logger).Log("msg", "found blocks to query", "expected", knownBlocks.String())
-
-	var (
-		// At the beginning, the list of blocks to query is all known blocks.
-		remainingBlocks = knownBlocks.GetULIDs()
-		attemptedBlocks = map[ulid.ULID][]string{}
-		touchedStores   = map[string]struct{}{}
-
-		resQueriedBlocks = []ulid.ULID(nil)
-	)
-
-	for attempt := 1; attempt <= maxFetchSeriesAttempts; attempt++ {
-		// Find the set of store-gateway instances having the blocks. The exclude parameter is the
-		// map of blocks queried so far, with the list of store-gateway addresses for each block.
-		clients, err := q.stores.GetClientsFor(q.userID, remainingBlocks, attemptedBlocks)
-		if err != nil {
-			// If it's a retry and we get an error, it means there are no more store-gateways left
-			// from which to run another attempt, so we just stop retrying.
-			if attempt > 1 {
-				level.Warn(logger).Log("msg", "unable to get store-gateway clients while retrying to fetch missing blocks", "err", err)
-				break
-			}
-
-			return err
-		}
-		level.Debug(logger).Log("msg", "found store-gateway instances to query", "num instances", len(clients), "attempt", attempt)
-
-		// Fetch series from stores. If an error occurs we do not retry, because retries
-		// are only meant to cover missing blocks.
-		queriedBlocks, err := queryFunc(clients, minT, maxT)
-		if err != nil {
-			return err
-		}
-		level.Debug(logger).Log("msg", "received series from all store-gateways", "queried blocks", strings.Join(convertULIDsToString(queriedBlocks), " "))
-
-		resQueriedBlocks = append(resQueriedBlocks, queriedBlocks...)
-
-		// Update the map of blocks we attempted to query.
-		for client, blockIDs := range clients {
-			touchedStores[client.RemoteAddress()] = struct{}{}
-
-			for _, blockID := range blockIDs {
-				attemptedBlocks[blockID] = append(attemptedBlocks[blockID], client.RemoteAddress())
-			}
-		}
-
-		// Ensure all expected blocks have been queried (during all attempts done so far).
-		missingBlocks := q.consistency.Check(knownBlocks, knownDeletionMarks, resQueriedBlocks)
-		if len(missingBlocks) == 0 {
-			q.metrics.storesHit.Observe(float64(len(touchedStores)))
-			q.metrics.refetches.Observe(float64(attempt - 1))
-
-			return nil
-		}
-
-		level.Debug(logger).Log("msg", "consistency check failed", "attempt", attempt, "missing blocks", strings.Join(convertULIDsToString(missingBlocks), " "))
-
-		// The next attempt should just query the missing blocks.
-		remainingBlocks = missingBlocks
-	}
-
-	// We've not been able to query all expected blocks after all retries.
- level.Warn(util_log.WithContext(ctx, logger)).Log("msg", "failed consistency check", "err", err) - return fmt.Errorf("consistency check failed because some blocks were not queried: %s", strings.Join(convertULIDsToString(remainingBlocks), " ")) -} - -func (q *blocksStoreQuerier) fetchSeriesFromStores( - ctx context.Context, - sp *storage.SelectHints, - clients map[BlocksStoreClient][]ulid.ULID, - minT int64, - maxT int64, - matchers []*labels.Matcher, - convertedMatchers []storepb.LabelMatcher, - maxChunksLimit int, - leftChunksLimit int, -) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, int, error) { - var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) - g, gCtx = errgroup.WithContext(reqCtx) - mtx = sync.Mutex{} - seriesSets = []storage.SeriesSet(nil) - warnings = storage.Warnings(nil) - queriedBlocks = []ulid.ULID(nil) - numChunks = atomic.NewInt32(0) - spanLog = spanlogger.FromContext(ctx) - queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) - reqStats = stats.FromContext(ctx) - ) - - // Concurrently fetch series from all clients. - for c, blockIDs := range clients { - // Change variables scope since it will be used in a goroutine. - c := c - blockIDs := blockIDs - - g.Go(func() error { - // See: https://github.com/prometheus/prometheus/pull/8050 - // TODO(goutham): we should ideally be passing the hints down to the storage layer - // and let the TSDB return us data with no chunks as in prometheus#8050. - // But this is an acceptable workaround for now. - skipChunks := sp != nil && sp.Func == "series" - - req, err := createSeriesRequest(minT, maxT, convertedMatchers, skipChunks, blockIDs) - if err != nil { - return errors.Wrapf(err, "failed to create series request") - } - - stream, err := c.Series(gCtx, req) - if err != nil { - return errors.Wrapf(err, "failed to fetch series from %s", c.RemoteAddress()) - } - - mySeries := []*storepb.Series(nil) - myWarnings := storage.Warnings(nil) - myQueriedBlocks := []ulid.ULID(nil) - - for { - // Ensure the context hasn't been canceled in the meanwhile (eg. an error occurred - // in another goroutine). - if gCtx.Err() != nil { - return gCtx.Err() - } - - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return errors.Wrapf(err, "failed to receive series from %s", c.RemoteAddress()) - } - - // Response may either contain series, warning or hints. - if s := resp.GetSeries(); s != nil { - mySeries = append(mySeries, s) - - // Add series fingerprint to query limiter; will return error if we are over the limit - limitErr := queryLimiter.AddSeries(cortexpb.FromLabelsToLabelAdapters(s.PromLabels())) - if limitErr != nil { - return validation.LimitError(limitErr.Error()) - } - - // Ensure the max number of chunks limit hasn't been reached (max == 0 means disabled). 
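-					// Two distinct limiters apply here: maxChunksLimit/leftChunksLimit is the
-					// per-query "from store" budget shared across all store-gateways for this
-					// select, while queryLimiter tracks the tenant-wide series, chunk and
-					// chunk-bytes budgets; whichever trips first aborts the whole stream.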
- if maxChunksLimit > 0 { - actual := numChunks.Add(int32(len(s.Chunks))) - if actual > int32(leftChunksLimit) { - return validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, util.LabelMatchersToString(matchers), maxChunksLimit)) - } - } - chunksSize := countChunkBytes(s) - if chunkBytesLimitErr := queryLimiter.AddChunkBytes(chunksSize); chunkBytesLimitErr != nil { - return validation.LimitError(chunkBytesLimitErr.Error()) - } - if chunkLimitErr := queryLimiter.AddChunks(len(s.Chunks)); chunkLimitErr != nil { - return validation.LimitError(chunkLimitErr.Error()) - } - } - - if w := resp.GetWarning(); w != "" { - myWarnings = append(myWarnings, errors.New(w)) - } - - if h := resp.GetHints(); h != nil { - hints := hintspb.SeriesResponseHints{} - if err := types.UnmarshalAny(h, &hints); err != nil { - return errors.Wrapf(err, "failed to unmarshal series hints from %s", c.RemoteAddress()) - } - - ids, err := convertBlockHintsToULIDs(hints.QueriedBlocks) - if err != nil { - return errors.Wrapf(err, "failed to parse queried block IDs from received hints") - } - - myQueriedBlocks = append(myQueriedBlocks, ids...) - } - } - - numSeries := len(mySeries) - chunkBytes := countChunkBytes(mySeries...) - - reqStats.AddFetchedSeries(uint64(numSeries)) - reqStats.AddFetchedChunkBytes(uint64(chunkBytes)) - - level.Debug(spanLog).Log("msg", "received series from store-gateway", - "instance", c.RemoteAddress(), - "fetched series", numSeries, - "fetched chunk bytes", chunkBytes, - "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), - "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) - - // Store the result. - mtx.Lock() - seriesSets = append(seriesSets, &blockQuerierSeriesSet{series: mySeries}) - warnings = append(warnings, myWarnings...) - queriedBlocks = append(queriedBlocks, myQueriedBlocks...) - mtx.Unlock() - - return nil - }) - } - - // Wait until all client requests complete. - if err := g.Wait(); err != nil { - return nil, nil, nil, 0, err - } - - return seriesSets, queriedBlocks, warnings, int(numChunks.Load()), nil -} - -func (q *blocksStoreQuerier) fetchLabelNamesFromStore( - ctx context.Context, - clients map[BlocksStoreClient][]ulid.ULID, - minT int64, - maxT int64, - matchers []storepb.LabelMatcher, -) ([][]string, storage.Warnings, []ulid.ULID, error) { - var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) - g, gCtx = errgroup.WithContext(reqCtx) - mtx = sync.Mutex{} - nameSets = [][]string{} - warnings = storage.Warnings(nil) - queriedBlocks = []ulid.ULID(nil) - spanLog = spanlogger.FromContext(ctx) - ) - - // Concurrently fetch series from all clients. - for c, blockIDs := range clients { - // Change variables scope since it will be used in a goroutine. 
-		c := c
-		blockIDs := blockIDs
-
-		g.Go(func() error {
-			req, err := createLabelNamesRequest(minT, maxT, blockIDs, matchers)
-			if err != nil {
-				return errors.Wrapf(err, "failed to create label names request")
-			}
-
-			namesResp, err := c.LabelNames(gCtx, req)
-			if err != nil {
-				return errors.Wrapf(err, "failed to fetch label names from %s", c.RemoteAddress())
-			}
-
-			myQueriedBlocks := []ulid.ULID(nil)
-			if namesResp.Hints != nil {
-				hints := hintspb.LabelNamesResponseHints{}
-				if err := types.UnmarshalAny(namesResp.Hints, &hints); err != nil {
-					return errors.Wrapf(err, "failed to unmarshal label names hints from %s", c.RemoteAddress())
-				}
-
-				ids, err := convertBlockHintsToULIDs(hints.QueriedBlocks)
-				if err != nil {
-					return errors.Wrapf(err, "failed to parse queried block IDs from received hints")
-				}
-
-				myQueriedBlocks = ids
-			}
-
-			level.Debug(spanLog).Log("msg", "received label names from store-gateway",
-				"instance", c.RemoteAddress(),
-				"num labels", len(namesResp.Names),
-				"requested blocks", strings.Join(convertULIDsToString(blockIDs), " "),
-				"queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " "))
-
-			// Store the result.
-			mtx.Lock()
-			nameSets = append(nameSets, namesResp.Names)
-			for _, w := range namesResp.Warnings {
-				warnings = append(warnings, errors.New(w))
-			}
-			queriedBlocks = append(queriedBlocks, myQueriedBlocks...)
-			mtx.Unlock()
-
-			return nil
-		})
-	}
-
-	// Wait until all client requests complete.
-	if err := g.Wait(); err != nil {
-		return nil, nil, nil, err
-	}
-
-	return nameSets, warnings, queriedBlocks, nil
-}
-
-func (q *blocksStoreQuerier) fetchLabelValuesFromStore(
-	ctx context.Context,
-	name string,
-	clients map[BlocksStoreClient][]ulid.ULID,
-	minT int64,
-	maxT int64,
-	matchers ...*labels.Matcher,
-) ([][]string, storage.Warnings, []ulid.ULID, error) {
-	var (
-		reqCtx        = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID)
-		g, gCtx       = errgroup.WithContext(reqCtx)
-		mtx           = sync.Mutex{}
-		valueSets     = [][]string{}
-		warnings      = storage.Warnings(nil)
-		queriedBlocks = []ulid.ULID(nil)
-		spanLog       = spanlogger.FromContext(ctx)
-	)
-
-	// Concurrently fetch label values from all clients.
-	for c, blockIDs := range clients {
-		// Change variables scope since it will be used in a goroutine.
-		c := c
-		blockIDs := blockIDs
-
-		g.Go(func() error {
-			req, err := createLabelValuesRequest(minT, maxT, name, blockIDs, matchers...)
-			if err != nil {
-				return errors.Wrapf(err, "failed to create label values request")
-			}
-
-			valuesResp, err := c.LabelValues(gCtx, req)
-			if err != nil {
-				return errors.Wrapf(err, "failed to fetch label values from %s", c.RemoteAddress())
-			}
-
-			myQueriedBlocks := []ulid.ULID(nil)
-			if valuesResp.Hints != nil {
-				hints := hintspb.LabelValuesResponseHints{}
-				if err := types.UnmarshalAny(valuesResp.Hints, &hints); err != nil {
-					return errors.Wrapf(err, "failed to unmarshal label values hints from %s", c.RemoteAddress())
-				}
-
-				ids, err := convertBlockHintsToULIDs(hints.QueriedBlocks)
-				if err != nil {
-					return errors.Wrapf(err, "failed to parse queried block IDs from received hints")
-				}
-
-				myQueriedBlocks = ids
-			}
-
-			level.Debug(spanLog).Log("msg", "received label values from store-gateway",
-				"instance", c.RemoteAddress(),
-				"num values", len(valuesResp.Values),
-				"requested blocks", strings.Join(convertULIDsToString(blockIDs), " "),
-				"queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " "))
-
-			// Values returned need not be sorted, but we need them to be sorted so we can merge.
- sort.Strings(valuesResp.Values) - - // Store the result. - mtx.Lock() - valueSets = append(valueSets, valuesResp.Values) - for _, w := range valuesResp.Warnings { - warnings = append(warnings, errors.New(w)) - } - queriedBlocks = append(queriedBlocks, myQueriedBlocks...) - mtx.Unlock() - - return nil - }) - } - - // Wait until all client requests complete. - if err := g.Wait(); err != nil { - return nil, nil, nil, err - } - - return valueSets, warnings, queriedBlocks, nil -} - -func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, skipChunks bool, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { - // Selectively query only specific blocks. - hints := &hintspb.SeriesRequestHints{ - BlockMatchers: []storepb.LabelMatcher{ - { - Type: storepb.LabelMatcher_RE, - Name: block.BlockIDLabel, - Value: strings.Join(convertULIDsToString(blockIDs), "|"), - }, - }, - } - - anyHints, err := types.MarshalAny(hints) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal series request hints") - } - - return &storepb.SeriesRequest{ - MinTime: minT, - MaxTime: maxT, - Matchers: matchers, - PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT, - Hints: anyHints, - SkipChunks: skipChunks, - }, nil -} - -func createLabelNamesRequest(minT, maxT int64, blockIDs []ulid.ULID, matchers []storepb.LabelMatcher) (*storepb.LabelNamesRequest, error) { - req := &storepb.LabelNamesRequest{ - Start: minT, - End: maxT, - Matchers: matchers, - } - - // Selectively query only specific blocks. - hints := &hintspb.LabelNamesRequestHints{ - BlockMatchers: []storepb.LabelMatcher{ - { - Type: storepb.LabelMatcher_RE, - Name: block.BlockIDLabel, - Value: strings.Join(convertULIDsToString(blockIDs), "|"), - }, - }, - } - - anyHints, err := types.MarshalAny(hints) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal label names request hints") - } - - req.Hints = anyHints - - return req, nil -} - -func createLabelValuesRequest(minT, maxT int64, label string, blockIDs []ulid.ULID, matchers ...*labels.Matcher) (*storepb.LabelValuesRequest, error) { - req := &storepb.LabelValuesRequest{ - Start: minT, - End: maxT, - Label: label, - Matchers: convertMatchersToLabelMatcher(matchers), - } - - // Selectively query only specific blocks. 
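-	// The hints travel as a protobuf Any and are decoded by the store-gateway into an
-	// extra regexp matcher, conceptually __block_id__=~"<ULID>|<ULID>|..." (label name
-	// per thanos' block.BlockIDLabel), so only the listed blocks are touched.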
- hints := &hintspb.LabelValuesRequestHints{ - BlockMatchers: []storepb.LabelMatcher{ - { - Type: storepb.LabelMatcher_RE, - Name: block.BlockIDLabel, - Value: strings.Join(convertULIDsToString(blockIDs), "|"), - }, - }, - } - - anyHints, err := types.MarshalAny(hints) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal label values request hints") - } - - req.Hints = anyHints - - return req, nil -} - -func convertULIDsToString(ids []ulid.ULID) []string { - res := make([]string, len(ids)) - for idx, id := range ids { - res[idx] = id.String() - } - return res -} - -func convertBlockHintsToULIDs(hints []hintspb.Block) ([]ulid.ULID, error) { - res := make([]ulid.ULID, len(hints)) - - for idx, hint := range hints { - blockID, err := ulid.Parse(hint.Id) - if err != nil { - return nil, err - } - - res[idx] = blockID - } - - return res, nil -} - -// countChunkBytes returns the size of the chunks making up the provided series in bytes -func countChunkBytes(series ...*storepb.Series) (count int) { - for _, s := range series { - for _, c := range s.Chunks { - count += c.Size() - } - } - - return count -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go deleted file mode 100644 index bd63fddf0..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go +++ /dev/null @@ -1,159 +0,0 @@ -package querier - -import ( - "context" - "fmt" - "math/rand" - - "github.com/go-kit/log" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/client" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/services" -) - -type loadBalancingStrategy int - -const ( - noLoadBalancing = loadBalancingStrategy(iota) - randomLoadBalancing -) - -// BlocksStoreSet implementation used when the blocks are sharded and replicated across -// a set of store-gateway instances. -type blocksStoreReplicationSet struct { - services.Service - - storesRing *ring.Ring - clientsPool *client.Pool - shardingStrategy string - balancingStrategy loadBalancingStrategy - limits BlocksStoreLimits - - // Subservices manager. 
-	subservices        *services.Manager
-	subservicesWatcher *services.FailureWatcher
-}
-
-func newBlocksStoreReplicationSet(
-	storesRing *ring.Ring,
-	shardingStrategy string,
-	balancingStrategy loadBalancingStrategy,
-	limits BlocksStoreLimits,
-	clientConfig ClientConfig,
-	logger log.Logger,
-	reg prometheus.Registerer,
-) (*blocksStoreReplicationSet, error) {
-	s := &blocksStoreReplicationSet{
-		storesRing:         storesRing,
-		clientsPool:        newStoreGatewayClientPool(client.NewRingServiceDiscovery(storesRing), clientConfig, logger, reg),
-		shardingStrategy:   shardingStrategy,
-		balancingStrategy:  balancingStrategy,
-		limits:             limits,
-		subservicesWatcher: services.NewFailureWatcher(),
-	}
-
-	var err error
-	s.subservices, err = services.NewManager(s.storesRing, s.clientsPool)
-	if err != nil {
-		return nil, err
-	}
-
-	s.Service = services.NewBasicService(s.starting, s.running, s.stopping)
-
-	return s, nil
-}
-
-func (s *blocksStoreReplicationSet) starting(ctx context.Context) error {
-	s.subservicesWatcher.WatchManager(s.subservices)
-
-	if err := services.StartManagerAndAwaitHealthy(ctx, s.subservices); err != nil {
-		return errors.Wrap(err, "unable to start blocks store set subservices")
-	}
-
-	return nil
-}
-
-func (s *blocksStoreReplicationSet) running(ctx context.Context) error {
-	for {
-		select {
-		case <-ctx.Done():
-			return nil
-		case err := <-s.subservicesWatcher.Chan():
-			return errors.Wrap(err, "blocks store set subservice failed")
-		}
-	}
-}
-
-func (s *blocksStoreReplicationSet) stopping(_ error) error {
-	return services.StopManagerAndAwaitStopped(context.Background(), s.subservices)
-}
-
-func (s *blocksStoreReplicationSet) GetClientsFor(userID string, blockIDs []ulid.ULID, exclude map[ulid.ULID][]string) (map[BlocksStoreClient][]ulid.ULID, error) {
-	shards := map[string][]ulid.ULID{}
-
-	// If shuffle sharding is enabled, we should build a subring for the user,
-	// otherwise we just use the full ring.
-	var userRing ring.ReadRing
-	if s.shardingStrategy == util.ShardingStrategyShuffle {
-		userRing = storegateway.GetShuffleShardingSubring(s.storesRing, userID, s.limits)
-	} else {
-		userRing = s.storesRing
-	}
-
-	// Find the replication set of each block we need to query.
-	for _, blockID := range blockIDs {
-		// Do not reuse the same buffer across multiple Get() calls because we do retain the
-		// returned replication set.
-		bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet()
-
-		set, err := userRing.Get(cortex_tsdb.HashBlockID(blockID), storegateway.BlocksRead, bufDescs, bufHosts, bufZones)
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get store-gateway replication set owning the block %s", blockID.String())
-		}
-
-		// Pick a non-excluded store-gateway instance.
-		addr := getNonExcludedInstanceAddr(set, exclude[blockID], s.balancingStrategy)
-		if addr == "" {
-			return nil, fmt.Errorf("no store-gateway instance left after checking exclude for block %s", blockID.String())
-		}
-
-		shards[addr] = append(shards[addr], blockID)
-	}
-
-	clients := map[BlocksStoreClient][]ulid.ULID{}
-
-	// Get the client for each store-gateway.
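-	// Clients are taken from the gRPC client pool keyed by address, so repeated calls
-	// for the same store-gateway reuse an existing connection; the BlocksStoreClient
-	// assertion below is expected to hold because the pool is built by
-	// newStoreGatewayClientPool for exactly that client type.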
- for addr, blockIDs := range shards { - c, err := s.clientsPool.GetClientFor(addr) - if err != nil { - return nil, errors.Wrapf(err, "failed to get store-gateway client for %s", addr) - } - - clients[c.(BlocksStoreClient)] = blockIDs - } - - return clients, nil -} - -func getNonExcludedInstanceAddr(set ring.ReplicationSet, exclude []string, balancingStrategy loadBalancingStrategy) string { - if balancingStrategy == randomLoadBalancing { - // Randomize the list of instances to not always query the same one. - rand.Shuffle(len(set.Instances), func(i, j int) { - set.Instances[i], set.Instances[j] = set.Instances[j], set.Instances[i] - }) - } - - for _, instance := range set.Instances { - if !util.StringsContain(exclude, instance.Addr) { - return instance.Addr - } - } - - return "" -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go deleted file mode 100644 index 8c97f9eee..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go +++ /dev/null @@ -1,121 +0,0 @@ -package querier - -import ( - "context" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/querier/chunkstore" - seriesset "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/cortexproject/cortex/pkg/tenant" -) - -type chunkIteratorFunc func(chunks []chunk.Chunk, from, through model.Time) chunkenc.Iterator - -func newChunkStoreQueryable(store chunkstore.ChunkStore, chunkIteratorFunc chunkIteratorFunc) storage.Queryable { - return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return &chunkStoreQuerier{ - store: store, - chunkIteratorFunc: chunkIteratorFunc, - ctx: ctx, - mint: mint, - maxt: maxt, - }, nil - }) -} - -type chunkStoreQuerier struct { - store chunkstore.ChunkStore - chunkIteratorFunc chunkIteratorFunc - ctx context.Context - mint, maxt int64 -} - -// Select implements storage.Querier interface. -// The bool passed is ignored because the series is always sorted. -func (q *chunkStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - userID, err := tenant.TenantID(q.ctx) - if err != nil { - return storage.ErrSeriesSet(err) - } - - minT, maxT := q.mint, q.maxt - if sp != nil { - minT, maxT = sp.Start, sp.End - } - - // We will hit this for /series lookup when -querier.query-store-for-labels-enabled is set. - // If we don't skip here, it'll make /series lookups extremely slow as all the chunks will be loaded. - // That flag is only to be set with blocks storage engine, and this is a protective measure. - if sp != nil && sp.Func == "series" { - return storage.EmptySeriesSet() - } - - chunks, err := q.store.Get(q.ctx, userID, model.Time(minT), model.Time(maxT), matchers...) - if err != nil { - return storage.ErrSeriesSet(err) - } - - return partitionChunks(chunks, q.mint, q.maxt, q.chunkIteratorFunc) -} - -// Series in the returned set are sorted alphabetically by labels. 
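-// Chunks are grouped by their full label set, each group becomes one storage.Series
-// backed by that group's chunks, and NewConcreteSeriesSet performs the actual sort.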
-func partitionChunks(chunks []chunk.Chunk, mint, maxt int64, iteratorFunc chunkIteratorFunc) storage.SeriesSet { - chunksBySeries := map[string][]chunk.Chunk{} - for _, c := range chunks { - key := client.LabelsToKeyString(c.Metric) - chunksBySeries[key] = append(chunksBySeries[key], c) - } - - series := make([]storage.Series, 0, len(chunksBySeries)) - for i := range chunksBySeries { - series = append(series, &chunkSeries{ - labels: chunksBySeries[i][0].Metric, - chunks: chunksBySeries[i], - chunkIteratorFunc: iteratorFunc, - mint: mint, - maxt: maxt, - }) - } - - return seriesset.NewConcreteSeriesSet(series) -} - -func (q *chunkStoreQuerier) LabelValues(name string, labels ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, nil -} - -func (q *chunkStoreQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, nil -} - -func (q *chunkStoreQuerier) Close() error { - return nil -} - -// Implements SeriesWithChunks -type chunkSeries struct { - labels labels.Labels - chunks []chunk.Chunk - chunkIteratorFunc chunkIteratorFunc - mint, maxt int64 -} - -func (s *chunkSeries) Labels() labels.Labels { - return s.labels -} - -// Iterator returns a new iterator of the data of the series. -func (s *chunkSeries) Iterator() chunkenc.Iterator { - return s.chunkIteratorFunc(s.chunks, model.Time(s.mint), model.Time(s.maxt)) -} - -// Chunks implements SeriesWithChunks interface. -func (s *chunkSeries) Chunks() []chunk.Chunk { - return s.chunks -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go deleted file mode 100644 index d85987630..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go +++ /dev/null @@ -1,93 +0,0 @@ -package querier - -import ( - "archive/tar" - "compress/gzip" - "net/http" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/querier/chunkstore" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" -) - -// ChunksHandler allows you to fetch a compressed tar of all the chunks for a -// given time range and set of matchers. -// Only works with the new unified chunk querier, which is enabled when you turn -// on ingester chunk query streaming. -func ChunksHandler(queryable storage.Queryable) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userID, err := tenant.TenantID(r.Context()) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - mint, err := util.ParseTime(r.FormValue("start")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - maxt, err := util.ParseTime(r.FormValue("end")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - matchers, err := parser.ParseMetricSelector(r.FormValue("matcher")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - querier, err := queryable.Querier(r.Context(), mint, maxt) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - store, ok := querier.(chunkstore.ChunkStore) - if !ok { - http.Error(w, "not supported", http.StatusServiceUnavailable) - return - } - - chunks, err := store.Get(r.Context(), userID, model.Time(mint), model.Time(maxt), matchers...) 
- if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Add("Content-Type", "application/tar+gzip") - gw := gzip.NewWriter(w) - defer gw.Close() - - writer := tar.NewWriter(gw) - defer writer.Close() - - for _, chunk := range chunks { - buf, err := chunk.Encoded() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if err := writer.WriteHeader(&tar.Header{ - Name: chunk.ExternalKey(), - Size: int64(len(buf)), - Mode: 0600, - }); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if _, err := writer.Write(buf); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - } - }) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunkstore/chunkstore.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunkstore/chunkstore.go deleted file mode 100644 index 647ef6a28..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/chunkstore/chunkstore.go +++ /dev/null @@ -1,16 +0,0 @@ -package chunkstore - -import ( - "context" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/cortexproject/cortex/pkg/chunk" -) - -// ChunkStore is the read-interface to the Chunk Store. Made an interface here -// to reduce package coupling. -type ChunkStore interface { - Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go deleted file mode 100644 index 93e97e1e0..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go +++ /dev/null @@ -1,324 +0,0 @@ -package querier - -import ( - "context" - "sort" - "time" - - "github.com/go-kit/log/level" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/prom1/storage/metric" - "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/chunkcompat" - "github.com/cortexproject/cortex/pkg/util/math" - "github.com/cortexproject/cortex/pkg/util/spanlogger" -) - -// Distributor is the read interface to the distributor, made an interface here -// to reduce package coupling. 
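-// Most read methods come in paired stream/non-stream variants (e.g. Query vs
-// QueryStream, LabelNames vs LabelNamesStream); which variant gets called is decided
-// by the streaming flags wired into distributorQueryable below.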
-type Distributor interface { - Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) - QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (*client.QueryStreamResponse, error) - QueryExemplars(ctx context.Context, from, to model.Time, matchers ...[]*labels.Matcher) (*client.ExemplarQueryResponse, error) - LabelValuesForLabelName(ctx context.Context, from, to model.Time, label model.LabelName, matchers ...*labels.Matcher) ([]string, error) - LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, label model.LabelName, matchers ...*labels.Matcher) ([]string, error) - LabelNames(context.Context, model.Time, model.Time) ([]string, error) - LabelNamesStream(context.Context, model.Time, model.Time) ([]string, error) - MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) - MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) - MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) -} - -func newDistributorQueryable(distributor Distributor, streaming bool, streamingMetdata bool, iteratorFn chunkIteratorFunc, queryIngestersWithin time.Duration) QueryableWithFilter { - return distributorQueryable{ - distributor: distributor, - streaming: streaming, - streamingMetdata: streamingMetdata, - iteratorFn: iteratorFn, - queryIngestersWithin: queryIngestersWithin, - } -} - -type distributorQueryable struct { - distributor Distributor - streaming bool - streamingMetdata bool - iteratorFn chunkIteratorFunc - queryIngestersWithin time.Duration -} - -func (d distributorQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return &distributorQuerier{ - distributor: d.distributor, - ctx: ctx, - mint: mint, - maxt: maxt, - streaming: d.streaming, - streamingMetadata: d.streamingMetdata, - chunkIterFn: d.iteratorFn, - queryIngestersWithin: d.queryIngestersWithin, - }, nil -} - -func (d distributorQueryable) UseQueryable(now time.Time, _, queryMaxT int64) bool { - // Include ingester only if maxt is within QueryIngestersWithin w.r.t. current time. - return d.queryIngestersWithin == 0 || queryMaxT >= util.TimeToMillis(now.Add(-d.queryIngestersWithin)) -} - -type distributorQuerier struct { - distributor Distributor - ctx context.Context - mint, maxt int64 - streaming bool - streamingMetadata bool - chunkIterFn chunkIteratorFunc - queryIngestersWithin time.Duration -} - -// Select implements storage.Querier interface. -// The bool passed is ignored because the series is always sorted. -func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, ctx := spanlogger.New(q.ctx, "distributorQuerier.Select") - defer log.Span.Finish() - - minT, maxT := q.mint, q.maxt - if sp != nil { - minT, maxT = sp.Start, sp.End - } - - // If the querier receives a 'series' query, it means only metadata is needed. - // For this specific case we shouldn't apply the queryIngestersWithin - // time range manipulation, otherwise we'll end up returning no series at all for - // older time ranges (while in Cortex we do ignore the start/end and always return - // series in ingesters). - // Also, in the recent versions of Prometheus, we pass in the hint but with Func set to "series". 
- // See: https://github.com/prometheus/prometheus/pull/8050 - if sp != nil && sp.Func == "series" { - var ( - ms []metric.Metric - err error - ) - - if q.streamingMetadata { - ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) - } else { - ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) - } - - if err != nil { - return storage.ErrSeriesSet(err) - } - return series.MetricsToSeriesSet(ms) - } - - // If queryIngestersWithin is enabled, we do manipulate the query mint to query samples up until - // now - queryIngestersWithin, because older time ranges are covered by the storage. This - // optimization is particularly important for the blocks storage where the blocks retention in the - // ingesters could be way higher than queryIngestersWithin. - if q.queryIngestersWithin > 0 { - now := time.Now() - origMinT := minT - minT = math.Max64(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin))) - - if origMinT != minT { - level.Debug(log).Log("msg", "the min time of the query to ingesters has been manipulated", "original", origMinT, "updated", minT) - } - - if minT > maxT { - level.Debug(log).Log("msg", "empty query time range after min time manipulation") - return storage.EmptySeriesSet() - } - } - - if q.streaming { - return q.streamingSelect(ctx, minT, maxT, matchers) - } - - matrix, err := q.distributor.Query(ctx, model.Time(minT), model.Time(maxT), matchers...) - if err != nil { - return storage.ErrSeriesSet(err) - } - - // Using MatrixToSeriesSet (and in turn NewConcreteSeriesSet), sorts the series. - return series.MatrixToSeriesSet(matrix) -} - -func (q *distributorQuerier) streamingSelect(ctx context.Context, minT, maxT int64, matchers []*labels.Matcher) storage.SeriesSet { - userID, err := tenant.TenantID(ctx) - if err != nil { - return storage.ErrSeriesSet(err) - } - - results, err := q.distributor.QueryStream(ctx, model.Time(minT), model.Time(maxT), matchers...) - if err != nil { - return storage.ErrSeriesSet(err) - } - - sets := []storage.SeriesSet(nil) - if len(results.Timeseries) > 0 { - sets = append(sets, newTimeSeriesSeriesSet(results.Timeseries)) - } - - serieses := make([]storage.Series, 0, len(results.Chunkseries)) - for _, result := range results.Chunkseries { - // Sometimes the ingester can send series that have no data. - if len(result.Chunks) == 0 { - continue - } - - ls := cortexpb.FromLabelAdaptersToLabels(result.Labels) - sort.Sort(ls) - - chunks, err := chunkcompat.FromChunks(userID, ls, result.Chunks) - if err != nil { - return storage.ErrSeriesSet(err) - } - - serieses = append(serieses, &chunkSeries{ - labels: ls, - chunks: chunks, - chunkIteratorFunc: q.chunkIterFn, - mint: minT, - maxt: maxT, - }) - } - - if len(serieses) > 0 { - sets = append(sets, series.NewConcreteSeriesSet(serieses)) - } - - if len(sets) == 0 { - return storage.EmptySeriesSet() - } - if len(sets) == 1 { - return sets[0] - } - // Sets need to be sorted. Both series.NewConcreteSeriesSet and newTimeSeriesSeriesSet take care of that. - return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) -} - -func (q *distributorQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - var ( - lvs []string - err error - ) - - if q.streamingMetadata { - lvs, err = q.distributor.LabelValuesForLabelNameStream(q.ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...) 
-	} else {
-		lvs, err = q.distributor.LabelValuesForLabelName(q.ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name), matchers...)
-	}
-
-	return lvs, nil, err
-}
-
-func (q *distributorQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	if len(matchers) > 0 {
-		return q.labelNamesWithMatchers(matchers...)
-	}
-
-	log, ctx := spanlogger.New(q.ctx, "distributorQuerier.LabelNames")
-	defer log.Span.Finish()
-
-	var (
-		ln  []string
-		err error
-	)
-
-	if q.streamingMetadata {
-		ln, err = q.distributor.LabelNamesStream(ctx, model.Time(q.mint), model.Time(q.maxt))
-	} else {
-		ln, err = q.distributor.LabelNames(ctx, model.Time(q.mint), model.Time(q.maxt))
-	}
-
-	return ln, nil, err
-}
-
-// labelNamesWithMatchers performs the LabelNames call by calling the ingester's MetricsForLabelMatchers method.
-func (q *distributorQuerier) labelNamesWithMatchers(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
-	log, ctx := spanlogger.New(q.ctx, "distributorQuerier.labelNamesWithMatchers")
-	defer log.Span.Finish()
-
-	var (
-		ms  []metric.Metric
-		err error
-	)
-
-	if q.streamingMetadata {
-		ms, err = q.distributor.MetricsForLabelMatchersStream(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...)
-	} else {
-		ms, err = q.distributor.MetricsForLabelMatchers(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...)
-	}
-
-	if err != nil {
-		return nil, nil, err
-	}
-	namesMap := make(map[string]struct{})
-
-	for _, m := range ms {
-		for name := range m.Metric {
-			namesMap[string(name)] = struct{}{}
-		}
-	}
-
-	names := make([]string, 0, len(namesMap))
-	for name := range namesMap {
-		names = append(names, name)
-	}
-	sort.Strings(names)
-
-	return names, nil, nil
-}
-
-func (q *distributorQuerier) Close() error {
-	return nil
-}
-
-type distributorExemplarQueryable struct {
-	distributor Distributor
-}
-
-func newDistributorExemplarQueryable(d Distributor) storage.ExemplarQueryable {
-	return &distributorExemplarQueryable{
-		distributor: d,
-	}
-}
-
-func (d distributorExemplarQueryable) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
-	return &distributorExemplarQuerier{
-		distributor: d.distributor,
-		ctx:         ctx,
-	}, nil
-}
-
-type distributorExemplarQuerier struct {
-	distributor Distributor
-	ctx         context.Context
-}
-
-// Select queries for exemplars; Prometheus' storage.ExemplarQuerier Select method takes the time range as two int64 values.
-func (q *distributorExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) {
-	allResults, err := q.distributor.QueryExemplars(q.ctx, model.Time(start), model.Time(end), matchers...)
- - if err != nil { - return nil, err - } - - var e exemplar.QueryResult - ret := make([]exemplar.QueryResult, len(allResults.Timeseries)) - for i, ts := range allResults.Timeseries { - e.SeriesLabels = cortexpb.FromLabelAdaptersToLabels(ts.Labels) - e.Exemplars = cortexpb.FromExemplarProtosToExemplars(ts.Exemplars) - ret[i] = e - } - return ret, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/dummy.go b/vendor/github.com/cortexproject/cortex/pkg/querier/dummy.go deleted file mode 100644 index 609eebe85..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/dummy.go +++ /dev/null @@ -1,43 +0,0 @@ -package querier - -import ( - "net/url" - - "github.com/prometheus/prometheus/rules" - "github.com/prometheus/prometheus/scrape" -) - -// DummyTargetRetriever implements github.com/prometheus/prometheus/web/api/v1.targetRetriever. -type DummyTargetRetriever struct{} - -// TargetsActive implements targetRetriever. -func (DummyTargetRetriever) TargetsActive() map[string][]*scrape.Target { - return map[string][]*scrape.Target{} -} - -// TargetsDropped implements targetRetriever. -func (DummyTargetRetriever) TargetsDropped() map[string][]*scrape.Target { - return map[string][]*scrape.Target{} -} - -// DummyAlertmanagerRetriever implements AlertmanagerRetriever. -type DummyAlertmanagerRetriever struct{} - -// Alertmanagers implements AlertmanagerRetriever. -func (DummyAlertmanagerRetriever) Alertmanagers() []*url.URL { return nil } - -// DroppedAlertmanagers implements AlertmanagerRetriever. -func (DummyAlertmanagerRetriever) DroppedAlertmanagers() []*url.URL { return nil } - -// DummyRulesRetriever implements RulesRetriever. -type DummyRulesRetriever struct{} - -// RuleGroups implements RulesRetriever. -func (DummyRulesRetriever) RuleGroups() []*rules.Group { - return nil -} - -// AlertingRules implements RulesRetriever. -func (DummyRulesRetriever) AlertingRules() []*rules.AlertingRule { - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/error_translate_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/error_translate_queryable.go deleted file mode 100644 index 95f006b7a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/error_translate_queryable.go +++ /dev/null @@ -1,206 +0,0 @@ -package querier - -import ( - "context" - - "github.com/gogo/status" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// TranslateToPromqlAPIError converts error to one of promql.Errors for consumption in PromQL API. -// PromQL API only recognizes few errors, and converts everything else to HTTP status code 422. -// -// Specifically, it supports: -// -// promql.ErrQueryCanceled, mapped to 503 -// promql.ErrQueryTimeout, mapped to 503 -// promql.ErrStorage mapped to 500 -// anything else is mapped to 422 -// -// Querier code produces different kinds of errors, and we want to map them to above-mentioned HTTP status codes correctly. -// -// Details: -// - vendor/github.com/prometheus/prometheus/web/api/v1/api.go, respondError function only accepts *apiError types. -// - translation of error to *apiError happens in vendor/github.com/prometheus/prometheus/web/api/v1/api.go, returnAPIError method. 
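-// As a worked example of the mapping below: a (gRPC-carried) status code in the 4xx
-// range is returned unchanged and surfaces as HTTP 422, while a 5xx code is wrapped
-// into promql.ErrStorage and surfaces as HTTP 500.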
-func TranslateToPromqlAPIError(err error) error { - if err == nil { - return err - } - - switch errors.Cause(err).(type) { - case promql.ErrStorage, promql.ErrTooManySamples, promql.ErrQueryCanceled, promql.ErrQueryTimeout: - // Don't translate those, just in case we use them internally. - return err - case chunk.QueryError, validation.LimitError: - // This will be returned with status code 422 by Prometheus API. - return err - default: - if errors.Is(err, context.Canceled) { - return err // 422 - } - - s, ok := status.FromError(err) - - if !ok { - s, ok = status.FromError(errors.Cause(err)) - } - - if ok { - code := s.Code() - - // Treat these as HTTP status codes, even though they are supposed to be grpc codes. - if code >= 400 && code < 500 { - // Return directly, will be mapped to 422 - return err - } else if code >= 500 && code < 599 { - // Wrap into ErrStorage for mapping to 500 - return promql.ErrStorage{Err: err} - } - } - - // All other errors will be returned as 500. - return promql.ErrStorage{Err: err} - } -} - -// ErrTranslateFn is used to translate or wrap error before returning it by functions in -// storage.SampleAndChunkQueryable interface. -// Input error may be nil. -type ErrTranslateFn func(err error) error - -func NewErrorTranslateQueryable(q storage.Queryable) storage.Queryable { - return NewErrorTranslateQueryableWithFn(q, TranslateToPromqlAPIError) -} - -func NewErrorTranslateQueryableWithFn(q storage.Queryable, fn ErrTranslateFn) storage.Queryable { - return errorTranslateQueryable{q: q, fn: fn} -} - -func NewErrorTranslateSampleAndChunkQueryable(q storage.SampleAndChunkQueryable) storage.SampleAndChunkQueryable { - return NewErrorTranslateSampleAndChunkQueryableWithFn(q, TranslateToPromqlAPIError) -} - -func NewErrorTranslateSampleAndChunkQueryableWithFn(q storage.SampleAndChunkQueryable, fn ErrTranslateFn) storage.SampleAndChunkQueryable { - return errorTranslateSampleAndChunkQueryable{q: q, fn: fn} -} - -type errorTranslateQueryable struct { - q storage.Queryable - fn ErrTranslateFn -} - -func (e errorTranslateQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - q, err := e.q.Querier(ctx, mint, maxt) - return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err) -} - -type errorTranslateSampleAndChunkQueryable struct { - q storage.SampleAndChunkQueryable - fn ErrTranslateFn -} - -func (e errorTranslateSampleAndChunkQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - q, err := e.q.Querier(ctx, mint, maxt) - return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err) -} - -func (e errorTranslateSampleAndChunkQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - q, err := e.q.ChunkQuerier(ctx, mint, maxt) - return errorTranslateChunkQuerier{q: q, fn: e.fn}, e.fn(err) -} - -type errorTranslateQuerier struct { - q storage.Querier - fn ErrTranslateFn -} - -func (e errorTranslateQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelValues(name, matchers...) - return values, warnings, e.fn(err) -} - -func (e errorTranslateQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelNames(matchers...) 
- return values, warnings, e.fn(err) -} - -func (e errorTranslateQuerier) Close() error { - return e.fn(e.q.Close()) -} - -func (e errorTranslateQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - s := e.q.Select(sortSeries, hints, matchers...) - return errorTranslateSeriesSet{s: s, fn: e.fn} -} - -type errorTranslateChunkQuerier struct { - q storage.ChunkQuerier - fn ErrTranslateFn -} - -func (e errorTranslateChunkQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelValues(name, matchers...) - return values, warnings, e.fn(err) -} - -func (e errorTranslateChunkQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - values, warnings, err := e.q.LabelNames(matchers...) - return values, warnings, e.fn(err) -} - -func (e errorTranslateChunkQuerier) Close() error { - return e.fn(e.q.Close()) -} - -func (e errorTranslateChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { - s := e.q.Select(sortSeries, hints, matchers...) - return errorTranslateChunkSeriesSet{s: s, fn: e.fn} -} - -type errorTranslateSeriesSet struct { - s storage.SeriesSet - fn ErrTranslateFn -} - -func (e errorTranslateSeriesSet) Next() bool { - return e.s.Next() -} - -func (e errorTranslateSeriesSet) At() storage.Series { - return e.s.At() -} - -func (e errorTranslateSeriesSet) Err() error { - return e.fn(e.s.Err()) -} - -func (e errorTranslateSeriesSet) Warnings() storage.Warnings { - return e.s.Warnings() -} - -type errorTranslateChunkSeriesSet struct { - s storage.ChunkSeriesSet - fn ErrTranslateFn -} - -func (e errorTranslateChunkSeriesSet) Next() bool { - return e.s.Next() -} - -func (e errorTranslateChunkSeriesSet) At() storage.ChunkSeries { - return e.s.At() -} - -func (e errorTranslateChunkSeriesSet) Err() error { - return e.fn(e.s.Err()) -} - -func (e errorTranslateChunkSeriesSet) Warnings() storage.Warnings { - return e.s.Warnings() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_iterator.go b/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_iterator.go deleted file mode 100644 index 0f8f7347e..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_iterator.go +++ /dev/null @@ -1,64 +0,0 @@ -package iterators - -import ( - "github.com/prometheus/common/model" - - "github.com/cortexproject/cortex/pkg/chunk" - promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" -) - -type chunkIterator struct { - chunk.Chunk - it promchunk.Iterator - - // At() is called often in the heap code, so caching its result seems like - // a good idea. - cacheValid bool - cachedTime int64 - cachedValue float64 -} - -// Seek advances the iterator forward to the value at or after -// the given timestamp. -func (i *chunkIterator) Seek(t int64) bool { - i.cacheValid = false - - // We assume seeks only care about a specific window; if this chunk doesn't - // contain samples in that window, we can shortcut. 
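-	// Through is the end timestamp covered by this chunk, so if it falls before t the
-	// chunk cannot contain a sample at or after t and decoding can be skipped entirely.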
- if int64(i.Through) < t { - return false - } - - return i.it.FindAtOrAfter(model.Time(t)) -} - -func (i *chunkIterator) AtTime() int64 { - if i.cacheValid { - return i.cachedTime - } - - v := i.it.Value() - i.cachedTime, i.cachedValue = int64(v.Timestamp), float64(v.Value) - i.cacheValid = true - return i.cachedTime -} - -func (i *chunkIterator) At() (int64, float64) { - if i.cacheValid { - return i.cachedTime, i.cachedValue - } - - v := i.it.Value() - i.cachedTime, i.cachedValue = int64(v.Timestamp), float64(v.Value) - i.cacheValid = true - return i.cachedTime, i.cachedValue -} - -func (i *chunkIterator) Next() bool { - i.cacheValid = false - return i.it.Scan() -} - -func (i *chunkIterator) Err() error { - return i.it.Err() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_merge_iterator.go b/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_merge_iterator.go deleted file mode 100644 index c056fa209..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_merge_iterator.go +++ /dev/null @@ -1,208 +0,0 @@ -package iterators - -import ( - "container/heap" - "sort" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/chunk" -) - -type chunkMergeIterator struct { - its []*nonOverlappingIterator - h seriesIteratorHeap - - currTime int64 - currValue float64 - currErr error -} - -// NewChunkMergeIterator creates a chunkenc.Iterator for a set of chunks. -func NewChunkMergeIterator(cs []chunk.Chunk, _, _ model.Time) chunkenc.Iterator { - its := buildIterators(cs) - c := &chunkMergeIterator{ - currTime: -1, - its: its, - h: make(seriesIteratorHeap, 0, len(its)), - } - - for _, iter := range c.its { - if iter.Next() { - c.h = append(c.h, iter) - continue - } - - if err := iter.Err(); err != nil { - c.currErr = err - } - } - - heap.Init(&c.h) - return c -} - -// Build a list of lists of non-overlapping chunk iterators. 
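-// This is a greedy interval partitioning: with chunks sorted by From, each chunk is
-// appended to the first list whose last chunk ends before the new one starts, else a
-// new list begins. E.g. chunks spanning [0,10], [5,15], [12,20] yield the two lists
-// {[0,10], [12,20]} and {[5,15]}.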
-func buildIterators(cs []chunk.Chunk) []*nonOverlappingIterator { - chunks := make([]*chunkIterator, len(cs)) - for i := range cs { - chunks[i] = &chunkIterator{ - Chunk: cs[i], - it: cs[i].Data.NewIterator(nil), - } - } - sort.Sort(byFrom(chunks)) - - chunkLists := [][]*chunkIterator{} -outer: - for _, chunk := range chunks { - for i, chunkList := range chunkLists { - if chunkList[len(chunkList)-1].Through.Before(chunk.From) { - chunkLists[i] = append(chunkLists[i], chunk) - continue outer - } - } - chunkLists = append(chunkLists, []*chunkIterator{chunk}) - } - - its := make([]*nonOverlappingIterator, 0, len(chunkLists)) - for _, chunkList := range chunkLists { - its = append(its, newNonOverlappingIterator(chunkList)) - } - return its -} - -func (c *chunkMergeIterator) Seek(t int64) bool { - c.h = c.h[:0] - - for _, iter := range c.its { - if iter.Seek(t) { - c.h = append(c.h, iter) - continue - } - - if err := iter.Err(); err != nil { - c.currErr = err - return false - } - } - - heap.Init(&c.h) - - if len(c.h) > 0 { - c.currTime, c.currValue = c.h[0].At() - return true - } - - return false -} - -func (c *chunkMergeIterator) Next() bool { - if len(c.h) == 0 { - return false - } - - lastTime := c.currTime - for c.currTime == lastTime && len(c.h) > 0 { - c.currTime, c.currValue = c.h[0].At() - - if c.h[0].Next() { - heap.Fix(&c.h, 0) - continue - } - - iter := heap.Pop(&c.h).(chunkenc.Iterator) - if err := iter.Err(); err != nil { - c.currErr = err - return false - } - } - - return c.currTime != lastTime -} - -func (c *chunkMergeIterator) At() (t int64, v float64) { - return c.currTime, c.currValue -} - -func (c *chunkMergeIterator) Err() error { - return c.currErr -} - -type extraIterator interface { - chunkenc.Iterator - AtTime() int64 -} - -type seriesIteratorHeap []extraIterator - -func (h *seriesIteratorHeap) Len() int { return len(*h) } -func (h *seriesIteratorHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] } - -func (h *seriesIteratorHeap) Less(i, j int) bool { - iT := (*h)[i].AtTime() - jT := (*h)[j].AtTime() - return iT < jT -} - -func (h *seriesIteratorHeap) Push(x interface{}) { - *h = append(*h, x.(extraIterator)) -} - -func (h *seriesIteratorHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - -type byFrom []*chunkIterator - -func (b byFrom) Len() int { return len(b) } -func (b byFrom) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byFrom) Less(i, j int) bool { return b[i].From < b[j].From } - -type nonOverlappingIterator struct { - curr int - chunks []*chunkIterator -} - -// newNonOverlappingIterator returns a single iterator over an slice of sorted, -// non-overlapping iterators. 
-func newNonOverlappingIterator(chunks []*chunkIterator) *nonOverlappingIterator { - return &nonOverlappingIterator{ - chunks: chunks, - } -} - -func (it *nonOverlappingIterator) Seek(t int64) bool { - for ; it.curr < len(it.chunks); it.curr++ { - if it.chunks[it.curr].Seek(t) { - return true - } - } - - return false -} - -func (it *nonOverlappingIterator) Next() bool { - for it.curr < len(it.chunks) && !it.chunks[it.curr].Next() { - it.curr++ - } - - return it.curr < len(it.chunks) -} - -func (it *nonOverlappingIterator) AtTime() int64 { - return it.chunks[it.curr].AtTime() -} - -func (it *nonOverlappingIterator) At() (int64, float64) { - return it.chunks[it.curr].At() -} - -func (it *nonOverlappingIterator) Err() error { - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go b/vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go deleted file mode 100644 index 6bdaf4d89..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go +++ /dev/null @@ -1,117 +0,0 @@ -package lazyquery - -import ( - "context" - "fmt" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/querier/chunkstore" -) - -// LazyQueryable wraps a storage.Queryable -type LazyQueryable struct { - q storage.Queryable -} - -// Querier implements storage.Queryable -func (lq LazyQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - q, err := lq.q.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - - return NewLazyQuerier(q), nil -} - -// NewLazyQueryable returns a lazily wrapped queryable -func NewLazyQueryable(q storage.Queryable) storage.Queryable { - return LazyQueryable{q} -} - -// LazyQuerier is a lazy-loaded adapter for a storage.Querier -type LazyQuerier struct { - next storage.Querier -} - -// NewLazyQuerier wraps a storage.Querier, does the Select in the background. -// Return value cannot be used from more than one goroutine simultaneously. -func NewLazyQuerier(next storage.Querier) storage.Querier { - return LazyQuerier{next} -} - -// Select implements Storage.Querier -func (l LazyQuerier) Select(selectSorted bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - // make sure there is space in the buffer, to unblock the goroutine and let it die even if nobody is - // waiting for the result yet (or anymore). - future := make(chan storage.SeriesSet, 1) - go func() { - future <- l.next.Select(selectSorted, params, matchers...) - }() - - return &lazySeriesSet{ - future: future, - } -} - -// LabelValues implements Storage.Querier -func (l LazyQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return l.next.LabelValues(name, matchers...) -} - -// LabelNames implements Storage.Querier -func (l LazyQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return l.next.LabelNames(matchers...) -} - -// Close implements Storage.Querier -func (l LazyQuerier) Close() error { - return l.next.Close() -} - -// Get implements chunk.Store for the chunk tar HTTP handler. 
-func (l LazyQuerier) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { - store, ok := l.next.(chunkstore.ChunkStore) - if !ok { - return nil, fmt.Errorf("not supported") - } - - return store.Get(ctx, userID, from, through, matchers...) -} - -type lazySeriesSet struct { - next storage.SeriesSet - future chan storage.SeriesSet -} - -// Next implements storage.SeriesSet. NB not thread safe! -func (s *lazySeriesSet) Next() bool { - if s.next == nil { - s.next = <-s.future - } - return s.next.Next() -} - -// At implements storage.SeriesSet. -func (s *lazySeriesSet) At() storage.Series { - if s.next == nil { - s.next = <-s.future - } - return s.next.At() -} - -// Err implements storage.SeriesSet. -func (s *lazySeriesSet) Err() error { - if s.next == nil { - s.next = <-s.future - } - return s.next.Err() -} - -// Warnings implements storage.SeriesSet. -func (s *lazySeriesSet) Warnings() storage.Warnings { - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/matrix.go b/vendor/github.com/cortexproject/cortex/pkg/querier/matrix.go deleted file mode 100644 index 6367bc791..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/matrix.go +++ /dev/null @@ -1,25 +0,0 @@ -package querier - -import ( - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/cortexproject/cortex/pkg/util" -) - -func mergeChunks(chunks []chunk.Chunk, from, through model.Time) chunkenc.Iterator { - samples := make([][]model.SamplePair, 0, len(chunks)) - for _, c := range chunks { - ss, err := c.Samples(from, through) - if err != nil { - return series.NewErrIterator(err) - } - - samples = append(samples, ss) - } - - merged := util.MergeNSampleSets(samples...) - return series.NewConcreteSeriesIterator(series.NewConcreteSeries(nil, merged)) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/metadata_handler.go b/vendor/github.com/cortexproject/cortex/pkg/querier/metadata_handler.go deleted file mode 100644 index 1db757d35..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/metadata_handler.go +++ /dev/null @@ -1,51 +0,0 @@ -package querier - -import ( - "net/http" - - "github.com/cortexproject/cortex/pkg/util" -) - -type metricMetadata struct { - Type string `json:"type"` - Help string `json:"help"` - Unit string `json:"unit"` -} - -const ( - statusSuccess = "success" - statusError = "error" -) - -type metadataResult struct { - Status string `json:"status"` - Data map[string][]metricMetadata `json:"data,omitempty"` - Error string `json:"error,omitempty"` -} - -// MetadataHandler returns metric metadata held by Cortex for a given tenant. -// It is kept and returned as a set. -func MetadataHandler(d Distributor) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - resp, err := d.MetricsMetadata(r.Context()) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - util.WriteJSONResponse(w, metadataResult{Status: statusError, Error: err.Error()}) - return - } - - // Put all the elements of the pseudo-set into a map of slices for marshalling. - metrics := map[string][]metricMetadata{} - for _, m := range resp { - ms, ok := metrics[m.Metric] - if !ok { - // Most metrics will only hold 1 copy of the same metadata. 
- ms = make([]metricMetadata, 0, 1) - metrics[m.Metric] = ms - } - metrics[m.Metric] = append(ms, metricMetadata{Type: string(m.Type), Help: m.Help, Unit: m.Unit}) - } - - util.WriteJSONResponse(w, metadataResult{Status: statusSuccess, Data: metrics}) - }) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go deleted file mode 100644 index 8993ea806..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ /dev/null @@ -1,646 +0,0 @@ -package querier - -import ( - "context" - "errors" - "flag" - "fmt" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - "github.com/thanos-io/thanos/pkg/strutil" - "golang.org/x/sync/errgroup" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/querier/batch" - "github.com/cortexproject/cortex/pkg/querier/chunkstore" - "github.com/cortexproject/cortex/pkg/querier/iterators" - "github.com/cortexproject/cortex/pkg/querier/lazyquery" - "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/cortexproject/cortex/pkg/util/limiter" - "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// Config contains the configuration require to create a querier -type Config struct { - MaxConcurrent int `yaml:"max_concurrent"` - Timeout time.Duration `yaml:"timeout"` - Iterators bool `yaml:"iterators"` - BatchIterators bool `yaml:"batch_iterators"` - IngesterStreaming bool `yaml:"ingester_streaming"` - IngesterMetadataStreaming bool `yaml:"ingester_metadata_streaming"` - MaxSamples int `yaml:"max_samples"` - QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` - QueryStoreForLabels bool `yaml:"query_store_for_labels_enabled"` - AtModifierEnabled bool `yaml:"at_modifier_enabled"` - EnablePerStepStats bool `yaml:"per_step_stats_enabled"` - - // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. - QueryStoreAfter time.Duration `yaml:"query_store_after"` - MaxQueryIntoFuture time.Duration `yaml:"max_query_into_future"` - - // The default evaluation interval for the promql engine. - // Needs to be configured for subqueries to work as it is the default - // step if not specified. - DefaultEvaluationInterval time.Duration `yaml:"default_evaluation_interval"` - - // Directory for ActiveQueryTracker. If empty, ActiveQueryTracker will be disabled and MaxConcurrent will not be applied (!). - // ActiveQueryTracker logs queries that were active during the last crash, but logs them on the next startup. - // However, we need to use active query tracker, otherwise we cannot limit Max Concurrent queries in the PromQL - // engine. - ActiveQueryTrackerDir string `yaml:"active_query_tracker_dir"` - // LookbackDelta determines the time since the last sample after which a time - // series is considered stale. - LookbackDelta time.Duration `yaml:"lookback_delta"` - - // Blocks storage only. 
- StoreGatewayAddresses string `yaml:"store_gateway_addresses"`
- StoreGatewayClient ClientConfig `yaml:"store_gateway_client"`
-
- SecondStoreEngine string `yaml:"second_store_engine"`
- UseSecondStoreBeforeTime flagext.Time `yaml:"use_second_store_before_time"`
-
- ShuffleShardingIngestersLookbackPeriod time.Duration `yaml:"shuffle_sharding_ingesters_lookback_period"`
-}
-
-var (
- errBadLookbackConfigs = errors.New("bad settings, query_store_after >= query_ingesters_within which can result in queries not being sent")
- errShuffleShardingLookbackLessThanQueryStoreAfter = errors.New("the shuffle-sharding lookback period should be greater or equal than the configured 'query store after'")
- errEmptyTimeRange = errors.New("empty time range")
-)
-
-// RegisterFlags adds the flags required to config this to the given FlagSet.
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- cfg.StoreGatewayClient.RegisterFlagsWithPrefix("querier.store-gateway-client", f)
- f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.")
- f.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, "The timeout for a query.")
- f.BoolVar(&cfg.Iterators, "querier.iterators", false, "Use iterators to execute query, as opposed to fully materialising the series in memory.")
- f.BoolVar(&cfg.BatchIterators, "querier.batch-iterators", true, "Use batch iterators to execute query, as opposed to fully materialising the series in memory. Takes precedent over the -querier.iterators flag.")
- f.BoolVar(&cfg.IngesterStreaming, "querier.ingester-streaming", true, "Use streaming RPCs to query ingester.")
- f.BoolVar(&cfg.IngesterMetadataStreaming, "querier.ingester-metadata-streaming", false, "Use streaming RPCs for metadata APIs from ingester.")
- f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.")
- f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
- f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.")
- f.BoolVar(&cfg.AtModifierEnabled, "querier.at-modifier-enabled", false, "Enable the @ modifier in PromQL.")
- f.BoolVar(&cfg.EnablePerStepStats, "querier.per-step-stats-enabled", false, "Enable returning samples stats per steps in query response.")
- f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.")
- f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.")
- f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.")
- f.StringVar(&cfg.ActiveQueryTrackerDir, "querier.active-query-tracker-dir", "./active-query-tracker", "Active query tracker monitors active queries, and writes them to the file in given directory. If Cortex discovers any queries in this log during startup, it will log them to the log file. Setting to empty value disables active query tracker, which also disables -querier.max-concurrent option.")
- f.StringVar(&cfg.StoreGatewayAddresses, "querier.store-gateway-addresses", "", "Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).")
- f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", 5*time.Minute, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.")
- f.StringVar(&cfg.SecondStoreEngine, "querier.second-store-engine", "", "Second store engine to use for querying. Empty = disabled.")
- f.Var(&cfg.UseSecondStoreBeforeTime, "querier.use-second-store-before-time", "If specified, second store is only used for queries before this timestamp. Default value 0 means secondary store is always queried.")
- f.DurationVar(&cfg.ShuffleShardingIngestersLookbackPeriod, "querier.shuffle-sharding-ingesters-lookback-period", 0, "When distributor's sharding strategy is shuffle-sharding and this setting is > 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured 'query store after' and 'query ingesters within'. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).")
-}
-
-// Validate the config
-func (cfg *Config) Validate() error {
- // Ensure the config wont create a situation where no queriers are returned.
- if cfg.QueryIngestersWithin != 0 && cfg.QueryStoreAfter != 0 {
- if cfg.QueryStoreAfter >= cfg.QueryIngestersWithin {
- return errBadLookbackConfigs
- }
- }
-
- if cfg.ShuffleShardingIngestersLookbackPeriod > 0 {
- if cfg.ShuffleShardingIngestersLookbackPeriod < cfg.QueryStoreAfter {
- return errShuffleShardingLookbackLessThanQueryStoreAfter
- }
- }
-
- return nil
-}
-
-func (cfg *Config) GetStoreGatewayAddresses() []string {
- if cfg.StoreGatewayAddresses == "" {
- return nil
- }
-
- return strings.Split(cfg.StoreGatewayAddresses, ",")
-}
-
-func getChunksIteratorFunction(cfg Config) chunkIteratorFunc {
- if cfg.BatchIterators {
- return batch.NewChunkMergeIterator
- } else if cfg.Iterators {
- return iterators.NewChunkMergeIterator
- }
- return mergeChunks
-}
-
-// NewChunkStoreQueryable returns the storage.Queryable implementation against the chunks store.
-func NewChunkStoreQueryable(cfg Config, chunkStore chunkstore.ChunkStore) storage.Queryable {
- return newChunkStoreQueryable(chunkStore, getChunksIteratorFunction(cfg))
-}
-
-// New builds a queryable and promql engine.
-func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, tombstonesLoader *purger.TombstonesLoader, reg prometheus.Registerer, logger log.Logger) (storage.SampleAndChunkQueryable, storage.ExemplarQueryable, *promql.Engine) { - iteratorFunc := getChunksIteratorFunction(cfg) - - distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterStreaming, cfg.IngesterMetadataStreaming, iteratorFunc, cfg.QueryIngestersWithin) - - ns := make([]QueryableWithFilter, len(stores)) - for ix, s := range stores { - ns[ix] = storeQueryable{ - QueryableWithFilter: s, - QueryStoreAfter: cfg.QueryStoreAfter, - } - } - queryable := NewQueryable(distributorQueryable, ns, iteratorFunc, cfg, limits, tombstonesLoader) - exemplarQueryable := newDistributorExemplarQueryable(distributor) - - lazyQueryable := storage.QueryableFunc(func(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { - querier, err := queryable.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - return lazyquery.NewLazyQuerier(querier), nil - }) - - engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, - Reg: reg, - ActiveQueryTracker: createActiveQueryTracker(cfg, logger), - MaxSamples: cfg.MaxSamples, - Timeout: cfg.Timeout, - LookbackDelta: cfg.LookbackDelta, - EnablePerStepStats: cfg.EnablePerStepStats, - EnableAtModifier: cfg.AtModifierEnabled, - NoStepSubqueryIntervalFn: func(int64) int64 { - return cfg.DefaultEvaluationInterval.Milliseconds() - }, - }) - return NewSampleAndChunkQueryable(lazyQueryable), exemplarQueryable, engine -} - -// NewSampleAndChunkQueryable creates a SampleAndChunkQueryable from a -// Queryable with a ChunkQueryable stub, that errors once it get's called. -func NewSampleAndChunkQueryable(q storage.Queryable) storage.SampleAndChunkQueryable { - return &sampleAndChunkQueryable{q} -} - -type sampleAndChunkQueryable struct { - storage.Queryable -} - -func (q *sampleAndChunkQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { - return nil, errors.New("ChunkQuerier not implemented") -} - -func createActiveQueryTracker(cfg Config, logger log.Logger) promql.QueryTracker { - dir := cfg.ActiveQueryTrackerDir - - if dir != "" { - return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, logger) - } - - return nil -} - -// QueryableWithFilter extends Queryable interface with `UseQueryable` filtering function. -type QueryableWithFilter interface { - storage.Queryable - - // UseQueryable returns true if this queryable should be used to satisfy the query for given time range. - // Query min and max time are in milliseconds since epoch. - UseQueryable(now time.Time, queryMinT, queryMaxT int64) bool -} - -// NewQueryable creates a new Queryable for cortex. 
-func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, chunkIterFn chunkIteratorFunc, cfg Config, limits *validation.Overrides, tombstonesLoader *purger.TombstonesLoader) storage.Queryable { - return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - now := time.Now() - - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(limits.MaxFetchedSeriesPerQuery(userID), limits.MaxFetchedChunkBytesPerQuery(userID), limits.MaxChunksPerQuery(userID))) - - mint, maxt, err = validateQueryTimeRange(ctx, userID, mint, maxt, limits, cfg.MaxQueryIntoFuture) - if err == errEmptyTimeRange { - return storage.NoopQuerier(), nil - } else if err != nil { - return nil, err - } - - q := querier{ - ctx: ctx, - mint: mint, - maxt: maxt, - chunkIterFn: chunkIterFn, - tombstonesLoader: tombstonesLoader, - limits: limits, - maxQueryIntoFuture: cfg.MaxQueryIntoFuture, - queryStoreForLabels: cfg.QueryStoreForLabels, - } - - dqr, err := distributor.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - - q.metadataQuerier = dqr - - if distributor.UseQueryable(now, mint, maxt) { - q.queriers = append(q.queriers, dqr) - } - - for _, s := range stores { - if !s.UseQueryable(now, mint, maxt) { - continue - } - - cqr, err := s.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - - q.queriers = append(q.queriers, cqr) - } - - return q, nil - }) -} - -type querier struct { - // used for labels and metadata queries - metadataQuerier storage.Querier - - // used for selecting series - queriers []storage.Querier - - chunkIterFn chunkIteratorFunc - ctx context.Context - mint, maxt int64 - - tombstonesLoader *purger.TombstonesLoader - limits *validation.Overrides - maxQueryIntoFuture time.Duration - queryStoreForLabels bool -} - -// Select implements storage.Querier interface. -// The bool passed is ignored because the series is always sorted. -func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, ctx := spanlogger.New(q.ctx, "querier.Select") - defer log.Span.Finish() - - if sp != nil { - level.Debug(log).Log("start", util.TimeFromMillis(sp.Start).UTC().String(), "end", util.TimeFromMillis(sp.End).UTC().String(), "step", sp.Step, "matchers", matchers) - } - - if sp == nil { - // if SelectHints is null, rely on minT, maxT of querier to scope in range for Select stmt - sp = &storage.SelectHints{Start: q.mint, End: q.maxt} - } else if sp.Func == "series" && !q.queryStoreForLabels { - // Else if the querier receives a 'series' query, it means only metadata is needed. - // Here we expect that metadataQuerier querier will handle that. - // Also, in the recent versions of Prometheus, we pass in the hint but with Func set to "series". - // See: https://github.com/prometheus/prometheus/pull/8050 - - // In this case, the query time range has already been validated when the querier has been - // created. - return q.metadataQuerier.Select(true, sp, matchers...) - } - - userID, err := tenant.TenantID(ctx) - if err != nil { - return storage.ErrSeriesSet(err) - } - - // Validate query time range. Even if the time range has already been validated when we created - // the querier, we need to check it again here because the time range specified in hints may be - // different. 
- startMs, endMs, err := validateQueryTimeRange(ctx, userID, sp.Start, sp.End, q.limits, q.maxQueryIntoFuture) - if err == errEmptyTimeRange { - return storage.NoopSeriesSet() - } else if err != nil { - return storage.ErrSeriesSet(err) - } - - // The time range may have been manipulated during the validation, - // so we make sure changes are reflected back to hints. - sp.Start = startMs - sp.End = endMs - - startTime := model.Time(startMs) - endTime := model.Time(endMs) - - // Validate query time range. This validation should be done only for instant / range queries and - // NOT for metadata queries (series, labels) because the query-frontend doesn't support splitting - // of such queries. - if maxQueryLength := q.limits.MaxQueryLength(userID); maxQueryLength > 0 && endTime.Sub(startTime) > maxQueryLength { - limitErr := validation.LimitError(fmt.Sprintf(validation.ErrQueryTooLong, endTime.Sub(startTime), maxQueryLength)) - return storage.ErrSeriesSet(limitErr) - } - - tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, startTime, endTime) - if err != nil { - return storage.ErrSeriesSet(err) - } - - if len(q.queriers) == 1 { - seriesSet := q.queriers[0].Select(true, sp, matchers...) - - if tombstones.Len() != 0 { - seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: startTime, End: endTime}) - } - - return seriesSet - } - - sets := make(chan storage.SeriesSet, len(q.queriers)) - for _, querier := range q.queriers { - go func(querier storage.Querier) { - sets <- querier.Select(true, sp, matchers...) - }(querier) - } - - var result []storage.SeriesSet - for range q.queriers { - select { - case set := <-sets: - result = append(result, set) - case <-ctx.Done(): - return storage.ErrSeriesSet(ctx.Err()) - } - } - - // we have all the sets from different sources (chunk from store, chunks from ingesters, - // time series from store and time series from ingesters). - // mergeSeriesSets will return sorted set. - seriesSet := q.mergeSeriesSets(result) - - if tombstones.Len() != 0 { - seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: startTime, End: endTime}) - } - return seriesSet -} - -// LabelsValue implements storage.Querier. -func (q querier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - if !q.queryStoreForLabels { - return q.metadataQuerier.LabelValues(name, matchers...) - } - - if len(q.queriers) == 1 { - return q.queriers[0].LabelValues(name, matchers...) - } - - var ( - g, _ = errgroup.WithContext(q.ctx) - sets = [][]string{} - warnings = storage.Warnings(nil) - - resMtx sync.Mutex - ) - - for _, querier := range q.queriers { - // Need to reassign as the original variable will change and can't be relied on in a goroutine. - querier := querier - g.Go(func() error { - // NB: Values are sorted in Cortex already. - myValues, myWarnings, err := querier.LabelValues(name, matchers...) - if err != nil { - return err - } - - resMtx.Lock() - sets = append(sets, myValues) - warnings = append(warnings, myWarnings...) - resMtx.Unlock() - - return nil - }) - } - - err := g.Wait() - if err != nil { - return nil, nil, err - } - - return strutil.MergeSlices(sets...), warnings, nil -} - -func (q querier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - if !q.queryStoreForLabels { - return q.metadataQuerier.LabelNames(matchers...) - } - - if len(q.queriers) == 1 { - return q.queriers[0].LabelNames(matchers...) 
- } - - var ( - g, _ = errgroup.WithContext(q.ctx) - sets = [][]string{} - warnings = storage.Warnings(nil) - - resMtx sync.Mutex - ) - - for _, querier := range q.queriers { - // Need to reassign as the original variable will change and can't be relied on in a goroutine. - querier := querier - g.Go(func() error { - // NB: Names are sorted in Cortex already. - myNames, myWarnings, err := querier.LabelNames(matchers...) - if err != nil { - return err - } - - resMtx.Lock() - sets = append(sets, myNames) - warnings = append(warnings, myWarnings...) - resMtx.Unlock() - - return nil - }) - } - - err := g.Wait() - if err != nil { - return nil, nil, err - } - - return strutil.MergeSlices(sets...), warnings, nil -} - -func (querier) Close() error { - return nil -} - -func (q querier) mergeSeriesSets(sets []storage.SeriesSet) storage.SeriesSet { - // Here we deal with sets that are based on chunks and build single set from them. - // Remaining sets are merged with chunks-based one using storage.NewMergeSeriesSet - - otherSets := []storage.SeriesSet(nil) - chunks := []chunk.Chunk(nil) - - for _, set := range sets { - nonChunkSeries := []storage.Series(nil) - - // SeriesSet may have some series backed up by chunks, and some not. - for set.Next() { - s := set.At() - - if sc, ok := s.(SeriesWithChunks); ok { - chunks = append(chunks, sc.Chunks()...) - } else { - nonChunkSeries = append(nonChunkSeries, s) - } - } - - if err := set.Err(); err != nil { - otherSets = append(otherSets, storage.ErrSeriesSet(err)) - } else if len(nonChunkSeries) > 0 { - otherSets = append(otherSets, &sliceSeriesSet{series: nonChunkSeries, ix: -1}) - } - } - - if len(chunks) == 0 { - return storage.NewMergeSeriesSet(otherSets, storage.ChainedSeriesMerge) - } - - // partitionChunks returns set with sorted series, so it can be used by NewMergeSeriesSet - chunksSet := partitionChunks(chunks, q.mint, q.maxt, q.chunkIterFn) - - if len(otherSets) == 0 { - return chunksSet - } - - otherSets = append(otherSets, chunksSet) - return storage.NewMergeSeriesSet(otherSets, storage.ChainedSeriesMerge) -} - -type sliceSeriesSet struct { - series []storage.Series - ix int -} - -func (s *sliceSeriesSet) Next() bool { - s.ix++ - return s.ix < len(s.series) -} - -func (s *sliceSeriesSet) At() storage.Series { - if s.ix < 0 || s.ix >= len(s.series) { - return nil - } - return s.series[s.ix] -} - -func (s *sliceSeriesSet) Err() error { - return nil -} - -func (s *sliceSeriesSet) Warnings() storage.Warnings { - return nil -} - -type storeQueryable struct { - QueryableWithFilter - QueryStoreAfter time.Duration -} - -func (s storeQueryable) UseQueryable(now time.Time, queryMinT, queryMaxT int64) bool { - // Include this store only if mint is within QueryStoreAfter w.r.t current time. - if s.QueryStoreAfter != 0 && queryMinT > util.TimeToMillis(now.Add(-s.QueryStoreAfter)) { - return false - } - return s.QueryableWithFilter.UseQueryable(now, queryMinT, queryMaxT) -} - -type alwaysTrueFilterQueryable struct { - storage.Queryable -} - -func (alwaysTrueFilterQueryable) UseQueryable(_ time.Time, _, _ int64) bool { - return true -} - -// Wraps storage.Queryable into QueryableWithFilter, with no query filtering. 
-func UseAlwaysQueryable(q storage.Queryable) QueryableWithFilter { - return alwaysTrueFilterQueryable{Queryable: q} -} - -type useBeforeTimestampQueryable struct { - storage.Queryable - ts int64 // Timestamp in milliseconds -} - -func (u useBeforeTimestampQueryable) UseQueryable(_ time.Time, queryMinT, _ int64) bool { - if u.ts == 0 { - return true - } - return queryMinT < u.ts -} - -// Returns QueryableWithFilter, that is used only if query starts before given timestamp. -// If timestamp is zero (time.IsZero), queryable is always used. -func UseBeforeTimestampQueryable(queryable storage.Queryable, ts time.Time) QueryableWithFilter { - t := int64(0) - if !ts.IsZero() { - t = util.TimeToMillis(ts) - } - return useBeforeTimestampQueryable{ - Queryable: queryable, - ts: t, - } -} - -func validateQueryTimeRange(ctx context.Context, userID string, startMs, endMs int64, limits *validation.Overrides, maxQueryIntoFuture time.Duration) (int64, int64, error) { - now := model.Now() - startTime := model.Time(startMs) - endTime := model.Time(endMs) - - // Clamp time range based on max query into future. - if maxQueryIntoFuture > 0 && endTime.After(now.Add(maxQueryIntoFuture)) { - origEndTime := endTime - endTime = now.Add(maxQueryIntoFuture) - - // Make sure to log it in traces to ease debugging. - level.Debug(spanlogger.FromContext(ctx)).Log( - "msg", "the end time of the query has been manipulated because of the 'max query into future' setting", - "original", util.FormatTimeModel(origEndTime), - "updated", util.FormatTimeModel(endTime)) - - if endTime.Before(startTime) { - return 0, 0, errEmptyTimeRange - } - } - - // Clamp the time range based on the max query lookback. - if maxQueryLookback := limits.MaxQueryLookback(userID); maxQueryLookback > 0 && startTime.Before(now.Add(-maxQueryLookback)) { - origStartTime := startTime - startTime = now.Add(-maxQueryLookback) - - // Make sure to log it in traces to ease debugging. - level.Debug(spanlogger.FromContext(ctx)).Log( - "msg", "the start time of the query has been manipulated because of the 'max query lookback' setting", - "original", util.FormatTimeModel(origStartTime), - "updated", util.FormatTimeModel(startTime)) - - if endTime.Before(startTime) { - return 0, 0, errEmptyTimeRange - } - } - - return int64(startTime), int64(endTime), nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/instrumentation.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/instrumentation.go deleted file mode 100644 index 39f6ac379..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/instrumentation.go +++ /dev/null @@ -1,64 +0,0 @@ -package queryrange - -import ( - "context" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/instrument" -) - -// InstrumentMiddleware can be inserted into the middleware chain to expose timing information. -func InstrumentMiddleware(name string, metrics *InstrumentMiddlewareMetrics) Middleware { - var durationCol instrument.Collector - - // Support the case metrics shouldn't be tracked (ie. unit tests). 
- if metrics != nil { - durationCol = instrument.NewHistogramCollector(metrics.duration) - } else { - durationCol = &NoopCollector{} - } - - return MiddlewareFunc(func(next Handler) Handler { - return HandlerFunc(func(ctx context.Context, req Request) (Response, error) { - var resp Response - err := instrument.CollectedRequest(ctx, name, durationCol, instrument.ErrorCode, func(ctx context.Context) error { - var err error - resp, err = next.Do(ctx, req) - return err - }) - return resp, err - }) - }) -} - -// InstrumentMiddlewareMetrics holds the metrics tracked by InstrumentMiddleware. -type InstrumentMiddlewareMetrics struct { - duration *prometheus.HistogramVec -} - -// NewInstrumentMiddlewareMetrics makes a new InstrumentMiddlewareMetrics. -func NewInstrumentMiddlewareMetrics(registerer prometheus.Registerer) *InstrumentMiddlewareMetrics { - return &InstrumentMiddlewareMetrics{ - duration: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "frontend_query_range_duration_seconds", - Help: "Total time spent in seconds doing query range requests.", - Buckets: prometheus.DefBuckets, - }, []string{"method", "status_code"}), - } -} - -// NoopCollector is a noop collector that can be used as placeholder when no metric -// should tracked by the instrumentation. -type NoopCollector struct{} - -// Register implements instrument.Collector. -func (c *NoopCollector) Register() {} - -// Before implements instrument.Collector. -func (c *NoopCollector) Before(ctx context.Context, method string, start time.Time) {} - -// After implements instrument.Collector. -func (c *NoopCollector) After(ctx context.Context, method, statusCode string, start time.Time) {} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go deleted file mode 100644 index ee55cde76..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go +++ /dev/null @@ -1,97 +0,0 @@ -package queryrange - -import ( - "context" - "net/http" - "time" - - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/timestamp" - "github.com/weaveworks/common/httpgrpc" - - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// Limits allows us to specify per-tenant runtime limits on the behavior of -// the query handling code. -type Limits interface { - // MaxQueryLookback returns the max lookback period of queries. - MaxQueryLookback(userID string) time.Duration - - // MaxQueryLength returns the limit of the length (in time) of a query. - MaxQueryLength(string) time.Duration - - // MaxQueryParallelism returns the limit to the number of split queries the - // frontend will process in parallel. - MaxQueryParallelism(string) int - - // MaxCacheFreshness returns the period after which results are cacheable, - // to prevent caching of very recent results. - MaxCacheFreshness(string) time.Duration -} - -type limitsMiddleware struct { - Limits - next Handler -} - -// NewLimitsMiddleware creates a new Middleware that enforces query limits. 
-func NewLimitsMiddleware(l Limits) Middleware {
- return MiddlewareFunc(func(next Handler) Handler {
- return limitsMiddleware{
- next: next,
- Limits: l,
- }
- })
-}
-
-func (l limitsMiddleware) Do(ctx context.Context, r Request) (Response, error) {
- log, ctx := spanlogger.New(ctx, "limits")
- defer log.Finish()
-
- tenantIDs, err := tenant.TenantIDs(ctx)
- if err != nil {
- return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
- }
-
- // Clamp the time range based on the max query lookback.
-
- if maxQueryLookback := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, l.MaxQueryLookback); maxQueryLookback > 0 {
- minStartTime := util.TimeToMillis(time.Now().Add(-maxQueryLookback))
-
- if r.GetEnd() < minStartTime {
- // The request is fully outside the allowed range, so we can return an
- // empty response.
- level.Debug(log).Log(
- "msg", "skipping the execution of the query because its time range is before the 'max query lookback' setting",
- "reqStart", util.FormatTimeMillis(r.GetStart()),
- "redEnd", util.FormatTimeMillis(r.GetEnd()),
- "maxQueryLookback", maxQueryLookback)
-
- return NewEmptyPrometheusResponse(), nil
- }
-
- if r.GetStart() < minStartTime {
- // Replace the start time in the request.
- level.Debug(log).Log(
- "msg", "the start time of the query has been manipulated because of the 'max query lookback' setting",
- "original", util.FormatTimeMillis(r.GetStart()),
- "updated", util.FormatTimeMillis(minStartTime))
-
- r = r.WithStartEnd(minStartTime, r.GetEnd())
- }
- }
-
- // Enforce the max query length.
- if maxQueryLength := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, l.MaxQueryLength); maxQueryLength > 0 {
- queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart()))
- if queryLen > maxQueryLength {
- return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, maxQueryLength)
- }
- }
-
- return l.next.Do(ctx, r)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go
deleted file mode 100644
index ad8ef2598..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package queryrange
-
-import (
- "bytes"
- "context"
- "fmt"
- "io/ioutil"
- "math"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "strings"
- "time"
- "unsafe"
-
- "github.com/gogo/protobuf/proto"
- "github.com/gogo/status"
- jsoniter "github.com/json-iterator/go"
- "github.com/opentracing/opentracing-go"
- otlog "github.com/opentracing/opentracing-go/log"
- "github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/timestamp"
- "github.com/weaveworks/common/httpgrpc"
-
- "github.com/cortexproject/cortex/pkg/cortexpb"
- "github.com/cortexproject/cortex/pkg/util"
- "github.com/cortexproject/cortex/pkg/util/spanlogger"
-)
-
-// StatusSuccess Prometheus success result.
-const StatusSuccess = "success"
-
-var (
- matrix = model.ValMatrix.String()
- json = jsoniter.Config{
- EscapeHTML: false, // No HTML in our responses.
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
- }.Froze()
- errEndBeforeStart = httpgrpc.Errorf(http.StatusBadRequest, "end timestamp must not be before start time")
- errNegativeStep = httpgrpc.Errorf(http.StatusBadRequest, "zero or negative query resolution step widths are not accepted. Try a positive integer")
- errStepTooSmall = httpgrpc.Errorf(http.StatusBadRequest, "exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
-
- // PrometheusCodec is a codec to encode and decode Prometheus query range requests and responses.
- PrometheusCodec Codec = &prometheusCodec{}
-
- // Name of the cache control header.
- cacheControlHeader = "Cache-Control"
-)
-
-// Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares.
-type Codec interface {
- Merger
- // DecodeRequest decodes a Request from an http request.
- DecodeRequest(_ context.Context, request *http.Request, forwardHeaders []string) (Request, error)
- // DecodeResponse decodes a Response from an http response.
- // The original request is also passed as a parameter this is useful for implementation that needs the request
- // to merge result or build the result correctly.
- DecodeResponse(context.Context, *http.Response, Request) (Response, error)
- // EncodeRequest encodes a Request into an http request.
- EncodeRequest(context.Context, Request) (*http.Request, error)
- // EncodeResponse encodes a Response into an http response.
- EncodeResponse(context.Context, Response) (*http.Response, error)
-}
-
-// Merger is used by middlewares making multiple requests to merge back all responses into a single one.
-type Merger interface {
- // MergeResponse merges responses from multiple requests into a single Response
- MergeResponse(...Response) (Response, error)
-}
-
-// Request represents a query range request that can be process by middlewares.
-type Request interface {
- // GetStart returns the start timestamp of the request in milliseconds.
- GetStart() int64
- // GetEnd returns the end timestamp of the request in milliseconds.
- GetEnd() int64
- // GetStep returns the step of the request in milliseconds.
- GetStep() int64
- // GetQuery returns the query of the request.
- GetQuery() string
- // GetCachingOptions returns the caching options.
- GetCachingOptions() CachingOptions
- // WithStartEnd clone the current request with different start and end timestamp.
- WithStartEnd(startTime int64, endTime int64) Request
- // WithQuery clone the current request with a different query.
- WithQuery(string) Request
- proto.Message
- // LogToSpan writes information about this request to an OpenTracing span
- LogToSpan(opentracing.Span)
- // GetStats returns the stats of the request.
- GetStats() string
- // WithStats clones the current `PrometheusRequest` with a new stats.
- WithStats(stats string) Request
-}
-
-// Response represents a query range response.
-type Response interface {
- proto.Message
- // GetHeaders returns the HTTP headers in the response.
- GetHeaders() []*PrometheusResponseHeader
-}
-
-type prometheusCodec struct{}
-
-// WithStartEnd clones the current `PrometheusRequest` with a new `start` and `end` timestamp.
-func (q *PrometheusRequest) WithStartEnd(start int64, end int64) Request {
- new := *q
- new.Start = start
- new.End = end
- return &new
-}
-
-// WithQuery clones the current `PrometheusRequest` with a new query.
-func (q *PrometheusRequest) WithQuery(query string) Request {
- new := *q
- new.Query = query
- return &new
-}
-
-// WithStats clones the current `PrometheusRequest` with a new stats.
-func (q *PrometheusRequest) WithStats(stats string) Request {
- new := *q
- new.Stats = stats
- return &new
-}
-
-// LogToSpan logs the current `PrometheusRequest` parameters to the specified span.
-func (q *PrometheusRequest) LogToSpan(sp opentracing.Span) { - sp.LogFields( - otlog.String("query", q.GetQuery()), - otlog.String("start", timestamp.Time(q.GetStart()).String()), - otlog.String("end", timestamp.Time(q.GetEnd()).String()), - otlog.Int64("step (ms)", q.GetStep()), - ) -} - -type byFirstTime []*PrometheusResponse - -func (a byFirstTime) Len() int { return len(a) } -func (a byFirstTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byFirstTime) Less(i, j int) bool { return a[i].minTime() < a[j].minTime() } - -func (resp *PrometheusResponse) minTime() int64 { - result := resp.Data.Result - if len(result) == 0 { - return -1 - } - if len(result[0].Samples) == 0 { - return -1 - } - return result[0].Samples[0].TimestampMs -} - -// NewEmptyPrometheusResponse returns an empty successful Prometheus query range response. -func NewEmptyPrometheusResponse() *PrometheusResponse { - return &PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: model.ValMatrix.String(), - Result: []SampleStream{}, - }, - } -} - -func (prometheusCodec) MergeResponse(responses ...Response) (Response, error) { - if len(responses) == 0 { - return NewEmptyPrometheusResponse(), nil - } - - promResponses := make([]*PrometheusResponse, 0, len(responses)) - // we need to pass on all the headers for results cache gen numbers. - var resultsCacheGenNumberHeaderValues []string - - for _, res := range responses { - promResponses = append(promResponses, res.(*PrometheusResponse)) - resultsCacheGenNumberHeaderValues = append(resultsCacheGenNumberHeaderValues, getHeaderValuesWithName(res, ResultsCacheGenNumberHeaderName)...) - } - - // Merge the responses. - sort.Sort(byFirstTime(promResponses)) - - response := PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: model.ValMatrix.String(), - Result: matrixMerge(promResponses), - Stats: statsMerge(promResponses), - }, - } - - if len(resultsCacheGenNumberHeaderValues) != 0 { - response.Headers = []*PrometheusResponseHeader{{ - Name: ResultsCacheGenNumberHeaderName, - Values: resultsCacheGenNumberHeaderValues, - }} - } - - return &response, nil -} - -func (prometheusCodec) DecodeRequest(_ context.Context, r *http.Request, forwardHeaders []string) (Request, error) { - var result PrometheusRequest - var err error - result.Start, err = util.ParseTime(r.FormValue("start")) - if err != nil { - return nil, decorateWithParamName(err, "start") - } - - result.End, err = util.ParseTime(r.FormValue("end")) - if err != nil { - return nil, decorateWithParamName(err, "end") - } - - if result.End < result.Start { - return nil, errEndBeforeStart - } - - result.Step, err = parseDurationMs(r.FormValue("step")) - if err != nil { - return nil, decorateWithParamName(err, "step") - } - - if result.Step <= 0 { - return nil, errNegativeStep - } - - // For safety, limit the number of returned points per timeseries. - // This is sufficient for 60s resolution for a week or 1h resolution for a year. - if (result.End-result.Start)/result.Step > 11000 { - return nil, errStepTooSmall - } - - result.Query = r.FormValue("query") - result.Stats = r.FormValue("stats") - result.Path = r.URL.Path - - // Include the specified headers from http request in prometheusRequest. 
- for _, header := range forwardHeaders { - for h, hv := range r.Header { - if strings.EqualFold(h, header) { - result.Headers = append(result.Headers, &PrometheusRequestHeader{Name: h, Values: hv}) - break - } - } - } - - for _, value := range r.Header.Values(cacheControlHeader) { - if strings.Contains(value, noStoreValue) { - result.CachingOptions.Disabled = true - break - } - } - - return &result, nil -} - -func (prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Request, error) { - promReq, ok := r.(*PrometheusRequest) - if !ok { - return nil, httpgrpc.Errorf(http.StatusBadRequest, "invalid request format") - } - params := url.Values{ - "start": []string{encodeTime(promReq.Start)}, - "end": []string{encodeTime(promReq.End)}, - "step": []string{encodeDurationMs(promReq.Step)}, - "query": []string{promReq.Query}, - "stats": []string{promReq.Stats}, - } - u := &url.URL{ - Path: promReq.Path, - RawQuery: params.Encode(), - } - var h = http.Header{} - - for _, hv := range promReq.Headers { - for _, v := range hv.Values { - h.Add(hv.Name, v) - } - } - - req := &http.Request{ - Method: "GET", - RequestURI: u.String(), // This is what the httpgrpc code looks at. - URL: u, - Body: http.NoBody, - Header: h, - } - - return req.WithContext(ctx), nil -} - -func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ Request) (Response, error) { - if r.StatusCode/100 != 2 { - body, _ := ioutil.ReadAll(r.Body) - return nil, httpgrpc.Errorf(r.StatusCode, string(body)) - } - log, ctx := spanlogger.New(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck - defer log.Finish() - - buf, err := bodyBuffer(r) - if err != nil { - log.Error(err) - return nil, err - } - log.LogFields(otlog.Int("bytes", len(buf))) - - var resp PrometheusResponse - if err := json.Unmarshal(buf, &resp); err != nil { - return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err) - } - - for h, hv := range r.Header { - resp.Headers = append(resp.Headers, &PrometheusResponseHeader{Name: h, Values: hv}) - } - return &resp, nil -} - -// Buffer can be used to read a response body. -// This allows to avoid reading the body multiple times from the `http.Response.Body`. -type Buffer interface { - Bytes() []byte -} - -func bodyBuffer(res *http.Response) ([]byte, error) { - // Attempt to cast the response body to a Buffer and use it if possible. - // This is because the frontend may have already read the body and buffered it. - if buffer, ok := res.Body.(Buffer); ok { - return buffer.Bytes(), nil - } - // Preallocate the buffer with the exact size so we don't waste allocations - // while progressively growing an initial small buffer. The buffer capacity - // is increased by MinRead to avoid extra allocations due to how ReadFrom() - // internally works. 
- buf := bytes.NewBuffer(make([]byte, 0, res.ContentLength+bytes.MinRead)) - if _, err := buf.ReadFrom(res.Body); err != nil { - return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err) - } - return buf.Bytes(), nil -} - -func (prometheusCodec) EncodeResponse(ctx context.Context, res Response) (*http.Response, error) { - sp, _ := opentracing.StartSpanFromContext(ctx, "APIResponse.ToHTTPResponse") - defer sp.Finish() - - a, ok := res.(*PrometheusResponse) - if !ok { - return nil, httpgrpc.Errorf(http.StatusInternalServerError, "invalid response format") - } - - sp.LogFields(otlog.Int("series", len(a.Data.Result))) - - b, err := json.Marshal(a) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error encoding response: %v", err) - } - - sp.LogFields(otlog.Int("bytes", len(b))) - - resp := http.Response{ - Header: http.Header{ - "Content-Type": []string{"application/json"}, - }, - Body: ioutil.NopCloser(bytes.NewBuffer(b)), - StatusCode: http.StatusOK, - ContentLength: int64(len(b)), - } - return &resp, nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SampleStream) UnmarshalJSON(data []byte) error { - var stream struct { - Metric model.Metric `json:"metric"` - Values []cortexpb.Sample `json:"values"` - } - if err := json.Unmarshal(data, &stream); err != nil { - return err - } - s.Labels = cortexpb.FromMetricsToLabelAdapters(stream.Metric) - s.Samples = stream.Values - return nil -} - -// MarshalJSON implements json.Marshaler. -func (s *SampleStream) MarshalJSON() ([]byte, error) { - stream := struct { - Metric model.Metric `json:"metric"` - Values []cortexpb.Sample `json:"values"` - }{ - Metric: cortexpb.FromLabelAdaptersToMetric(s.Labels), - Values: s.Samples, - } - return json.Marshal(stream) -} - -// statsMerge merge the stats from 2 responses -// this function is similar to matrixMerge -func statsMerge(resps []*PrometheusResponse) *PrometheusResponseStats { - output := map[int64]*PrometheusResponseQueryableSamplesStatsPerStep{} - hasStats := false - for _, resp := range resps { - if resp.Data.Stats == nil { - continue - } - - hasStats = true - if resp.Data.Stats.Samples == nil { - continue - } - - for _, s := range resp.Data.Stats.Samples.TotalQueryableSamplesPerStep { - output[s.GetTimestampMs()] = s - } - } - - if !hasStats { - return nil - } - - keys := make([]int64, 0, len(output)) - for key := range output { - keys = append(keys, key) - } - - sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) - - result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}} - for _, key := range keys { - result.Samples.TotalQueryableSamplesPerStep = append(result.Samples.TotalQueryableSamplesPerStep, output[key]) - result.Samples.TotalQueryableSamples += output[key].Value - } - - return result -} - -func matrixMerge(resps []*PrometheusResponse) []SampleStream { - output := map[string]*SampleStream{} - for _, resp := range resps { - for _, stream := range resp.Data.Result { - metric := cortexpb.FromLabelAdaptersToLabels(stream.Labels).String() - existing, ok := output[metric] - if !ok { - existing = &SampleStream{ - Labels: stream.Labels, - } - } - // We need to make sure we don't repeat samples. This causes some visualisations to be broken in Grafana. - // The prometheus API is inclusive of start and end timestamps. 
- if len(existing.Samples) > 0 && len(stream.Samples) > 0 { - existingEndTs := existing.Samples[len(existing.Samples)-1].TimestampMs - if existingEndTs == stream.Samples[0].TimestampMs { - // Typically this the cases where only 1 sample point overlap, - // so optimize with simple code. - stream.Samples = stream.Samples[1:] - } else if existingEndTs > stream.Samples[0].TimestampMs { - // Overlap might be big, use heavier algorithm to remove overlap. - stream.Samples = sliceSamples(stream.Samples, existingEndTs) - } // else there is no overlap, yay! - } - existing.Samples = append(existing.Samples, stream.Samples...) - output[metric] = existing - } - } - - keys := make([]string, 0, len(output)) - for key := range output { - keys = append(keys, key) - } - sort.Strings(keys) - - result := make([]SampleStream, 0, len(output)) - for _, key := range keys { - result = append(result, *output[key]) - } - - return result -} - -// sliceSamples assumes given samples are sorted by timestamp in ascending order and -// return a sub slice whose first element's is the smallest timestamp that is strictly -// bigger than the given minTs. Empty slice is returned if minTs is bigger than all the -// timestamps in samples. -func sliceSamples(samples []cortexpb.Sample, minTs int64) []cortexpb.Sample { - if len(samples) <= 0 || minTs < samples[0].TimestampMs { - return samples - } - - if len(samples) > 0 && minTs > samples[len(samples)-1].TimestampMs { - return samples[len(samples):] - } - - searchResult := sort.Search(len(samples), func(i int) bool { - return samples[i].TimestampMs > minTs - }) - - return samples[searchResult:] -} - -func parseDurationMs(s string) (int64, error) { - if d, err := strconv.ParseFloat(s, 64); err == nil { - ts := d * float64(time.Second/time.Millisecond) - if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) { - return 0, httpgrpc.Errorf(http.StatusBadRequest, "cannot parse %q to a valid duration. 
It overflows int64", s) - } - return int64(ts), nil - } - if d, err := model.ParseDuration(s); err == nil { - return int64(d) / int64(time.Millisecond/time.Nanosecond), nil - } - return 0, httpgrpc.Errorf(http.StatusBadRequest, "cannot parse %q to a valid duration", s) -} - -func encodeTime(t int64) string { - f := float64(t) / 1.0e3 - return strconv.FormatFloat(f, 'f', -1, 64) -} - -func encodeDurationMs(d int64) string { - return strconv.FormatFloat(float64(d)/float64(time.Second/time.Millisecond), 'f', -1, 64) -} - -func decorateWithParamName(err error, field string) error { - errTmpl := "invalid parameter %q; %v" - if status, ok := status.FromError(err); ok { - return httpgrpc.Errorf(int(status.Code()), errTmpl, field, status.Message()) - } - return fmt.Errorf(errTmpl, field, err) -} - -func PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - if !iter.ReadArray() { - iter.ReportError("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", "expected [") - return - } - - t := model.Time(iter.ReadFloat64() * float64(time.Second/time.Millisecond)) - - if !iter.ReadArray() { - iter.ReportError("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", "expected ,") - return - } - v := iter.ReadInt64() - - if iter.ReadArray() { - iter.ReportError("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", "expected ]") - } - - *(*PrometheusResponseQueryableSamplesStatsPerStep)(ptr) = PrometheusResponseQueryableSamplesStatsPerStep{ - TimestampMs: int64(t), - Value: v, - } -} - -func PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode(ptr unsafe.Pointer, stream *jsoniter.Stream) { - stats := (*PrometheusResponseQueryableSamplesStatsPerStep)(ptr) - stream.WriteArrayStart() - stream.WriteFloat64(float64(stats.TimestampMs) / float64(time.Second/time.Millisecond)) - stream.WriteMore() - stream.WriteInt64(stats.Value) - stream.WriteArrayEnd() -} - -func init() { - jsoniter.RegisterTypeEncoderFunc("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterEncode, func(unsafe.Pointer) bool { return false }) - jsoniter.RegisterTypeDecoderFunc("queryrange.PrometheusResponseQueryableSamplesStatsPerStep", PrometheusResponseQueryableSamplesStatsPerStepJsoniterDecode) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryable.go deleted file mode 100644 index 0925f8ffa..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryable.go +++ /dev/null @@ -1,154 +0,0 @@ -package queryrange - -import ( - "context" - "sync" - - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/querier/astmapper" -) - -const ( - missingEmbeddedQueryMsg = "missing embedded query" - nonEmbeddedErrMsg = "DownstreamQuerier cannot handle a non-embedded query" -) - -// ShardedQueryable is an implementor of the Queryable interface. 
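-// Sharded subqueries reach it as selectors on the synthetic embedded-queries
-// metric (astmapper.EmbeddedQueriesMetricName), with the JSON-encoded
-// subqueries carried in the astmapper.QueryLabel matcher; the ShardedQuerier
-// below decodes them and delegates each one to the downstream Handler.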
-type ShardedQueryable struct {
- Req Request
- Handler Handler
-
- sharededQuerier *ShardedQuerier
-}
-
-// Querier implements Queryable
-func (q *ShardedQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
- q.sharededQuerier = &ShardedQuerier{Ctx: ctx, Req: q.Req, Handler: q.Handler, ResponseHeaders: map[string][]string{}}
- return q.sharededQuerier, nil
-}
-
-func (q *ShardedQueryable) getResponseHeaders() []*PrometheusResponseHeader {
- q.sharededQuerier.ResponseHeadersMtx.Lock()
- defer q.sharededQuerier.ResponseHeadersMtx.Unlock()
-
- return headersMapToPrometheusResponseHeaders(q.sharededQuerier.ResponseHeaders)
-}
-
-// ShardedQuerier is an implementor of the Querier interface.
-type ShardedQuerier struct {
- Ctx context.Context
- Req Request
- Handler Handler
- ResponseHeaders map[string][]string
- ResponseHeadersMtx sync.Mutex
-}
-
-// Select returns a set of series that matches the given label matchers.
-// The bool passed is ignored because the series is always sorted.
-func (q *ShardedQuerier) Select(_ bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
- var embeddedQuery string
- var isEmbedded bool
- for _, matcher := range matchers {
- if matcher.Name == labels.MetricName && matcher.Value == astmapper.EmbeddedQueriesMetricName {
- isEmbedded = true
- }
-
- if matcher.Name == astmapper.QueryLabel {
- embeddedQuery = matcher.Value
- }
- }
-
- if isEmbedded {
- if embeddedQuery != "" {
- return q.handleEmbeddedQuery(embeddedQuery)
- }
- return storage.ErrSeriesSet(errors.Errorf(missingEmbeddedQueryMsg))
-
- }
-
- return storage.ErrSeriesSet(errors.Errorf(nonEmbeddedErrMsg))
-}
-
-// handleEmbeddedQuery defers execution of an encoded query to a downstream Handler.
-func (q *ShardedQuerier) handleEmbeddedQuery(encoded string) storage.SeriesSet {
- queries, err := astmapper.JSONCodec.Decode(encoded)
- if err != nil {
- return storage.ErrSeriesSet(err)
- }
-
- ctx, cancel := context.WithCancel(q.Ctx)
- defer cancel()
-
- // buffer channels to length of queries to prevent leaking memory due to sending to unbuffered channels after cancel/err
- errCh := make(chan error, len(queries))
- samplesCh := make(chan []SampleStream, len(queries))
- // TODO(owen-d): impl unified concurrency controls, not per middleware
- for _, query := range queries {
- go func(query string) {
- resp, err := q.Handler.Do(ctx, q.Req.WithQuery(query))
- if err != nil {
- errCh <- err
- return
- }
- streams, err := ResponseToSamples(resp)
- if err != nil {
- errCh <- err
- return
- }
- q.setResponseHeaders(resp.(*PrometheusResponse).Headers)
- samplesCh <- streams
- }(query)
- }
-
- var samples []SampleStream
-
- for i := 0; i < len(queries); i++ {
- select {
- case err := <-errCh:
- return storage.ErrSeriesSet(err)
- case streams := <-samplesCh:
- samples = append(samples, streams...)
- }
- }
-
- return NewSeriesSet(samples)
-}
-
-func (q *ShardedQuerier) setResponseHeaders(headers []*PrometheusResponseHeader) {
- q.ResponseHeadersMtx.Lock()
- defer q.ResponseHeadersMtx.Unlock()
-
- for _, header := range headers {
- if _, ok := q.ResponseHeaders[header.Name]; !ok {
- q.ResponseHeaders[header.Name] = header.Values
- } else {
- q.ResponseHeaders[header.Name] = append(q.ResponseHeaders[header.Name], header.Values...)
- }
- }
-}
-
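The buffered-channel comment in handleEmbeddedQuery above is doing real work: because errCh and samplesCh are buffered to len(queries), a goroutine that finishes after the collector has already returned on an error can still complete its send and exit, instead of blocking forever on an unbuffered channel. A minimal standalone sketch of the same fan-out/fan-in shape, under the assumption that runAll, jobs, and do are illustrative names and not part of this package:

package main

import (
	"context"
	"errors"
	"fmt"
)

// runAll fans out one goroutine per job and fans the results back in,
// mirroring the shape of handleEmbeddedQuery. Both channels are buffered
// to len(jobs) so a goroutine finishing after an early error return can
// still send without blocking, and is therefore not leaked.
func runAll(ctx context.Context, jobs []string, do func(context.Context, string) (string, error)) ([]string, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // signals the remaining goroutines once we return

	errCh := make(chan error, len(jobs))
	outCh := make(chan string, len(jobs))

	for _, job := range jobs {
		go func(job string) {
			out, err := do(ctx, job)
			if err != nil {
				errCh <- err // buffered: never blocks, even after an early return
				return
			}
			outCh <- out
		}(job)
	}

	outs := make([]string, 0, len(jobs))
	for i := 0; i < len(jobs); i++ {
		select {
		case err := <-errCh:
			return nil, err // first error wins; ctx cancellation stops the rest
		case out := <-outCh:
			outs = append(outs, out)
		}
	}
	return outs, nil
}

func main() {
	outs, err := runAll(context.Background(), []string{"rate(a[1m])", "rate(b[1m])"},
		func(_ context.Context, q string) (string, error) {
			if q == "" {
				return "", errors.New("empty query")
			}
			return q + " => ok", nil
		})
	fmt.Println(outs, err)
}

-// LabelValues returns all potential values for a label name.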
-func (q *ShardedQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, errors.Errorf("unimplemented") -} - -// LabelNames returns all the unique label names present in the block in sorted order. -func (q *ShardedQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, errors.Errorf("unimplemented") -} - -// Close releases the resources of the Querier. -func (q *ShardedQuerier) Close() error { - return nil -} - -func headersMapToPrometheusResponseHeaders(headersMap map[string][]string) (prs []*PrometheusResponseHeader) { - for h, v := range headersMap { - prs = append(prs, &PrometheusResponseHeader{Name: h, Values: v}) - } - - return -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.pb.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.pb.go deleted file mode 100644 index cc2db5023..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.pb.go +++ /dev/null @@ -1,4208 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: queryrange.proto - -package queryrange - -import ( - fmt "fmt" - cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/duration" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type PrometheusRequestHeader struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"-"` - Values []string `protobuf:"bytes,2,rep,name=Values,proto3" json:"-"` -} - -func (m *PrometheusRequestHeader) Reset() { *m = PrometheusRequestHeader{} } -func (*PrometheusRequestHeader) ProtoMessage() {} -func (*PrometheusRequestHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{0} -} -func (m *PrometheusRequestHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusRequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusRequestHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusRequestHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusRequestHeader.Merge(m, src) -} -func (m *PrometheusRequestHeader) XXX_Size() int { - return m.Size() -} -func (m *PrometheusRequestHeader) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusRequestHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusRequestHeader proto.InternalMessageInfo - -func (m *PrometheusRequestHeader) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PrometheusRequestHeader) GetValues() []string { - if m != nil { - return m.Values - } - return nil -} - -type PrometheusRequest struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Start int64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` - End int64 `protobuf:"varint,3,opt,name=end,proto3" json:"end,omitempty"` - Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` - Timeout time.Duration `protobuf:"bytes,5,opt,name=timeout,proto3,stdduration" json:"timeout"` - Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"` - CachingOptions CachingOptions `protobuf:"bytes,7,opt,name=cachingOptions,proto3" json:"cachingOptions"` - Headers []*PrometheusRequestHeader `protobuf:"bytes,8,rep,name=Headers,proto3" json:"-"` - Stats string `protobuf:"bytes,9,opt,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *PrometheusRequest) Reset() { *m = PrometheusRequest{} } -func (*PrometheusRequest) ProtoMessage() {} -func (*PrometheusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{1} -} -func (m *PrometheusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusRequest.Merge(m, src) -} -func (m *PrometheusRequest) XXX_Size() int { - return m.Size() -} -func (m *PrometheusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusRequest proto.InternalMessageInfo - -func (m *PrometheusRequest) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *PrometheusRequest) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m 
*PrometheusRequest) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} - -func (m *PrometheusRequest) GetStep() int64 { - if m != nil { - return m.Step - } - return 0 -} - -func (m *PrometheusRequest) GetTimeout() time.Duration { - if m != nil { - return m.Timeout - } - return 0 -} - -func (m *PrometheusRequest) GetQuery() string { - if m != nil { - return m.Query - } - return "" -} - -func (m *PrometheusRequest) GetCachingOptions() CachingOptions { - if m != nil { - return m.CachingOptions - } - return CachingOptions{} -} - -func (m *PrometheusRequest) GetHeaders() []*PrometheusRequestHeader { - if m != nil { - return m.Headers - } - return nil -} - -func (m *PrometheusRequest) GetStats() string { - if m != nil { - return m.Stats - } - return "" -} - -type PrometheusResponseHeader struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"-"` - Values []string `protobuf:"bytes,2,rep,name=Values,proto3" json:"-"` -} - -func (m *PrometheusResponseHeader) Reset() { *m = PrometheusResponseHeader{} } -func (*PrometheusResponseHeader) ProtoMessage() {} -func (*PrometheusResponseHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{2} -} -func (m *PrometheusResponseHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusResponseHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusResponseHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusResponseHeader.Merge(m, src) -} -func (m *PrometheusResponseHeader) XXX_Size() int { - return m.Size() -} -func (m *PrometheusResponseHeader) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusResponseHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusResponseHeader proto.InternalMessageInfo - -func (m *PrometheusResponseHeader) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PrometheusResponseHeader) GetValues() []string { - if m != nil { - return m.Values - } - return nil -} - -type PrometheusResponse struct { - Status string `protobuf:"bytes,1,opt,name=Status,proto3" json:"status"` - Data PrometheusData `protobuf:"bytes,2,opt,name=Data,proto3" json:"data,omitempty"` - ErrorType string `protobuf:"bytes,3,opt,name=ErrorType,proto3" json:"errorType,omitempty"` - Error string `protobuf:"bytes,4,opt,name=Error,proto3" json:"error,omitempty"` - Headers []*PrometheusResponseHeader `protobuf:"bytes,5,rep,name=Headers,proto3" json:"-"` -} - -func (m *PrometheusResponse) Reset() { *m = PrometheusResponse{} } -func (*PrometheusResponse) ProtoMessage() {} -func (*PrometheusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{3} -} -func (m *PrometheusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusResponse.Merge(m, src) -} -func (m *PrometheusResponse) XXX_Size() int { - return m.Size() -} -func (m 
*PrometheusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusResponse proto.InternalMessageInfo - -func (m *PrometheusResponse) GetStatus() string { - if m != nil { - return m.Status - } - return "" -} - -func (m *PrometheusResponse) GetData() PrometheusData { - if m != nil { - return m.Data - } - return PrometheusData{} -} - -func (m *PrometheusResponse) GetErrorType() string { - if m != nil { - return m.ErrorType - } - return "" -} - -func (m *PrometheusResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *PrometheusResponse) GetHeaders() []*PrometheusResponseHeader { - if m != nil { - return m.Headers - } - return nil -} - -type PrometheusData struct { - ResultType string `protobuf:"bytes,1,opt,name=ResultType,proto3" json:"resultType"` - Result []SampleStream `protobuf:"bytes,2,rep,name=Result,proto3" json:"result"` - Stats *PrometheusResponseStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *PrometheusData) Reset() { *m = PrometheusData{} } -func (*PrometheusData) ProtoMessage() {} -func (*PrometheusData) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{4} -} -func (m *PrometheusData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusData) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusData.Merge(m, src) -} -func (m *PrometheusData) XXX_Size() int { - return m.Size() -} -func (m *PrometheusData) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusData.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusData proto.InternalMessageInfo - -func (m *PrometheusData) GetResultType() string { - if m != nil { - return m.ResultType - } - return "" -} - -func (m *PrometheusData) GetResult() []SampleStream { - if m != nil { - return m.Result - } - return nil -} - -func (m *PrometheusData) GetStats() *PrometheusResponseStats { - if m != nil { - return m.Stats - } - return nil -} - -type PrometheusResponseStats struct { - Samples *PrometheusResponseSamplesStats `protobuf:"bytes,1,opt,name=samples,proto3" json:"samples"` -} - -func (m *PrometheusResponseStats) Reset() { *m = PrometheusResponseStats{} } -func (*PrometheusResponseStats) ProtoMessage() {} -func (*PrometheusResponseStats) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{5} -} -func (m *PrometheusResponseStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusResponseStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusResponseStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusResponseStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusResponseStats.Merge(m, src) -} -func (m *PrometheusResponseStats) XXX_Size() int { - return m.Size() -} -func (m *PrometheusResponseStats) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusResponseStats.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusResponseStats proto.InternalMessageInfo - -func (m 
*PrometheusResponseStats) GetSamples() *PrometheusResponseSamplesStats { - if m != nil { - return m.Samples - } - return nil -} - -type PrometheusResponseSamplesStats struct { - TotalQueryableSamples int64 `protobuf:"varint,1,opt,name=totalQueryableSamples,proto3" json:"totalQueryableSamples"` - TotalQueryableSamplesPerStep []*PrometheusResponseQueryableSamplesStatsPerStep `protobuf:"bytes,2,rep,name=totalQueryableSamplesPerStep,proto3" json:"totalQueryableSamplesPerStep"` -} - -func (m *PrometheusResponseSamplesStats) Reset() { *m = PrometheusResponseSamplesStats{} } -func (*PrometheusResponseSamplesStats) ProtoMessage() {} -func (*PrometheusResponseSamplesStats) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{6} -} -func (m *PrometheusResponseSamplesStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusResponseSamplesStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusResponseSamplesStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusResponseSamplesStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusResponseSamplesStats.Merge(m, src) -} -func (m *PrometheusResponseSamplesStats) XXX_Size() int { - return m.Size() -} -func (m *PrometheusResponseSamplesStats) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusResponseSamplesStats.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusResponseSamplesStats proto.InternalMessageInfo - -func (m *PrometheusResponseSamplesStats) GetTotalQueryableSamples() int64 { - if m != nil { - return m.TotalQueryableSamples - } - return 0 -} - -func (m *PrometheusResponseSamplesStats) GetTotalQueryableSamplesPerStep() []*PrometheusResponseQueryableSamplesStatsPerStep { - if m != nil { - return m.TotalQueryableSamplesPerStep - } - return nil -} - -type PrometheusResponseQueryableSamplesStatsPerStep struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` -} - -func (m *PrometheusResponseQueryableSamplesStatsPerStep) Reset() { - *m = PrometheusResponseQueryableSamplesStatsPerStep{} -} -func (*PrometheusResponseQueryableSamplesStatsPerStep) ProtoMessage() {} -func (*PrometheusResponseQueryableSamplesStatsPerStep) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{7} -} -func (m *PrometheusResponseQueryableSamplesStatsPerStep) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusResponseQueryableSamplesStatsPerStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrometheusResponseQueryableSamplesStatsPerStep.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PrometheusResponseQueryableSamplesStatsPerStep) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusResponseQueryableSamplesStatsPerStep.Merge(m, src) -} -func (m *PrometheusResponseQueryableSamplesStatsPerStep) XXX_Size() int { - return m.Size() -} -func (m *PrometheusResponseQueryableSamplesStatsPerStep) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusResponseQueryableSamplesStatsPerStep.DiscardUnknown(m) -} - -var 
xxx_messageInfo_PrometheusResponseQueryableSamplesStatsPerStep proto.InternalMessageInfo - -func (m *PrometheusResponseQueryableSamplesStatsPerStep) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *PrometheusResponseQueryableSamplesStatsPerStep) GetTimestampMs() int64 { - if m != nil { - return m.TimestampMs - } - return 0 -} - -type SampleStream struct { - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"metric"` - Samples []cortexpb.Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"values"` -} - -func (m *SampleStream) Reset() { *m = SampleStream{} } -func (*SampleStream) ProtoMessage() {} -func (*SampleStream) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{8} -} -func (m *SampleStream) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SampleStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SampleStream.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SampleStream) XXX_Merge(src proto.Message) { - xxx_messageInfo_SampleStream.Merge(m, src) -} -func (m *SampleStream) XXX_Size() int { - return m.Size() -} -func (m *SampleStream) XXX_DiscardUnknown() { - xxx_messageInfo_SampleStream.DiscardUnknown(m) -} - -var xxx_messageInfo_SampleStream proto.InternalMessageInfo - -func (m *SampleStream) GetSamples() []cortexpb.Sample { - if m != nil { - return m.Samples - } - return nil -} - -type CachedResponse struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"` - // List of cached responses; non-overlapping and in order. 
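- // As an assumed example: a cached 00:00-04:00 range query might be stored as
- // extents covering [00:00, 02:00] and [03:00, 04:00]; a later request then
- // fetches only the missing 02:00-03:00 slice downstream and inserts it as a
- // new extent, preserving the non-overlapping, ordered invariant above.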
- Extents []Extent `protobuf:"bytes,2,rep,name=extents,proto3" json:"extents"` -} - -func (m *CachedResponse) Reset() { *m = CachedResponse{} } -func (*CachedResponse) ProtoMessage() {} -func (*CachedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{9} -} -func (m *CachedResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CachedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CachedResponse.Merge(m, src) -} -func (m *CachedResponse) XXX_Size() int { - return m.Size() -} -func (m *CachedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CachedResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CachedResponse proto.InternalMessageInfo - -func (m *CachedResponse) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *CachedResponse) GetExtents() []Extent { - if m != nil { - return m.Extents - } - return nil -} - -type Extent struct { - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start"` - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end"` - TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"-"` - Response *types.Any `protobuf:"bytes,5,opt,name=response,proto3" json:"response"` -} - -func (m *Extent) Reset() { *m = Extent{} } -func (*Extent) ProtoMessage() {} -func (*Extent) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{10} -} -func (m *Extent) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Extent.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Extent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Extent.Merge(m, src) -} -func (m *Extent) XXX_Size() int { - return m.Size() -} -func (m *Extent) XXX_DiscardUnknown() { - xxx_messageInfo_Extent.DiscardUnknown(m) -} - -var xxx_messageInfo_Extent proto.InternalMessageInfo - -func (m *Extent) GetStart() int64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *Extent) GetEnd() int64 { - if m != nil { - return m.End - } - return 0 -} - -func (m *Extent) GetTraceId() string { - if m != nil { - return m.TraceId - } - return "" -} - -func (m *Extent) GetResponse() *types.Any { - if m != nil { - return m.Response - } - return nil -} - -type CachingOptions struct { - Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (m *CachingOptions) Reset() { *m = CachingOptions{} } -func (*CachingOptions) ProtoMessage() {} -func (*CachingOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_79b02382e213d0b2, []int{11} -} -func (m *CachingOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CachingOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CachingOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CachingOptions) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_CachingOptions.Merge(m, src) -} -func (m *CachingOptions) XXX_Size() int { - return m.Size() -} -func (m *CachingOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CachingOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_CachingOptions proto.InternalMessageInfo - -func (m *CachingOptions) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func init() { - proto.RegisterType((*PrometheusRequestHeader)(nil), "queryrange.PrometheusRequestHeader") - proto.RegisterType((*PrometheusRequest)(nil), "queryrange.PrometheusRequest") - proto.RegisterType((*PrometheusResponseHeader)(nil), "queryrange.PrometheusResponseHeader") - proto.RegisterType((*PrometheusResponse)(nil), "queryrange.PrometheusResponse") - proto.RegisterType((*PrometheusData)(nil), "queryrange.PrometheusData") - proto.RegisterType((*PrometheusResponseStats)(nil), "queryrange.PrometheusResponseStats") - proto.RegisterType((*PrometheusResponseSamplesStats)(nil), "queryrange.PrometheusResponseSamplesStats") - proto.RegisterType((*PrometheusResponseQueryableSamplesStatsPerStep)(nil), "queryrange.PrometheusResponseQueryableSamplesStatsPerStep") - proto.RegisterType((*SampleStream)(nil), "queryrange.SampleStream") - proto.RegisterType((*CachedResponse)(nil), "queryrange.CachedResponse") - proto.RegisterType((*Extent)(nil), "queryrange.Extent") - proto.RegisterType((*CachingOptions)(nil), "queryrange.CachingOptions") -} - -func init() { proto.RegisterFile("queryrange.proto", fileDescriptor_79b02382e213d0b2) } - -var fileDescriptor_79b02382e213d0b2 = []byte{ - // 1007 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4f, 0x6f, 0x1b, 0x55, - 0x10, 0xf7, 0xfa, 0xbf, 0xc7, 0x91, 0x1b, 0x5e, 0x0a, 0x5d, 0x47, 0xb0, 0x6b, 0xb6, 0x1c, 0x02, - 0x6a, 0x1d, 0x29, 0x88, 0x03, 0x95, 0x40, 0xed, 0x92, 0xa0, 0xf2, 0xaf, 0x4d, 0x9f, 0x2b, 0x0e, - 0x5c, 0xaa, 0x67, 0xef, 0xc3, 0xd9, 0xd6, 0xeb, 0xdd, 0xbe, 0x7d, 0x8b, 0xe2, 0x1b, 0x1f, 0x81, - 0x03, 0x07, 0x8e, 0x1c, 0x41, 0xe2, 0x63, 0x70, 0xe8, 0x31, 0xe2, 0x54, 0x71, 0x58, 0x88, 0x73, - 0x41, 0x7b, 0xea, 0x47, 0x40, 0xef, 0xcf, 0xda, 0x1b, 0x27, 0x35, 0xaa, 0xb8, 0x58, 0x6f, 0xe6, - 0xcd, 0x6f, 0x66, 0xde, 0x6f, 0x66, 0x67, 0x0c, 0x9b, 0x4f, 0x13, 0xca, 0x66, 0x8c, 0x4c, 0xc7, - 0xb4, 0x1f, 0xb1, 0x90, 0x87, 0x08, 0x96, 0x9a, 0xed, 0x9b, 0x63, 0x9f, 0x1f, 0x25, 0xc3, 0xfe, - 0x28, 0x0c, 0x76, 0xc7, 0xe1, 0x38, 0xdc, 0x95, 0x26, 0xc3, 0xe4, 0x5b, 0x29, 0x49, 0x41, 0x9e, - 0x14, 0x74, 0xdb, 0x1a, 0x87, 0xe1, 0x78, 0x42, 0x97, 0x56, 0x5e, 0xc2, 0x08, 0xf7, 0xc3, 0xa9, - 0xbe, 0xff, 0xb0, 0xe0, 0x6e, 0x14, 0x32, 0x4e, 0x8f, 0x23, 0x16, 0x3e, 0xa6, 0x23, 0xae, 0xa5, - 0xdd, 0xe8, 0xc9, 0x38, 0xbf, 0x18, 0xea, 0x83, 0x86, 0x76, 0x57, 0x5d, 0x93, 0xe9, 0x4c, 0x5d, - 0x39, 0x03, 0xb8, 0x76, 0xc8, 0xc2, 0x80, 0xf2, 0x23, 0x9a, 0xc4, 0x98, 0x3e, 0x4d, 0x68, 0xcc, - 0xef, 0x52, 0xe2, 0x51, 0x86, 0xba, 0x50, 0xbd, 0x47, 0x02, 0x6a, 0x1a, 0x3d, 0x63, 0xa7, 0xe5, - 0xd6, 0xb2, 0xd4, 0x36, 0x6e, 0x62, 0xa9, 0x42, 0x6f, 0x41, 0xfd, 0x6b, 0x32, 0x49, 0x68, 0x6c, - 0x96, 0x7b, 0x95, 0xe5, 0xa5, 0x56, 0x3a, 0x69, 0x19, 0x5e, 0xbb, 0xe0, 0x15, 0x21, 0xa8, 0x46, - 0x84, 0x1f, 0x29, 0x7f, 0x58, 0x9e, 0xd1, 0x55, 0xa8, 0xc5, 0x9c, 0x30, 0x6e, 0x96, 0x7b, 0xc6, - 0x4e, 0x05, 0x2b, 0x01, 0x6d, 0x42, 0x85, 0x4e, 0x3d, 0xb3, 0x22, 0x75, 0xe2, 0x28, 0xb0, 0x31, - 0xa7, 0x91, 0x59, 0x95, 0x2a, 0x79, 0x46, 0x1f, 0x41, 0x83, 0xfb, 0x01, 0x0d, 0x13, 0x6e, 0xd6, - 0x7a, 0xc6, 0x4e, 0x7b, 0xaf, 0xdb, 0x57, 
0xef, 0xec, 0xe7, 0xef, 0xec, 0xef, 0x6b, 0x0a, 0xdd, - 0xe6, 0xb3, 0xd4, 0x2e, 0xfd, 0xf4, 0x97, 0x6d, 0xe0, 0x1c, 0x23, 0x42, 0xcb, 0x62, 0x99, 0x75, - 0x99, 0x8f, 0x12, 0xd0, 0x5d, 0xe8, 0x8c, 0xc8, 0xe8, 0xc8, 0x9f, 0x8e, 0xef, 0x47, 0x02, 0x19, - 0x9b, 0x0d, 0xe9, 0x7b, 0xbb, 0x5f, 0xa8, 0xf5, 0x27, 0xe7, 0x2c, 0xdc, 0xaa, 0x70, 0x8e, 0x57, - 0x70, 0x68, 0x1f, 0x1a, 0x8a, 0xc8, 0xd8, 0x6c, 0xf6, 0x2a, 0x3b, 0xed, 0xbd, 0xeb, 0x45, 0x17, - 0x2f, 0x21, 0x3d, 0x67, 0x32, 0x87, 0x6a, 0x82, 0x78, 0x6c, 0xb6, 0x54, 0x96, 0x52, 0x70, 0x1e, - 0x82, 0x59, 0x74, 0x10, 0x47, 0xe1, 0x34, 0xa6, 0xff, 0xbb, 0x6c, 0xbf, 0x96, 0x01, 0x5d, 0x74, - 0x8b, 0x1c, 0xa8, 0x0f, 0x38, 0xe1, 0x49, 0xac, 0x5d, 0x42, 0x96, 0xda, 0xf5, 0x58, 0x6a, 0xb0, - 0xbe, 0x41, 0x9f, 0x42, 0x75, 0x9f, 0x70, 0x22, 0xcb, 0xb8, 0x42, 0xd6, 0xd2, 0xa3, 0xb0, 0x70, - 0xdf, 0x10, 0x64, 0x65, 0xa9, 0xdd, 0xf1, 0x08, 0x27, 0x37, 0xc2, 0xc0, 0xe7, 0x34, 0x88, 0xf8, - 0x0c, 0x4b, 0x3c, 0xfa, 0x00, 0x5a, 0x07, 0x8c, 0x85, 0xec, 0xe1, 0x2c, 0xa2, 0xb2, 0xfe, 0x2d, - 0xf7, 0x5a, 0x96, 0xda, 0x5b, 0x34, 0x57, 0x16, 0x10, 0x4b, 0x4b, 0xf4, 0x2e, 0xd4, 0xa4, 0x20, - 0xfb, 0xa3, 0xe5, 0x6e, 0x65, 0xa9, 0x7d, 0x45, 0x42, 0x0a, 0xe6, 0xca, 0x02, 0x1d, 0x2c, 0xcb, - 0x52, 0x93, 0x65, 0x79, 0xe7, 0x65, 0x65, 0x29, 0xb2, 0xba, 0x5a, 0x17, 0xe7, 0x0f, 0x03, 0x3a, - 0xe7, 0x5f, 0x86, 0xfa, 0x00, 0x98, 0xc6, 0xc9, 0x84, 0xcb, 0xe4, 0x15, 0x57, 0x9d, 0x2c, 0xb5, - 0x81, 0x2d, 0xb4, 0xb8, 0x60, 0x81, 0x6e, 0x43, 0x5d, 0x49, 0xb2, 0x1a, 0xed, 0x3d, 0xb3, 0x98, - 0xc8, 0x80, 0x04, 0xd1, 0x84, 0x0e, 0x38, 0xa3, 0x24, 0x70, 0x3b, 0x9a, 0xb3, 0xba, 0xf2, 0x84, - 0x35, 0x0e, 0xdd, 0xcb, 0x9b, 0xa3, 0x22, 0x69, 0xbf, 0xbe, 0xfe, 0x25, 0xa2, 0x54, 0xb1, 0xe2, - 0x46, 0xa2, 0x8a, 0xdc, 0xa8, 0xb6, 0x9a, 0x9c, 0x1f, 0x06, 0x05, 0x18, 0x7a, 0x00, 0x8d, 0x58, - 0xa6, 0xa4, 0xba, 0xa0, 0xbd, 0xf7, 0xde, 0x7f, 0x04, 0x53, 0xc6, 0x2a, 0x66, 0x3b, 0x4b, 0xed, - 0x1c, 0x8e, 0xf3, 0x83, 0xf3, 0x63, 0x19, 0xac, 0xf5, 0x40, 0x74, 0x1f, 0x5e, 0xe7, 0x21, 0x27, - 0x93, 0x07, 0x22, 0x14, 0x19, 0x4e, 0xf2, 0x5b, 0x99, 0x43, 0xc5, 0xed, 0x66, 0xa9, 0x7d, 0xb9, - 0x01, 0xbe, 0x5c, 0x8d, 0x7e, 0x36, 0xe0, 0xcd, 0x4b, 0x6f, 0x0e, 0x29, 0x1b, 0x88, 0x01, 0xa3, - 0x4a, 0x71, 0x6b, 0xfd, 0xe3, 0x56, 0xc1, 0x32, 0x59, 0xed, 0xc1, 0xed, 0x65, 0xa9, 0xbd, 0x36, - 0x06, 0x5e, 0x7b, 0xeb, 0xf8, 0xf0, 0x8a, 0x11, 0xc5, 0x8c, 0xf8, 0x4e, 0x7c, 0xc1, 0x8a, 0x15, - 0xac, 0x04, 0xf4, 0x36, 0x6c, 0x88, 0x51, 0x17, 0x73, 0x12, 0x44, 0x8f, 0x82, 0x58, 0x4f, 0xd8, - 0xf6, 0x42, 0xf7, 0x55, 0xec, 0xfc, 0x6e, 0xc0, 0x46, 0xb1, 0xd1, 0xd0, 0x31, 0xd4, 0x27, 0x64, - 0x48, 0x27, 0x82, 0x60, 0xc1, 0xc3, 0x56, 0x3f, 0x5f, 0x28, 0xfd, 0x2f, 0x85, 0xfe, 0x90, 0xf8, - 0xcc, 0xfd, 0x42, 0x74, 0xe3, 0x9f, 0xa9, 0xfd, 0x4a, 0x0b, 0x49, 0xe1, 0xef, 0x78, 0x24, 0xe2, - 0x94, 0x89, 0x56, 0x0e, 0x28, 0x67, 0xfe, 0x08, 0xeb, 0x78, 0xe8, 0xd6, 0xb2, 0xbf, 0x54, 0x09, - 0x36, 0x97, 0xa1, 0x55, 0x8a, 0xcb, 0xaf, 0x40, 0xbe, 0xaf, 0xd0, 0x48, 0x8f, 0xa1, 0x23, 0x26, - 0x32, 0xf5, 0x16, 0x23, 0xab, 0x0b, 0x95, 0x27, 0x74, 0xa6, 0xbf, 0xc1, 0x46, 0x96, 0xda, 0x42, - 0xc4, 0xe2, 0x47, 0x6c, 0x0d, 0x7a, 0xcc, 0xe9, 0x94, 0xe7, 0x81, 0x50, 0xb1, 0xd6, 0x07, 0xf2, - 0xca, 0xbd, 0xa2, 0x43, 0xe5, 0xa6, 0x38, 0x3f, 0x38, 0xbf, 0x19, 0x50, 0x57, 0x46, 0xc8, 0xce, - 0x77, 0x97, 0x6a, 0xc6, 0x56, 0x96, 0xda, 0x4a, 0x91, 0xaf, 0xb1, 0xae, 0x5a, 0x63, 0x92, 0x78, - 0x95, 0x05, 0x9d, 0x7a, 0x6a, 0x9f, 0xf5, 0xa0, 0xc9, 0x19, 0x19, 0xd1, 0x47, 0xbe, 0xa7, 0x67, - 0x56, 0x3e, 0x60, 0xa4, 0xfa, 0x33, 0x0f, 0x7d, 0x0c, 0x4d, 0xa6, 
0x9f, 0xa3, 0xd7, 0xdb, 0xd5, - 0x0b, 0xeb, 0xed, 0xce, 0x74, 0xe6, 0x6e, 0x64, 0xa9, 0xbd, 0xb0, 0xc4, 0x8b, 0xd3, 0xe7, 0xd5, - 0x66, 0x65, 0xb3, 0xea, 0xdc, 0x50, 0xd4, 0x14, 0xd6, 0xd2, 0x36, 0x34, 0x3d, 0x3f, 0x16, 0xad, - 0xe4, 0xc9, 0xc4, 0x9b, 0x78, 0x21, 0xbb, 0xb7, 0x4f, 0x4e, 0xad, 0xd2, 0xf3, 0x53, 0xab, 0xf4, - 0xe2, 0xd4, 0x32, 0xbe, 0x9f, 0x5b, 0xc6, 0x2f, 0x73, 0xcb, 0x78, 0x36, 0xb7, 0x8c, 0x93, 0xb9, - 0x65, 0xfc, 0x3d, 0xb7, 0x8c, 0x7f, 0xe6, 0x56, 0xe9, 0xc5, 0xdc, 0x32, 0x7e, 0x38, 0xb3, 0x4a, - 0x27, 0x67, 0x56, 0xe9, 0xf9, 0x99, 0x55, 0xfa, 0xa6, 0xf0, 0x9f, 0x67, 0x58, 0x97, 0xb9, 0xbd, - 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x24, 0x32, 0x4a, 0x1a, 0x09, 0x00, 0x00, -} - -func (this *PrometheusRequestHeader) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusRequestHeader) - if !ok { - that2, ok := that.(PrometheusRequestHeader) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if len(this.Values) != len(that1.Values) { - return false - } - for i := range this.Values { - if this.Values[i] != that1.Values[i] { - return false - } - } - return true -} -func (this *PrometheusRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusRequest) - if !ok { - that2, ok := that.(PrometheusRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Path != that1.Path { - return false - } - if this.Start != that1.Start { - return false - } - if this.End != that1.End { - return false - } - if this.Step != that1.Step { - return false - } - if this.Timeout != that1.Timeout { - return false - } - if this.Query != that1.Query { - return false - } - if !this.CachingOptions.Equal(&that1.CachingOptions) { - return false - } - if len(this.Headers) != len(that1.Headers) { - return false - } - for i := range this.Headers { - if !this.Headers[i].Equal(that1.Headers[i]) { - return false - } - } - if this.Stats != that1.Stats { - return false - } - return true -} -func (this *PrometheusResponseHeader) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusResponseHeader) - if !ok { - that2, ok := that.(PrometheusResponseHeader) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if len(this.Values) != len(that1.Values) { - return false - } - for i := range this.Values { - if this.Values[i] != that1.Values[i] { - return false - } - } - return true -} -func (this *PrometheusResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusResponse) - if !ok { - that2, ok := that.(PrometheusResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Status != that1.Status { - return false - } - if !this.Data.Equal(&that1.Data) { - return false - } - if this.ErrorType != that1.ErrorType { - return false - } - if this.Error != that1.Error { - return false - } - if len(this.Headers) != len(that1.Headers) { - return false - } - for i := range this.Headers { - if 
!this.Headers[i].Equal(that1.Headers[i]) { - return false - } - } - return true -} -func (this *PrometheusData) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusData) - if !ok { - that2, ok := that.(PrometheusData) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ResultType != that1.ResultType { - return false - } - if len(this.Result) != len(that1.Result) { - return false - } - for i := range this.Result { - if !this.Result[i].Equal(&that1.Result[i]) { - return false - } - } - if !this.Stats.Equal(that1.Stats) { - return false - } - return true -} -func (this *PrometheusResponseStats) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusResponseStats) - if !ok { - that2, ok := that.(PrometheusResponseStats) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Samples.Equal(that1.Samples) { - return false - } - return true -} -func (this *PrometheusResponseSamplesStats) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusResponseSamplesStats) - if !ok { - that2, ok := that.(PrometheusResponseSamplesStats) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.TotalQueryableSamples != that1.TotalQueryableSamples { - return false - } - if len(this.TotalQueryableSamplesPerStep) != len(that1.TotalQueryableSamplesPerStep) { - return false - } - for i := range this.TotalQueryableSamplesPerStep { - if !this.TotalQueryableSamplesPerStep[i].Equal(that1.TotalQueryableSamplesPerStep[i]) { - return false - } - } - return true -} -func (this *PrometheusResponseQueryableSamplesStatsPerStep) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PrometheusResponseQueryableSamplesStatsPerStep) - if !ok { - that2, ok := that.(PrometheusResponseQueryableSamplesStatsPerStep) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Value != that1.Value { - return false - } - if this.TimestampMs != that1.TimestampMs { - return false - } - return true -} -func (this *SampleStream) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*SampleStream) - if !ok { - that2, ok := that.(SampleStream) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Samples) != len(that1.Samples) { - return false - } - for i := range this.Samples { - if !this.Samples[i].Equal(&that1.Samples[i]) { - return false - } - } - return true -} -func (this *CachedResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CachedResponse) - if !ok { - that2, ok := that.(CachedResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Key != that1.Key { - 
return false - } - if len(this.Extents) != len(that1.Extents) { - return false - } - for i := range this.Extents { - if !this.Extents[i].Equal(&that1.Extents[i]) { - return false - } - } - return true -} -func (this *Extent) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Extent) - if !ok { - that2, ok := that.(Extent) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Start != that1.Start { - return false - } - if this.End != that1.End { - return false - } - if this.TraceId != that1.TraceId { - return false - } - if !this.Response.Equal(that1.Response) { - return false - } - return true -} -func (this *CachingOptions) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CachingOptions) - if !ok { - that2, ok := that.(CachingOptions) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Disabled != that1.Disabled { - return false - } - return true -} -func (this *PrometheusRequestHeader) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrange.PrometheusRequestHeader{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 13) - s = append(s, "&queryrange.PrometheusRequest{") - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") - s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") - s = append(s, "Step: "+fmt.Sprintf("%#v", this.Step)+",\n") - s = append(s, "Timeout: "+fmt.Sprintf("%#v", this.Timeout)+",\n") - s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") - s = append(s, "CachingOptions: "+strings.Replace(this.CachingOptions.GoString(), `&`, ``, 1)+",\n") - if this.Headers != nil { - s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") - } - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusResponseHeader) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrange.PrometheusResponseHeader{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&queryrange.PrometheusResponse{") - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - s = append(s, "Data: "+strings.Replace(this.Data.GoString(), `&`, ``, 1)+",\n") - s = append(s, "ErrorType: "+fmt.Sprintf("%#v", this.ErrorType)+",\n") - s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") - if this.Headers != nil { - s = append(s, "Headers: "+fmt.Sprintf("%#v", this.Headers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusData) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&queryrange.PrometheusData{") - s = append(s, 
"ResultType: "+fmt.Sprintf("%#v", this.ResultType)+",\n") - if this.Result != nil { - vs := make([]*SampleStream, len(this.Result)) - for i := range vs { - vs[i] = &this.Result[i] - } - s = append(s, "Result: "+fmt.Sprintf("%#v", vs)+",\n") - } - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusResponseStats) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&queryrange.PrometheusResponseStats{") - if this.Samples != nil { - s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusResponseSamplesStats) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrange.PrometheusResponseSamplesStats{") - s = append(s, "TotalQueryableSamples: "+fmt.Sprintf("%#v", this.TotalQueryableSamples)+",\n") - if this.TotalQueryableSamplesPerStep != nil { - s = append(s, "TotalQueryableSamplesPerStep: "+fmt.Sprintf("%#v", this.TotalQueryableSamplesPerStep)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PrometheusResponseQueryableSamplesStatsPerStep) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrange.PrometheusResponseQueryableSamplesStatsPerStep{") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SampleStream) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrange.SampleStream{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Samples != nil { - vs := make([]*cortexpb.Sample, len(this.Samples)) - for i := range vs { - vs[i] = &this.Samples[i] - } - s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CachedResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&queryrange.CachedResponse{") - s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") - if this.Extents != nil { - vs := make([]*Extent, len(this.Extents)) - for i := range vs { - vs[i] = &this.Extents[i] - } - s = append(s, "Extents: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Extent) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&queryrange.Extent{") - s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") - s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") - s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") - if this.Response != nil { - s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CachingOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&queryrange.CachingOptions{") - s = append(s, "Disabled: "+fmt.Sprintf("%#v", this.Disabled)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringQueryrange(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return 
fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *PrometheusRequestHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusRequestHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusRequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Stats) > 0 { - i -= len(m.Stats) - copy(dAtA[i:], m.Stats) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Stats))) - i-- - dAtA[i] = 0x4a - } - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - { - size, err := m.CachingOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x32 - } - n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Timeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Timeout):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintQueryrange(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x2a - if m.Step != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) - i-- - dAtA[i] = 0x20 - } - if m.End != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x18 - } - if m.Start != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x10 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusResponseHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusResponseHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - 
if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 - } - if len(m.ErrorType) > 0 { - i -= len(m.ErrorType) - copy(dAtA[i:], m.ErrorType) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ErrorType))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Status) > 0 { - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Stats != nil { - { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Result) > 0 { - for iNdEx := len(m.Result) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Result[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.ResultType) > 0 { - i -= len(m.ResultType) - copy(dAtA[i:], m.ResultType) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ResultType))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusResponseStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusResponseStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusResponseStats) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Samples != nil { - { - size, err := m.Samples.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrometheusResponseSamplesStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusResponseSamplesStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusResponseSamplesStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TotalQueryableSamplesPerStep) > 0 { - for iNdEx := len(m.TotalQueryableSamplesPerStep) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.TotalQueryableSamplesPerStep[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.TotalQueryableSamples != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.TotalQueryableSamples)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *PrometheusResponseQueryableSamplesStatsPerStep) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusResponseQueryableSamplesStatsPerStep) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusResponseQueryableSamplesStatsPerStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TimestampMs != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.TimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.Value != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *SampleStream) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SampleStream) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CachedResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) { - 
size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Extents) > 0 { - for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Extent) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Extent) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.TraceId) > 0 { - i -= len(m.TraceId) - copy(dAtA[i:], m.TraceId) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TraceId))) - i-- - dAtA[i] = 0x22 - } - if m.End != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) - i-- - dAtA[i] = 0x10 - } - if m.Start != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CachingOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CachingOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CachingOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int { - offset -= sovQueryrange(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PrometheusRequestHeader) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *PrometheusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if m.Start != 0 { - n += 1 + sovQueryrange(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovQueryrange(uint64(m.End)) - } - if m.Step != 0 { - n += 1 + sovQueryrange(uint64(m.Step)) - } - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Timeout) - n += 1 + l + sovQueryrange(uint64(l)) - l = len(m.Query) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - l = m.CachingOptions.Size() - n += 1 + l + sovQueryrange(uint64(l)) - if len(m.Headers) > 0 { - for _, e 
:= range m.Headers { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - l = len(m.Stats) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - -func (m *PrometheusResponseHeader) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *PrometheusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Status) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - l = m.Data.Size() - n += 1 + l + sovQueryrange(uint64(l)) - l = len(m.ErrorType) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *PrometheusData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ResultType) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Result) > 0 { - for _, e := range m.Result { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - -func (m *PrometheusResponseStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Samples != nil { - l = m.Samples.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - -func (m *PrometheusResponseSamplesStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TotalQueryableSamples != 0 { - n += 1 + sovQueryrange(uint64(m.TotalQueryableSamples)) - } - if len(m.TotalQueryableSamplesPerStep) > 0 { - for _, e := range m.TotalQueryableSamplesPerStep { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *PrometheusResponseQueryableSamplesStatsPerStep) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != 0 { - n += 1 + sovQueryrange(uint64(m.Value)) - } - if m.TimestampMs != 0 { - n += 1 + sovQueryrange(uint64(m.TimestampMs)) - } - return n -} - -func (m *SampleStream) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - if len(m.Samples) > 0 { - for _, e := range m.Samples { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *CachedResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if len(m.Extents) > 0 { - for _, e := range m.Extents { - l = e.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - } - return n -} - -func (m *Extent) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Start != 0 { - n += 1 + sovQueryrange(uint64(m.Start)) - } - if m.End != 0 { - n += 1 + sovQueryrange(uint64(m.End)) - } - l = len(m.TraceId) - if l > 0 { - n += 1 + l + sovQueryrange(uint64(l)) - } - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovQueryrange(uint64(l)) - } - return n -} - -func (m *CachingOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Disabled { - n += 2 - } - return n -} - -func sovQueryrange(x uint64) (n int) { - return 
(math_bits.Len64(x|1) + 6) / 7 -} -func sozQueryrange(x uint64) (n int) { - return sovQueryrange(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PrometheusRequestHeader) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusRequestHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForHeaders := "[]*PrometheusRequestHeader{" - for _, f := range this.Headers { - repeatedStringForHeaders += strings.Replace(f.String(), "PrometheusRequestHeader", "PrometheusRequestHeader", 1) + "," - } - repeatedStringForHeaders += "}" - s := strings.Join([]string{`&PrometheusRequest{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Start:` + fmt.Sprintf("%v", this.Start) + `,`, - `End:` + fmt.Sprintf("%v", this.End) + `,`, - `Step:` + fmt.Sprintf("%v", this.Step) + `,`, - `Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `Query:` + fmt.Sprintf("%v", this.Query) + `,`, - `CachingOptions:` + strings.Replace(strings.Replace(this.CachingOptions.String(), "CachingOptions", "CachingOptions", 1), `&`, ``, 1) + `,`, - `Headers:` + repeatedStringForHeaders + `,`, - `Stats:` + fmt.Sprintf("%v", this.Stats) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusResponseHeader) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusResponseHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForHeaders := "[]*PrometheusResponseHeader{" - for _, f := range this.Headers { - repeatedStringForHeaders += strings.Replace(f.String(), "PrometheusResponseHeader", "PrometheusResponseHeader", 1) + "," - } - repeatedStringForHeaders += "}" - s := strings.Join([]string{`&PrometheusResponse{`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "PrometheusData", "PrometheusData", 1), `&`, ``, 1) + `,`, - `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Headers:` + repeatedStringForHeaders + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusData) String() string { - if this == nil { - return "nil" - } - repeatedStringForResult := "[]SampleStream{" - for _, f := range this.Result { - repeatedStringForResult += strings.Replace(strings.Replace(f.String(), "SampleStream", "SampleStream", 1), `&`, ``, 1) + "," - } - repeatedStringForResult += "}" - s := strings.Join([]string{`&PrometheusData{`, - `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`, - `Result:` + repeatedStringForResult + `,`, - `Stats:` + strings.Replace(this.Stats.String(), "PrometheusResponseStats", "PrometheusResponseStats", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusResponseStats) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusResponseStats{`, - `Samples:` + strings.Replace(this.Samples.String(), "PrometheusResponseSamplesStats", "PrometheusResponseSamplesStats", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusResponseSamplesStats) String() string { - if this == nil { - return "nil" - } 
- repeatedStringForTotalQueryableSamplesPerStep := "[]*PrometheusResponseQueryableSamplesStatsPerStep{" - for _, f := range this.TotalQueryableSamplesPerStep { - repeatedStringForTotalQueryableSamplesPerStep += strings.Replace(f.String(), "PrometheusResponseQueryableSamplesStatsPerStep", "PrometheusResponseQueryableSamplesStatsPerStep", 1) + "," - } - repeatedStringForTotalQueryableSamplesPerStep += "}" - s := strings.Join([]string{`&PrometheusResponseSamplesStats{`, - `TotalQueryableSamples:` + fmt.Sprintf("%v", this.TotalQueryableSamples) + `,`, - `TotalQueryableSamplesPerStep:` + repeatedStringForTotalQueryableSamplesPerStep + `,`, - `}`, - }, "") - return s -} -func (this *PrometheusResponseQueryableSamplesStatsPerStep) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusResponseQueryableSamplesStatsPerStep{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, - `}`, - }, "") - return s -} -func (this *SampleStream) String() string { - if this == nil { - return "nil" - } - repeatedStringForSamples := "[]Sample{" - for _, f := range this.Samples { - repeatedStringForSamples += fmt.Sprintf("%v", f) + "," - } - repeatedStringForSamples += "}" - s := strings.Join([]string{`&SampleStream{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Samples:` + repeatedStringForSamples + `,`, - `}`, - }, "") - return s -} -func (this *CachedResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForExtents := "[]Extent{" - for _, f := range this.Extents { - repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + "," - } - repeatedStringForExtents += "}" - s := strings.Join([]string{`&CachedResponse{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Extents:` + repeatedStringForExtents + `,`, - `}`, - }, "") - return s -} -func (this *Extent) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Extent{`, - `Start:` + fmt.Sprintf("%v", this.Start) + `,`, - `End:` + fmt.Sprintf("%v", this.End) + `,`, - `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "Any", "types.Any", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CachingOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CachingOptions{`, - `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, - `}`, - }, "") - return s -} -func valueToStringQueryrange(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PrometheusRequestHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusRequestHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusRequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) 
- } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) - } - m.Step = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Step |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Timeout, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CachingOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &PrometheusRequestHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Stats", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusResponseHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*PrometheusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, &PrometheusResponseHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResultType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Result = append(m.Result, SampleStream{}) - if err := m.Result[len(m.Result)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stats == nil { - m.Stats = &PrometheusResponseStats{} - } - if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusResponseStats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseStats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseStats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Samples == nil { - m.Samples = &PrometheusResponseSamplesStats{} - } - if err := m.Samples.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusResponseSamplesStats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseSamplesStats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseSamplesStats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalQueryableSamples", wireType) - } - m.TotalQueryableSamples = 0 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalQueryableSamples |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalQueryableSamplesPerStep", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TotalQueryableSamplesPerStep = append(m.TotalQueryableSamplesPerStep, &PrometheusResponseQueryableSamplesStatsPerStep{}) - if err := m.TotalQueryableSamplesPerStep[len(m.TotalQueryableSamplesPerStep)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrometheusResponseQueryableSamplesStatsPerStep) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusResponseQueryableSamplesStatsPerStep: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusResponseQueryableSamplesStatsPerStep: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) - } - m.TimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SampleStream) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - 
var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SampleStream: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SampleStream: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Samples = append(m.Samples, cortexpb.Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CachedResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CachedResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CachedResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extents", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Extents = append(m.Extents, Extent{}) - if err := m.Extents[len(m.Extents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Extent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Extent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Extent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - m.End = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.End |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TraceId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQueryrange - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQueryrange - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Response == nil { - m.Response = &types.Any{} - } - if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CachingOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CachingOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CachingOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQueryrange - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipQueryrange(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthQueryrange - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQueryrange(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } 
- if length < 0 { - return 0, ErrInvalidLengthQueryrange - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthQueryrange - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQueryrange - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipQueryrange(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthQueryrange - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthQueryrange = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQueryrange = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.proto b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.proto deleted file mode 100644 index ec8b12012..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto3"; - -package queryrange; - -option go_package = "queryrange"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; -import "google/protobuf/any.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -message PrometheusRequestHeader { - string Name = 1 [(gogoproto.jsontag) = "-"]; - repeated string Values = 2 [(gogoproto.jsontag) = "-"]; -} -message PrometheusRequest { - string path = 1; - int64 start = 2; - int64 end = 3; - int64 step = 4; - google.protobuf.Duration timeout = 5 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; - string query = 6; - CachingOptions cachingOptions = 7 [(gogoproto.nullable) = false]; - repeated PrometheusRequestHeader Headers = 8 [(gogoproto.jsontag) = "-"]; - string stats = 9; -} - -message PrometheusResponseHeader { - string Name = 1 [(gogoproto.jsontag) = "-"]; - repeated string Values = 2 [(gogoproto.jsontag) = "-"]; -} - -message PrometheusResponse { - string Status = 1 [(gogoproto.jsontag) = "status"]; - PrometheusData Data = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "data,omitempty"]; - string ErrorType = 3 [(gogoproto.jsontag) = "errorType,omitempty"]; - string Error = 4 [(gogoproto.jsontag) = "error,omitempty"]; - repeated PrometheusResponseHeader Headers = 5 [(gogoproto.jsontag) = "-"]; -} - -message PrometheusData { - string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; - repeated SampleStream Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result"]; - PrometheusResponseStats stats = 3 [(gogoproto.jsontag) = "stats,omitempty"]; -} - -message PrometheusResponseStats { - PrometheusResponseSamplesStats samples = 1 [(gogoproto.jsontag) = "samples"]; -} - -message PrometheusResponseSamplesStats { - int64 totalQueryableSamples = 1 [(gogoproto.jsontag) = "totalQueryableSamples"]; - repeated PrometheusResponseQueryableSamplesStatsPerStep totalQueryableSamplesPerStep = 2 [(gogoproto.jsontag) = 
"totalQueryableSamplesPerStep"]; -} - -message PrometheusResponseQueryableSamplesStatsPerStep { - int64 value = 1; - int64 timestamp_ms = 2; -} - -message SampleStream { - repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "metric", (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"]; - repeated cortexpb.Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "values"]; -} - -message CachedResponse { - string key = 1 [(gogoproto.jsontag) = "key"]; - - // List of cached responses; non-overlapping and in order. - repeated Extent extents = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "extents"]; -} - -message Extent { - int64 start = 1 [(gogoproto.jsontag) = "start"]; - int64 end = 2 [(gogoproto.jsontag) = "end"]; - // reserved the previous key to ensure cache transition - reserved 3; - string trace_id = 4 [(gogoproto.jsontag) = "-"]; - google.protobuf.Any response = 5 [(gogoproto.jsontag) = "response"]; -} - -message CachingOptions { - bool disabled = 1; -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go deleted file mode 100644 index 8d6db6e5a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go +++ /dev/null @@ -1,262 +0,0 @@ -package queryrange - -import ( - "context" - fmt "fmt" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/querier/astmapper" - "github.com/cortexproject/cortex/pkg/querier/lazyquery" - "github.com/cortexproject/cortex/pkg/util" -) - -var ( - errInvalidShardingRange = errors.New("Query does not fit in a single sharding configuration") -) - -// ShardingConfigs is a slice of chunk shard configs -type ShardingConfigs []chunk.PeriodConfig - -// ValidRange extracts a non-overlapping sharding configuration from a list of configs and a time range. -func (confs ShardingConfigs) ValidRange(start, end int64) (chunk.PeriodConfig, error) { - for i, conf := range confs { - if start < int64(conf.From.Time) { - // the query starts before this config's range - return chunk.PeriodConfig{}, errInvalidShardingRange - } else if i == len(confs)-1 { - // the last configuration has no upper bound - return conf, nil - } else if end < int64(confs[i+1].From.Time) { - // The request is entirely scoped into this shard config - return conf, nil - } else { - continue - } - } - - return chunk.PeriodConfig{}, errInvalidShardingRange -} - -// GetConf will extract a shardable config corresponding to a request and the shardingconfigs -func (confs ShardingConfigs) GetConf(r Request) (chunk.PeriodConfig, error) { - conf, err := confs.ValidRange(r.GetStart(), r.GetEnd()) - - // query exists across multiple sharding configs - if err != nil { - return conf, err - } - - // query doesn't have shard factor, so don't try to do AST mapping. 
- if conf.RowShards < 2 { - return conf, errors.Errorf("shard factor not high enough: [%d]", conf.RowShards) - } - - return conf, nil -} - -func (confs ShardingConfigs) hasShards() bool { - for _, conf := range confs { - if conf.RowShards > 0 { - return true - } - } - return false -} - -func mapQuery(mapper astmapper.ASTMapper, query string) (parser.Node, error) { - expr, err := parser.ParseExpr(query) - if err != nil { - return nil, err - } - return mapper.Map(expr) -} - -// NewQueryShardMiddleware creates a middleware which downstreams queries after AST mapping and query encoding. -func NewQueryShardMiddleware( - logger log.Logger, - engine *promql.Engine, - confs ShardingConfigs, - codec Codec, - minShardingLookback time.Duration, - metrics *InstrumentMiddlewareMetrics, - registerer prometheus.Registerer, -) Middleware { - - noshards := !confs.hasShards() - - if noshards { - level.Warn(logger).Log( - "middleware", "QueryShard", - "msg", "no configuration with shard found", - "confs", fmt.Sprintf("%+v", confs), - ) - return PassthroughMiddleware - } - - mapperware := MiddlewareFunc(func(next Handler) Handler { - return newASTMapperware(confs, next, logger, registerer) - }) - - shardingware := MiddlewareFunc(func(next Handler) Handler { - return &queryShard{ - confs: confs, - next: next, - engine: engine, - } - }) - - return MiddlewareFunc(func(next Handler) Handler { - return &shardSplitter{ - codec: codec, - MinShardingLookback: minShardingLookback, - shardingware: MergeMiddlewares( - InstrumentMiddleware("shardingware", metrics), - mapperware, - shardingware, - ).Wrap(next), - now: time.Now, - next: InstrumentMiddleware("sharding-bypass", metrics).Wrap(next), - } - }) - -} - -type astMapperware struct { - confs ShardingConfigs - logger log.Logger - next Handler - - // Metrics. 
- registerer prometheus.Registerer - mappedASTCounter prometheus.Counter - shardedQueriesCounter prometheus.Counter -} - -func newASTMapperware(confs ShardingConfigs, next Handler, logger log.Logger, registerer prometheus.Registerer) *astMapperware { - return &astMapperware{ - confs: confs, - logger: log.With(logger, "middleware", "QueryShard.astMapperware"), - next: next, - registerer: registerer, - mappedASTCounter: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "frontend_mapped_asts_total", - Help: "Total number of queries that have undergone AST mapping", - }), - shardedQueriesCounter: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "frontend_sharded_queries_total", - Help: "Total number of sharded queries", - }), - } -} - -func (ast *astMapperware) Do(ctx context.Context, r Request) (Response, error) { - conf, err := ast.confs.GetConf(r) - // cannot shard with this timerange - if err != nil { - level.Warn(ast.logger).Log("err", err.Error(), "msg", "skipped AST mapper for request") - return ast.next.Do(ctx, r) - } - - shardSummer, err := astmapper.NewShardSummer(int(conf.RowShards), astmapper.VectorSquasher, ast.shardedQueriesCounter) - if err != nil { - return nil, err - } - - subtreeFolder := astmapper.NewSubtreeFolder() - - strQuery := r.GetQuery() - mappedQuery, err := mapQuery( - astmapper.NewMultiMapper( - shardSummer, - subtreeFolder, - ), - strQuery, - ) - - if err != nil { - return nil, err - } - - strMappedQuery := mappedQuery.String() - level.Debug(ast.logger).Log("msg", "mapped query", "original", strQuery, "mapped", strMappedQuery) - ast.mappedASTCounter.Inc() - - return ast.next.Do(ctx, r.WithQuery(strMappedQuery)) - -} - -type queryShard struct { - confs ShardingConfigs - next Handler - engine *promql.Engine -} - -func (qs *queryShard) Do(ctx context.Context, r Request) (Response, error) { - // since there's no available sharding configuration for this time range, - // no astmapping has been performed, so skip this middleware. - if _, err := qs.confs.GetConf(r); err != nil { - return qs.next.Do(ctx, r) - } - - shardedQueryable := &ShardedQueryable{Req: r, Handler: qs.next} - - queryable := lazyquery.NewLazyQueryable(shardedQueryable) - - qry, err := qs.engine.NewRangeQuery( - queryable, - nil, - r.GetQuery(), - util.TimeFromMillis(r.GetStart()), - util.TimeFromMillis(r.GetEnd()), - time.Duration(r.GetStep())*time.Millisecond, - ) - - if err != nil { - return nil, err - } - res := qry.Exec(ctx) - extracted, err := FromResult(res) - if err != nil { - return nil, err - - } - return &PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: string(res.Value.Type()), - Result: extracted, - }, - Headers: shardedQueryable.getResponseHeaders(), - }, nil -} - -// shardSplitter middleware will only shard appropriate requests that do not extend past the MinShardingLookback interval. -// This is used to send nonsharded requests to the ingesters in order to not overload them. 
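// Illustrative sketch (not part of the original file): with
// MinShardingLookback = 1h, a query ending 30m ago still overlaps the window
// served by the ingesters, so it takes the non-sharded path; a query ending
// 2h ago falls entirely before the cutoff (now - 1h) and is sent to the
// sharded path.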
-type shardSplitter struct { - codec Codec - MinShardingLookback time.Duration // delimiter for splitting sharded vs non-sharded queries - shardingware Handler // handler for sharded queries - next Handler // handler for non-sharded queries - now func() time.Time // injectable time.Now -} - -func (splitter *shardSplitter) Do(ctx context.Context, r Request) (Response, error) { - cutoff := splitter.now().Add(-splitter.MinShardingLookback) - - // Only attempt to shard queries which are older than the sharding lookback (the period for which ingesters are also queried). - if !cutoff.After(util.TimeFromMillis(r.GetEnd())) { - return splitter.next.Do(ctx, r) - } - return splitter.shardingware.Do(ctx, r) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go deleted file mode 100644 index 3096300bf..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go +++ /dev/null @@ -1,715 +0,0 @@ -package queryrange - -import ( - "context" - "flag" - "fmt" - "net/http" - "sort" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/opentracing/opentracing-go" - otlog "github.com/opentracing/opentracing-go/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/timestamp" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" - "github.com/uber/jaeger-client-go" - "github.com/weaveworks/common/httpgrpc" - - "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util/flagext" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -var ( - // Value that cacheControlHeader has if the response indicates that the results should not be cached. - noStoreValue = "no-store" - - // ResultsCacheGenNumberHeaderName holds name of the header we want to set in http response - ResultsCacheGenNumberHeaderName = "Results-Cache-Gen-Number" -) - -type CacheGenNumberLoader interface { - GetResultsCacheGenNumber(tenantIDs []string) string -} - -// ResultsCacheConfig is the config for the results cache. -type ResultsCacheConfig struct { - CacheConfig cache.Config `yaml:"cache"` - Compression string `yaml:"compression"` - CacheQueryableSamplesStats bool `yaml:"cache_queryable_samples_stats"` -} - -// RegisterFlags registers flags. -func (cfg *ResultsCacheConfig) RegisterFlags(f *flag.FlagSet) { - cfg.CacheConfig.RegisterFlagsWithPrefix("frontend.", "", f) - - f.StringVar(&cfg.Compression, "frontend.compression", "", "Use compression in results cache. Supported values are: 'snappy' and '' (disable compression).") - f.BoolVar(&cfg.CacheQueryableSamplesStats, "frontend.cache-queryable-samples-stats", false, "Cache Statistics queryable samples on results cache.") - //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods - flagext.DeprecatedFlag(f, "frontend.cache-split-interval", "Deprecated: The maximum interval expected for each request, results will be cached per single interval. 
This behavior is now determined by querier.split-queries-by-interval.", util_log.Logger) -} - -func (cfg *ResultsCacheConfig) Validate(qCfg querier.Config) error { - switch cfg.Compression { - case "snappy", "": - // valid - default: - return errors.Errorf("unsupported compression type: %s", cfg.Compression) - } - - if cfg.CacheQueryableSamplesStats && !qCfg.EnablePerStepStats { - return errors.New("frontend.cache-queryable-samples-stats may only be enabled in conjunction with querier.per-step-stats-enabled. Please set the latter") - } - - return cfg.CacheConfig.Validate() -} - -// Extractor is used by the cache to extract a subset of a response from a cache entry. -type Extractor interface { - // Extract extracts a subset of a response from the `start` and `end` timestamps in milliseconds in the `from` response. - Extract(start, end int64, from Response) Response - ResponseWithoutHeaders(resp Response) Response - ResponseWithoutStats(resp Response) Response -} - -// PrometheusResponseExtractor helps extracting specific info from Query Response. -type PrometheusResponseExtractor struct{} - -// Extract extracts response for specific a range from a response. -func (PrometheusResponseExtractor) Extract(start, end int64, from Response) Response { - promRes := from.(*PrometheusResponse) - return &PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: promRes.Data.ResultType, - Result: extractMatrix(start, end, promRes.Data.Result), - Stats: extractStats(start, end, promRes.Data.Stats), - }, - Headers: promRes.Headers, - } -} - -// ResponseWithoutHeaders is useful in caching data without headers since -// we anyways do not need headers for sending back the response so this saves some space by reducing size of the objects. -func (PrometheusResponseExtractor) ResponseWithoutHeaders(resp Response) Response { - promRes := resp.(*PrometheusResponse) - return &PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: promRes.Data.ResultType, - Result: promRes.Data.Result, - Stats: promRes.Data.Stats, - }, - } -} - -// ResponseWithoutStats is returns the response without the stats information -func (PrometheusResponseExtractor) ResponseWithoutStats(resp Response) Response { - promRes := resp.(*PrometheusResponse) - return &PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: promRes.Data.ResultType, - Result: promRes.Data.Result, - }, - Headers: promRes.Headers, - } -} - -// CacheSplitter generates cache keys. This is a useful interface for downstream -// consumers who wish to implement their own strategies. -type CacheSplitter interface { - GenerateCacheKey(userID string, r Request) string -} - -// constSplitter is a utility for using a constant split interval when determining cache keys -type constSplitter time.Duration - -// GenerateCacheKey generates a cache key based on the userID, Request and interval. -func (t constSplitter) GenerateCacheKey(userID string, r Request) string { - currentInterval := r.GetStart() / int64(time.Duration(t)/time.Millisecond) - return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval) -} - -// ShouldCacheFn checks whether the current request should go to cache -// or not. If not, just send the request to next handler. 
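// A minimal ShouldCacheFn sketch, mirroring the wiring used later in
// roundtrip.go (skip the cache whenever the request carries disabled caching
// options):
//
//	var shouldCache ShouldCacheFn = func(r Request) bool {
//		return !r.GetCachingOptions().Disabled
//	}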
-type ShouldCacheFn func(r Request) bool - -type resultsCache struct { - logger log.Logger - cfg ResultsCacheConfig - next Handler - cache cache.Cache - limits Limits - splitter CacheSplitter - - extractor Extractor - minCacheExtent int64 // discard any cache extent smaller than this - merger Merger - cacheGenNumberLoader CacheGenNumberLoader - shouldCache ShouldCacheFn - cacheQueryableSamplesStats bool -} - -// NewResultsCacheMiddleware creates results cache middleware from config. -// The middleware cache result using a unique cache key for a given request (step,query,user) and interval. -// The cache assumes that each request length (end-start) is below or equal the interval. -// Each request starting from within the same interval will hit the same cache entry. -// If the cache doesn't have the entire duration of the request cached, it will query the uncached parts and append them to the cache entries. -// see `generateKey`. -func NewResultsCacheMiddleware( - logger log.Logger, - cfg ResultsCacheConfig, - splitter CacheSplitter, - limits Limits, - merger Merger, - extractor Extractor, - cacheGenNumberLoader CacheGenNumberLoader, - shouldCache ShouldCacheFn, - reg prometheus.Registerer, -) (Middleware, cache.Cache, error) { - c, err := cache.New(cfg.CacheConfig, reg, logger) - if err != nil { - return nil, nil, err - } - if cfg.Compression == "snappy" { - c = cache.NewSnappy(c, logger) - } - - if cacheGenNumberLoader != nil { - c = cache.NewCacheGenNumMiddleware(c) - } - - return MiddlewareFunc(func(next Handler) Handler { - return &resultsCache{ - logger: logger, - cfg: cfg, - next: next, - cache: c, - limits: limits, - merger: merger, - extractor: extractor, - minCacheExtent: (5 * time.Minute).Milliseconds(), - splitter: splitter, - cacheGenNumberLoader: cacheGenNumberLoader, - shouldCache: shouldCache, - cacheQueryableSamplesStats: cfg.CacheQueryableSamplesStats, - } - }), c, nil -} - -func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - respWithStats := r.GetStats() != "" && s.cacheQueryableSamplesStats - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - // If cache_queryable_samples_stats is enabled we always need request the status upstream - if s.cacheQueryableSamplesStats { - r = r.WithStats("all") - } else { - r = r.WithStats("") - } - - if s.shouldCache != nil && !s.shouldCache(r) { - return s.next.Do(ctx, r) - } - - if s.cacheGenNumberLoader != nil { - ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs)) - } - - var ( - key = s.splitter.GenerateCacheKey(tenant.JoinTenantIDs(tenantIDs), r) - extents []Extent - response Response - ) - - maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, s.limits.MaxCacheFreshness) - maxCacheTime := int64(model.Now().Add(-maxCacheFreshness)) - if r.GetStart() > maxCacheTime { - return s.next.Do(ctx, r) - } - - cached, ok := s.get(ctx, key) - if ok { - response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime) - } else { - response, extents, err = s.handleMiss(ctx, r, maxCacheTime) - } - - if err == nil && len(extents) > 0 { - extents, err := s.filterRecentExtents(r, maxCacheFreshness, extents) - if err != nil { - return nil, err - } - s.put(ctx, key, extents) - } - - if err == nil && !respWithStats { - response = s.extractor.ResponseWithoutStats(response) - } - return response, err -} - -// shouldCacheResponse says whether the response should be cached or not. 
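// Concretely, the implementation below declines to cache when any of the
// following holds: the response carries a "no-store" cache-control header,
// the results-cache generation numbers in the response disagree with the one
// from the store, or the query uses an @ modifier pointing past the query
// end or past maxCacheTime (see isAtModifierCachable).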
-func (s resultsCache) shouldCacheResponse(ctx context.Context, req Request, r Response, maxCacheTime int64) bool { - headerValues := getHeaderValuesWithName(r, cacheControlHeader) - for _, v := range headerValues { - if v == noStoreValue { - level.Debug(s.logger).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cacheControlHeader, noStoreValue)) - return false - } - } - - if !s.isAtModifierCachable(req, maxCacheTime) { - return false - } - - if s.cacheGenNumberLoader == nil { - return true - } - - genNumbersFromResp := getHeaderValuesWithName(r, ResultsCacheGenNumberHeaderName) - genNumberFromCtx := cache.ExtractCacheGenNumber(ctx) - - if len(genNumbersFromResp) == 0 && genNumberFromCtx != "" { - level.Debug(s.logger).Log("msg", fmt.Sprintf("we found results cache gen number %s set in store but none in headers", genNumberFromCtx)) - return false - } - - for _, gen := range genNumbersFromResp { - if gen != genNumberFromCtx { - level.Debug(s.logger).Log("msg", fmt.Sprintf("inconsistency in results cache gen numbers %s (GEN-FROM-RESPONSE) != %s (GEN-FROM-STORE), not caching the response", gen, genNumberFromCtx)) - return false - } - } - - return true -} - -var errAtModifierAfterEnd = errors.New("at modifier after end") - -// isAtModifierCachable returns true if the @ modifier result -// is safe to cache. -func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool { - // There are 2 cases when @ modifier is not safe to cache: - // 1. When @ modifier points to time beyond the maxCacheTime. - // 2. If the @ modifier time is > the query range end while being - // below maxCacheTime. In such cases if any tenant is intentionally - // playing with old data, we could cache empty result if we look - // beyond query end. - query := r.GetQuery() - if !strings.Contains(query, "@") { - return true - } - expr, err := parser.ParseExpr(query) - if err != nil { - // We are being pessimistic in such cases. - level.Warn(s.logger).Log("msg", "failed to parse query, considering @ modifier as not cachable", "query", query, "err", err) - return false - } - - // This resolves the start() and end() used with the @ modifier. - expr = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd())) - - end := r.GetEnd() - atModCachable := true - parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error { - switch e := n.(type) { - case *parser.VectorSelector: - if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) { - atModCachable = false - return errAtModifierAfterEnd - } - case *parser.MatrixSelector: - ts := e.VectorSelector.(*parser.VectorSelector).Timestamp - if ts != nil && (*ts > end || *ts > maxCacheTime) { - atModCachable = false - return errAtModifierAfterEnd - } - case *parser.SubqueryExpr: - if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) { - atModCachable = false - return errAtModifierAfterEnd - } - } - return nil - }) - - return atModCachable -} - -func getHeaderValuesWithName(r Response, headerName string) (headerValues []string) { - for _, hv := range r.GetHeaders() { - if hv.GetName() != headerName { - continue - } - - headerValues = append(headerValues, hv.GetValues()...) 
- } - - return -} - -func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) { - response, err := s.next.Do(ctx, r) - if err != nil { - return nil, nil, err - } - - if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) { - return response, []Extent{}, nil - } - - extent, err := toExtent(ctx, r, s.extractor.ResponseWithoutHeaders(response)) - if err != nil { - return nil, nil, err - } - - extents := []Extent{ - extent, - } - return response, extents, nil -} - -func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) { - var ( - reqResps []RequestResponse - err error - ) - log, ctx := spanlogger.New(ctx, "handleHit") - defer log.Finish() - - requests, responses, err := s.partition(r, extents) - if err != nil { - return nil, nil, err - } - if len(requests) == 0 { - response, err := s.merger.MergeResponse(responses...) - // No downstream requests so no need to write back to the cache. - return response, nil, err - } - - reqResps, err = DoRequests(ctx, s.next, requests, s.limits) - if err != nil { - return nil, nil, err - } - - for _, reqResp := range reqResps { - responses = append(responses, reqResp.Response) - if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) { - continue - } - extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response)) - if err != nil { - return nil, nil, err - } - extents = append(extents, extent) - } - sort.Slice(extents, func(i, j int) bool { - if extents[i].Start == extents[j].Start { - // as an optimization, for two extents starts at the same time, we - // put bigger extent at the front of the slice, which helps - // to reduce the amount of merge we have to do later. - return extents[i].End > extents[j].End - } - - return extents[i].Start < extents[j].Start - }) - - // Merge any extents - potentially overlapping - accumulator, err := newAccumulator(extents[0]) - if err != nil { - return nil, nil, err - } - mergedExtents := make([]Extent, 0, len(extents)) - - for i := 1; i < len(extents); i++ { - if accumulator.End+r.GetStep() < extents[i].Start { - mergedExtents, err = merge(mergedExtents, accumulator) - if err != nil { - return nil, nil, err - } - accumulator, err = newAccumulator(extents[i]) - if err != nil { - return nil, nil, err - } - continue - } - - if accumulator.End >= extents[i].End { - continue - } - - accumulator.TraceId = jaegerTraceID(ctx) - accumulator.End = extents[i].End - currentRes, err := extents[i].toResponse() - if err != nil { - return nil, nil, err - } - merged, err := s.merger.MergeResponse(accumulator.Response, currentRes) - if err != nil { - return nil, nil, err - } - accumulator.Response = merged - } - - mergedExtents, err = merge(mergedExtents, accumulator) - if err != nil { - return nil, nil, err - } - - response, err := s.merger.MergeResponse(responses...) 
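// Worked example (illustrative): with step=10ms, cached extents [100,200]
// and [210,300] merge into one extent [100,300], because
// accumulator.End+step >= next.Start (200+10 >= 210); extents [100,200] and
// [260,300] stay separate, since 200+10 < 260.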
- return response, mergedExtents, err -} - -type accumulator struct { - Response - Extent -} - -func merge(extents []Extent, acc *accumulator) ([]Extent, error) { - any, err := types.MarshalAny(acc.Response) - if err != nil { - return nil, err - } - return append(extents, Extent{ - Start: acc.Extent.Start, - End: acc.Extent.End, - Response: any, - TraceId: acc.Extent.TraceId, - }), nil -} - -func newAccumulator(base Extent) (*accumulator, error) { - res, err := base.toResponse() - if err != nil { - return nil, err - } - return &accumulator{ - Response: res, - Extent: base, - }, nil -} - -func toExtent(ctx context.Context, req Request, res Response) (Extent, error) { - any, err := types.MarshalAny(res) - if err != nil { - return Extent{}, err - } - return Extent{ - Start: req.GetStart(), - End: req.GetEnd(), - Response: any, - TraceId: jaegerTraceID(ctx), - }, nil -} - -// partition calculates the required requests to satisfy req given the cached data. -// extents must be in order by start time. -func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) { - var requests []Request - var cachedResponses []Response - start := req.GetStart() - - for _, extent := range extents { - // If there is no overlap, ignore this extent. - if extent.GetEnd() < start || extent.Start > req.GetEnd() { - continue - } - - // If this extent is tiny and request is not tiny, discard it: more efficient to do a few larger queries. - // Hopefully tiny request can make tiny extent into not-so-tiny extent. - - // However if the step is large enough, the split_query_by_interval middleware would generate a query with same start and end. - // For example, if the step size is more than 12h and the interval is 24h. - // This means the extent's start and end time would be same, even if the timerange covers several hours. - if (req.GetStart() != req.GetEnd()) && (req.GetEnd()-req.GetStart() > s.minCacheExtent) && (extent.End-extent.Start < s.minCacheExtent) { - continue - } - - // If there is a bit missing at the front, make a request for that. - if start < extent.Start { - r := req.WithStartEnd(start, extent.Start) - requests = append(requests, r) - } - res, err := extent.toResponse() - if err != nil { - return nil, nil, err - } - // extract the overlap from the cached extent. - cachedResponses = append(cachedResponses, s.extractor.Extract(start, req.GetEnd(), res)) - start = extent.End - } - - // Lastly, make a request for any data missing at the end. - if start < req.GetEnd() { - r := req.WithStartEnd(start, req.GetEnd()) - requests = append(requests, r) - } - - // If start and end are the same (valid in promql), start == req.GetEnd() and we won't do the query. - // But we should only do the request if we don't have a valid cached response for it. - if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 { - requests = append(requests, req) - } - - return requests, cachedResponses, nil -} - -func (s resultsCache) filterRecentExtents(req Request, maxCacheFreshness time.Duration, extents []Extent) ([]Extent, error) { - maxCacheTime := (int64(model.Now().Add(-maxCacheFreshness)) / req.GetStep()) * req.GetStep() - for i := range extents { - // Never cache data for the latest freshness period. 
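// For example (illustrative values): with maxCacheFreshness=10m, an extent
// ending 2m ago is truncated to end at the step-aligned maxCacheTime and its
// cached response is re-extracted to match; an extent that already ends
// before the cutoff is left untouched.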
- if extents[i].End > maxCacheTime { - extents[i].End = maxCacheTime - res, err := extents[i].toResponse() - if err != nil { - return nil, err - } - extracted := s.extractor.Extract(extents[i].Start, maxCacheTime, res) - any, err := types.MarshalAny(extracted) - if err != nil { - return nil, err - } - extents[i].Response = any - } - } - return extents, nil -} - -func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) { - found, bufs, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)}) - if len(found) != 1 { - return nil, false - } - - var resp CachedResponse - log, ctx := spanlogger.New(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck - defer log.Finish() - - log.LogFields(otlog.Int("bytes", len(bufs[0]))) - - if err := proto.Unmarshal(bufs[0], &resp); err != nil { - level.Error(log).Log("msg", "error unmarshalling cached value", "err", err) - log.Error(err) - return nil, false - } - - if resp.Key != key { - return nil, false - } - - // Refreshes the cache if it contains an old proto schema. - for _, e := range resp.Extents { - if e.Response == nil { - return nil, false - } - } - - return resp.Extents, true -} - -func (s resultsCache) put(ctx context.Context, key string, extents []Extent) { - buf, err := proto.Marshal(&CachedResponse{ - Key: key, - Extents: extents, - }) - if err != nil { - level.Error(s.logger).Log("msg", "error marshalling cached value", "err", err) - return - } - - s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf}) -} - -func jaegerTraceID(ctx context.Context) string { - span := opentracing.SpanFromContext(ctx) - if span == nil { - return "" - } - - spanContext, ok := span.Context().(jaeger.SpanContext) - if !ok { - return "" - } - - return spanContext.TraceID().String() -} - -// extractStats returns the stats for a given time range -// this function is similar to extractSampleStream -func extractStats(start, end int64, stats *PrometheusResponseStats) *PrometheusResponseStats { - if stats == nil || stats.Samples == nil { - return stats - } - - result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}} - for _, s := range stats.Samples.TotalQueryableSamplesPerStep { - if start <= s.TimestampMs && s.TimestampMs <= end { - result.Samples.TotalQueryableSamplesPerStep = append(result.Samples.TotalQueryableSamplesPerStep, s) - result.Samples.TotalQueryableSamples += s.Value - } - } - return result -} - -func extractMatrix(start, end int64, matrix []SampleStream) []SampleStream { - result := make([]SampleStream, 0, len(matrix)) - for _, stream := range matrix { - extracted, ok := extractSampleStream(start, end, stream) - if ok { - result = append(result, extracted) - } - } - return result -} - -func extractSampleStream(start, end int64, stream SampleStream) (SampleStream, bool) { - result := SampleStream{ - Labels: stream.Labels, - Samples: make([]cortexpb.Sample, 0, len(stream.Samples)), - } - for _, sample := range stream.Samples { - if start <= sample.TimestampMs && sample.TimestampMs <= end { - result.Samples = append(result.Samples, sample) - } - } - if len(result.Samples) == 0 { - return SampleStream{}, false - } - return result, true -} - -func (e *Extent) toResponse() (Response, error) { - msg, err := types.EmptyAny(e.Response) - if err != nil { - return nil, err - } - - if err := types.UnmarshalAny(e.Response, msg); err != nil { - return nil, err - } - - resp, ok := msg.(Response) - if !ok { - return nil, fmt.Errorf("bad cached type") - } - return resp, nil -} diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go deleted file mode 100644 index 72c97b3d1..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go +++ /dev/null @@ -1,85 +0,0 @@ -package queryrange - -import ( - "context" - "errors" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -type RetryMiddlewareMetrics struct { - retriesCount prometheus.Histogram -} - -func NewRetryMiddlewareMetrics(registerer prometheus.Registerer) *RetryMiddlewareMetrics { - return &RetryMiddlewareMetrics{ - retriesCount: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "query_frontend_retries", - Help: "Number of times a request is retried.", - Buckets: []float64{0, 1, 2, 3, 4, 5}, - }), - } -} - -type retry struct { - log log.Logger - next Handler - maxRetries int - - metrics *RetryMiddlewareMetrics -} - -// NewRetryMiddleware returns a middleware that retries requests if they -// fail with 500 or a non-HTTP error. -func NewRetryMiddleware(log log.Logger, maxRetries int, metrics *RetryMiddlewareMetrics) Middleware { - if metrics == nil { - metrics = NewRetryMiddlewareMetrics(nil) - } - - return MiddlewareFunc(func(next Handler) Handler { - return retry{ - log: log, - next: next, - maxRetries: maxRetries, - metrics: metrics, - } - }) -} - -func (r retry) Do(ctx context.Context, req Request) (Response, error) { - tries := 0 - defer func() { r.metrics.retriesCount.Observe(float64(tries)) }() - - var lastErr error - for ; tries < r.maxRetries; tries++ { - if ctx.Err() != nil { - return nil, ctx.Err() - } - resp, err := r.next.Do(ctx, req) - if err == nil { - return resp, nil - } - - if errors.Is(err, context.Canceled) { - return nil, err - } - - // Retry if we get a HTTP 500 or a non-HTTP error. - httpResp, ok := httpgrpc.HTTPResponseFromError(err) - if !ok || httpResp.Code/100 == 5 { - lastErr = err - level.Error(util_log.WithContext(ctx, r.log)).Log("msg", "error processing request", "try", tries, "err", err) - continue - } - - return nil, err - } - return nil, lastErr -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go deleted file mode 100644 index f6b31c4f2..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Mostly lifted from prometheus/web/api/v1/api.go. 
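// A hedged usage sketch for the retry middleware defined in retry.go above,
// assuming the Handler/Middleware types from this package; "next", "ctx" and
// "req" are illustrative names, not from the original source:
//
//	retrying := NewRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap(next)
//	resp, err := retrying.Do(ctx, req) // retried on HTTP 5xx and non-HTTP errors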
- -package queryrange - -import ( - "context" - "flag" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/promql" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/flagext" -) - -const day = 24 * time.Hour - -var ( - // PassthroughMiddleware is a noop middleware - PassthroughMiddleware = MiddlewareFunc(func(next Handler) Handler { - return next - }) - - errInvalidMinShardingLookback = errors.New("a non-zero value is required for querier.query-ingesters-within when -querier.parallelise-shardable-queries is enabled") -) - -// Config for query_range middleware chain. -type Config struct { - SplitQueriesByInterval time.Duration `yaml:"split_queries_by_interval"` - AlignQueriesWithStep bool `yaml:"align_queries_with_step"` - ResultsCacheConfig `yaml:"results_cache"` - CacheResults bool `yaml:"cache_results"` - MaxRetries int `yaml:"max_retries"` - ShardedQueries bool `yaml:"parallelise_shardable_queries"` - // List of headers which query_range middleware chain would forward to downstream querier. - ForwardHeaders flagext.StringSlice `yaml:"forward_headers_list"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxRetries, "querier.max-retries-per-request", 5, "Maximum number of retries for a single request; beyond this, the downstream error is returned.") - f.DurationVar(&cfg.SplitQueriesByInterval, "querier.split-queries-by-interval", 0, "Split queries by an interval and execute in parallel, 0 disables it. You should use an a multiple of 24 hours (same as the storage bucketing scheme), to avoid queriers downloading and processing the same chunks. This also determines how cache keys are chosen when result caching is enabled") - f.BoolVar(&cfg.AlignQueriesWithStep, "querier.align-querier-with-step", false, "Mutate incoming queries to align their start and end with their step.") - f.BoolVar(&cfg.CacheResults, "querier.cache-results", false, "Cache query results.") - f.BoolVar(&cfg.ShardedQueries, "querier.parallelise-shardable-queries", false, "Perform query parallelisations based on storage sharding configuration and query ASTs. This feature is supported only by the chunks storage engine.") - f.Var(&cfg.ForwardHeaders, "frontend.forward-headers-list", "List of headers forwarded by the query Frontend to downstream querier.") - cfg.ResultsCacheConfig.RegisterFlags(f) -} - -// Validate validates the config. -func (cfg *Config) Validate(qCfg querier.Config) error { - if cfg.CacheResults { - if cfg.SplitQueriesByInterval <= 0 { - return errors.New("querier.cache-results may only be enabled in conjunction with querier.split-queries-by-interval. Please set the latter") - } - if err := cfg.ResultsCacheConfig.Validate(qCfg); err != nil { - return errors.Wrap(err, "invalid ResultsCache config") - } - } - return nil -} - -// HandlerFunc is like http.HandlerFunc, but for Handler. 
-type HandlerFunc func(context.Context, Request) (Response, error) - -// Do implements Handler. -func (q HandlerFunc) Do(ctx context.Context, req Request) (Response, error) { - return q(ctx, req) -} - -// Handler is like http.Handle, but specifically for Prometheus query_range calls. -type Handler interface { - Do(context.Context, Request) (Response, error) -} - -// MiddlewareFunc is like http.HandlerFunc, but for Middleware. -type MiddlewareFunc func(Handler) Handler - -// Wrap implements Middleware. -func (q MiddlewareFunc) Wrap(h Handler) Handler { - return q(h) -} - -// Middleware is a higher order Handler. -type Middleware interface { - Wrap(Handler) Handler -} - -// MergeMiddlewares produces a middleware that applies multiple middleware in turn; -// ie Merge(f,g,h).Wrap(handler) == f.Wrap(g.Wrap(h.Wrap(handler))) -func MergeMiddlewares(middleware ...Middleware) Middleware { - return MiddlewareFunc(func(next Handler) Handler { - for i := len(middleware) - 1; i >= 0; i-- { - next = middleware[i].Wrap(next) - } - return next - }) -} - -// Tripperware is a signature for all http client-side middleware. -type Tripperware func(http.RoundTripper) http.RoundTripper - -// RoundTripFunc is to http.RoundTripper what http.HandlerFunc is to http.Handler. -type RoundTripFunc func(*http.Request) (*http.Response, error) - -// RoundTrip implements http.RoundTripper. -func (f RoundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return f(r) -} - -// NewTripperware returns a Tripperware configured with middlewares to limit, align, split, retry and cache requests. -func NewTripperware( - cfg Config, - log log.Logger, - limits Limits, - codec Codec, - cacheExtractor Extractor, - schema chunk.SchemaConfig, - engineOpts promql.EngineOpts, - minShardingLookback time.Duration, - registerer prometheus.Registerer, - cacheGenNumberLoader CacheGenNumberLoader, -) (Tripperware, cache.Cache, error) { - // Per tenant query metrics. - queriesPerTenant := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_query_frontend_queries_total", - Help: "Total queries sent per tenant.", - }, []string{"op", "user"}) - - activeUsers := util.NewActiveUsersCleanupWithDefaultValues(func(user string) { - err := util.DeleteMatchingLabels(queriesPerTenant, map[string]string{"user": user}) - if err != nil { - level.Warn(log).Log("msg", "failed to remove cortex_query_frontend_queries_total metric for user", "user", user) - } - }) - - // Metric used to keep track of each middleware execution duration. 
- metrics := NewInstrumentMiddlewareMetrics(registerer) - - queryRangeMiddleware := []Middleware{NewLimitsMiddleware(limits)} - if cfg.AlignQueriesWithStep { - queryRangeMiddleware = append(queryRangeMiddleware, InstrumentMiddleware("step_align", metrics), StepAlignMiddleware) - } - if cfg.SplitQueriesByInterval != 0 { - staticIntervalFn := func(_ Request) time.Duration { return cfg.SplitQueriesByInterval } - queryRangeMiddleware = append(queryRangeMiddleware, InstrumentMiddleware("split_by_interval", metrics), SplitByIntervalMiddleware(staticIntervalFn, limits, codec, registerer)) - } - - var c cache.Cache - if cfg.CacheResults { - shouldCache := func(r Request) bool { - return !r.GetCachingOptions().Disabled - } - queryCacheMiddleware, cache, err := NewResultsCacheMiddleware(log, cfg.ResultsCacheConfig, constSplitter(cfg.SplitQueriesByInterval), limits, codec, cacheExtractor, cacheGenNumberLoader, shouldCache, registerer) - if err != nil { - return nil, nil, err - } - c = cache - queryRangeMiddleware = append(queryRangeMiddleware, InstrumentMiddleware("results_cache", metrics), queryCacheMiddleware) - } - - if cfg.ShardedQueries { - if minShardingLookback == 0 { - return nil, nil, errInvalidMinShardingLookback - } - - shardingware := NewQueryShardMiddleware( - log, - promql.NewEngine(engineOpts), - schema.Configs, - codec, - minShardingLookback, - metrics, - registerer, - ) - - queryRangeMiddleware = append( - queryRangeMiddleware, - shardingware, // instrumentation is included in the sharding middleware - ) - } - - if cfg.MaxRetries > 0 { - queryRangeMiddleware = append(queryRangeMiddleware, InstrumentMiddleware("retry", metrics), NewRetryMiddleware(log, cfg.MaxRetries, NewRetryMiddlewareMetrics(registerer))) - } - - // Start cleanup. If cleaner stops or fail, we will simply not clean the metrics for inactive users. - _ = activeUsers.StartAsync(context.Background()) - return func(next http.RoundTripper) http.RoundTripper { - // Finally, if the user selected any query range middleware, stitch it in. - if len(queryRangeMiddleware) > 0 { - queryrange := NewRoundTripper(next, codec, cfg.ForwardHeaders, queryRangeMiddleware...) - return RoundTripFunc(func(r *http.Request) (*http.Response, error) { - isQueryRange := strings.HasSuffix(r.URL.Path, "/query_range") - op := "query" - if isQueryRange { - op = "query_range" - } - - tenantIDs, err := tenant.TenantIDs(r.Context()) - // This should never happen anyways because we have auth middleware before this. - if err != nil { - return nil, err - } - userStr := tenant.JoinTenantIDs(tenantIDs) - activeUsers.UpdateUserTimestamp(userStr, time.Now()) - queriesPerTenant.WithLabelValues(op, userStr).Inc() - - if !isQueryRange { - return next.RoundTrip(r) - } - return queryrange.RoundTrip(r) - }) - } - return next - }, c, nil -} - -type roundTripper struct { - next http.RoundTripper - handler Handler - codec Codec - headers []string -} - -// NewRoundTripper merges a set of middlewares into an handler, then inject it into the `next` roundtripper -// using the codec to translate requests and responses. 
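// A small composition sketch using the identity documented on
// MergeMiddlewares ("second" and "final" are illustrative names): the first
// middleware passed wraps outermost and therefore runs first.
//
//	first := MiddlewareFunc(func(next Handler) Handler {
//		return HandlerFunc(func(ctx context.Context, r Request) (Response, error) {
//			// pre-processing here runs before "second" and "final"
//			return next.Do(ctx, r)
//		})
//	})
//	chain := MergeMiddlewares(first, second).Wrap(final)
//	// chain == first.Wrap(second.Wrap(final))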
-func NewRoundTripper(next http.RoundTripper, codec Codec, headers []string, middlewares ...Middleware) http.RoundTripper { - transport := roundTripper{ - next: next, - codec: codec, - headers: headers, - } - transport.handler = MergeMiddlewares(middlewares...).Wrap(&transport) - return transport -} - -func (q roundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - - // include the headers specified in the roundTripper during decoding the request. - request, err := q.codec.DecodeRequest(r.Context(), r, q.headers) - if err != nil { - return nil, err - } - - if span := opentracing.SpanFromContext(r.Context()); span != nil { - request.LogToSpan(span) - } - - response, err := q.handler.Do(r.Context(), request) - if err != nil { - return nil, err - } - - return q.codec.EncodeResponse(r.Context(), response) -} - -// Do implements Handler. -func (q roundTripper) Do(ctx context.Context, r Request) (Response, error) { - request, err := q.codec.EncodeRequest(ctx, r) - if err != nil { - return nil, err - } - - if err := user.InjectOrgIDIntoHTTPRequest(ctx, request); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - response, err := q.next.RoundTrip(request) - if err != nil { - return nil, err - } - defer func() { - io.Copy(ioutil.Discard, io.LimitReader(response.Body, 1024)) - _ = response.Body.Close() - }() - - return q.codec.DecodeResponse(ctx, response, r) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/split_by_interval.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/split_by_interval.go deleted file mode 100644 index 3d52fc4cb..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/split_by_interval.go +++ /dev/null @@ -1,121 +0,0 @@ -package queryrange - -import ( - "context" - "net/http" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/promql/parser" - "github.com/weaveworks/common/httpgrpc" -) - -type IntervalFn func(r Request) time.Duration - -// SplitByIntervalMiddleware creates a new Middleware that splits requests by a given interval. -func SplitByIntervalMiddleware(interval IntervalFn, limits Limits, merger Merger, registerer prometheus.Registerer) Middleware { - return MiddlewareFunc(func(next Handler) Handler { - return splitByInterval{ - next: next, - limits: limits, - merger: merger, - interval: interval, - splitByCounter: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "frontend_split_queries_total", - Help: "Total number of underlying query requests after the split by interval is applied", - }), - } - }) -} - -type splitByInterval struct { - next Handler - limits Limits - merger Merger - interval IntervalFn - - // Metrics. - splitByCounter prometheus.Counter -} - -func (s splitByInterval) Do(ctx context.Context, r Request) (Response, error) { - // First we're going to build new requests, one for each day, taking care - // to line up the boundaries with step. - reqs, err := splitQuery(r, s.interval(r)) - if err != nil { - return nil, err - } - s.splitByCounter.Add(float64(len(reqs))) - - reqResps, err := DoRequests(ctx, s.next, reqs, s.limits) - if err != nil { - return nil, err - } - - resps := make([]Response, 0, len(reqResps)) - for _, reqResp := range reqResps { - resps = append(resps, reqResp.Response) - } - - response, err := s.merger.MergeResponse(resps...) 
- if err != nil { - return nil, err - } - return response, nil -} - -func splitQuery(r Request, interval time.Duration) ([]Request, error) { - // Replace @ modifier function to their respective constant values in the query. - // This way subqueries will be evaluated at the same time as the parent query. - query, err := evaluateAtModifierFunction(r.GetQuery(), r.GetStart(), r.GetEnd()) - if err != nil { - return nil, err - } - var reqs []Request - for start := r.GetStart(); start < r.GetEnd(); start = nextIntervalBoundary(start, r.GetStep(), interval) + r.GetStep() { - end := nextIntervalBoundary(start, r.GetStep(), interval) - if end+r.GetStep() >= r.GetEnd() { - end = r.GetEnd() - } - - reqs = append(reqs, r.WithQuery(query).WithStartEnd(start, end)) - } - return reqs, nil -} - -// evaluateAtModifierFunction parse the query and evaluates the `start()` and `end()` at modifier functions into actual constant timestamps. -// For example given the start of the query is 10.00, `http_requests_total[1h] @ start()` query will be replaced with `http_requests_total[1h] @ 10.00` -// If the modifier is already a constant, it will be returned as is. -func evaluateAtModifierFunction(query string, start, end int64) (string, error) { - expr, err := parser.ParseExpr(query) - if err != nil { - return "", httpgrpc.Errorf(http.StatusBadRequest, "%s", err) - } - parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error { - if selector, ok := n.(*parser.VectorSelector); ok { - switch selector.StartOrEnd { - case parser.START: - selector.Timestamp = &start - case parser.END: - selector.Timestamp = &end - } - selector.StartOrEnd = 0 - } - return nil - }) - return expr.String(), err -} - -// Round up to the step before the next interval boundary. -func nextIntervalBoundary(t, step int64, interval time.Duration) int64 { - msPerInterval := int64(interval / time.Millisecond) - startOfNextInterval := ((t / msPerInterval) + 1) * msPerInterval - // ensure that target is a multiple of steps away from the start time - target := startOfNextInterval - ((startOfNextInterval - t) % step) - if target == startOfNextInterval { - target -= step - } - return target -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/step_align.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/step_align.go deleted file mode 100644 index 19454f7a9..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/step_align.go +++ /dev/null @@ -1,23 +0,0 @@ -package queryrange - -import ( - "context" -) - -// StepAlignMiddleware aligns the start and end of request to the step to -// improved the cacheability of the query results. 
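// Worked example (illustrative, using seconds rather than the millisecond
// timestamps used in practice): with step=10, a request for [start=101,
// end=129] is aligned to [100, 120] via integer division ((t/step)*step), so
// repeated queries land on identical, cacheable boundaries.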
-var StepAlignMiddleware = MiddlewareFunc(func(next Handler) Handler { - return stepAlign{ - next: next, - } -}) - -type stepAlign struct { - next Handler -} - -func (s stepAlign) Do(ctx context.Context, r Request) (Response, error) { - start := (r.GetStart() / r.GetStep()) * r.GetStep() - end := (r.GetEnd() / r.GetStep()) * r.GetStep() - return s.next.Do(ctx, r.WithStartEnd(start, end)) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/test_utils.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/test_utils.go deleted file mode 100644 index 4b19b39da..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/test_utils.go +++ /dev/null @@ -1,185 +0,0 @@ -package queryrange - -import ( - "context" - "fmt" - "time" - - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/querier/astmapper" - "github.com/cortexproject/cortex/pkg/querier/series" -) - -// genLabels will create a slice of labels where each label has an equal chance to occupy a value from [0,labelBuckets]. It returns a slice of length labelBuckets^len(labelSet) -func genLabels( - labelSet []string, - labelBuckets int, -) (result []labels.Labels) { - if len(labelSet) == 0 { - return result - } - - l := labelSet[0] - rest := genLabels(labelSet[1:], labelBuckets) - - for i := 0; i < labelBuckets; i++ { - x := labels.Label{ - Name: l, - Value: fmt.Sprintf("%d", i), - } - if len(rest) == 0 { - set := labels.Labels{x} - result = append(result, set) - continue - } - for _, others := range rest { - set := append(others, x) - result = append(result, set) - } - } - return result - -} - -// NewMockShardedQueryable creates a shard-aware in memory queryable. -func NewMockShardedQueryable( - nSamples int, - labelSet []string, - labelBuckets int, - delayPerSeries time.Duration, -) *MockShardedQueryable { - samples := make([]model.SamplePair, 0, nSamples) - for i := 0; i < nSamples; i++ { - samples = append(samples, model.SamplePair{ - Timestamp: model.Time(i * 1000), - Value: model.SampleValue(i), - }) - } - sets := genLabels(labelSet, labelBuckets) - xs := make([]storage.Series, 0, len(sets)) - for _, ls := range sets { - xs = append(xs, series.NewConcreteSeries(ls, samples)) - } - - return &MockShardedQueryable{ - series: xs, - delayPerSeries: delayPerSeries, - } -} - -// MockShardedQueryable is exported to be reused in the querysharding benchmarking -type MockShardedQueryable struct { - series []storage.Series - delayPerSeries time.Duration -} - -// Querier impls storage.Queryable -func (q *MockShardedQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return q, nil -} - -// Select implements storage.Querier interface. -// The bool passed is ignored because the series is always sorted. 
-func (q *MockShardedQueryable) Select(_ bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - tStart := time.Now() - - shard, _, err := astmapper.ShardFromMatchers(matchers) - if err != nil { - return storage.ErrSeriesSet(err) - } - - var ( - start int - end int - ) - - if shard == nil { - start = 0 - end = len(q.series) - } else { - // return the series range associated with this shard - seriesPerShard := len(q.series) / shard.Of - start = shard.Shard * seriesPerShard - end = start + seriesPerShard - - // if we're clipping an odd # of series, add the final series to the last shard - if end == len(q.series)-1 && len(q.series)%2 == 1 { - end = len(q.series) - } - } - - var name string - for _, m := range matchers { - if m.Type == labels.MatchEqual && m.Name == "__name__" { - name = m.Value - } - } - - results := make([]storage.Series, 0, end-start) - for i := start; i < end; i++ { - results = append(results, &ShardLabelSeries{ - shard: shard, - name: name, - Series: q.series[i], - }) - } - - // loosely enforce the assumption that an operation on 1/nth of the data - // takes 1/nth of the time. - duration := q.delayPerSeries * time.Duration(len(q.series)) - if shard != nil { - duration = duration / time.Duration(shard.Of) - } - - remaining := time.Until(tStart.Add(duration)) - if remaining > 0 { - time.Sleep(remaining) - } - - // sorted - return series.NewConcreteSeriesSet(results) -} - -// ShardLabelSeries allows extending a Series with new labels. This is helpful for adding cortex shard labels -type ShardLabelSeries struct { - shard *astmapper.ShardAnnotation - name string - storage.Series -} - -// Labels impls storage.Series -func (s *ShardLabelSeries) Labels() labels.Labels { - ls := s.Series.Labels() - - if s.name != "" { - ls = append(ls, labels.Label{ - Name: "__name__", - Value: s.name, - }) - } - - if s.shard != nil { - ls = append(ls, s.shard.Label()) - } - - return ls -} - -// LabelValues impls storage.Querier -func (q *MockShardedQueryable) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, errors.Errorf("unimplemented") -} - -// LabelNames returns all the unique label names present in the block in sorted order. -func (q *MockShardedQueryable) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, errors.Errorf("unimplemented") -} - -// Close releases the resources of the Querier. -func (q *MockShardedQueryable) Close() error { - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go deleted file mode 100644 index 2b82e8b3b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go +++ /dev/null @@ -1,72 +0,0 @@ -package queryrange - -import ( - "context" - "net/http" - - "github.com/weaveworks/common/httpgrpc" - - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// RequestResponse contains a request response and the respective request that was used. -type RequestResponse struct { - Request Request - Response Response -} - -// DoRequests executes a list of requests in parallel. The limits parameters is used to limit parallelism per single request. 
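// The implementation below bounds fan-out with a classic worker-pool shape:
// requests are fed through an unbuffered channel, min(MaxQueryParallelism,
// len(reqs)) goroutines drain it, and the first error cancels the shared
// context so in-flight downstream calls fail fast.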
-func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits Limits) ([]RequestResponse, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } - - // If one of the requests fail, we want to be able to cancel the rest of them. - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Feed all requests to a bounded intermediate channel to limit parallelism. - intermediate := make(chan Request) - go func() { - for _, req := range reqs { - intermediate <- req - } - close(intermediate) - }() - - respChan, errChan := make(chan RequestResponse), make(chan error) - parallelism := validation.SmallestPositiveIntPerTenant(tenantIDs, limits.MaxQueryParallelism) - if parallelism > len(reqs) { - parallelism = len(reqs) - } - for i := 0; i < parallelism; i++ { - go func() { - for req := range intermediate { - resp, err := downstream.Do(ctx, req) - if err != nil { - errChan <- err - } else { - respChan <- RequestResponse{req, resp} - } - } - }() - } - - resps := make([]RequestResponse, 0, len(reqs)) - var firstErr error - for range reqs { - select { - case resp := <-respChan: - resps = append(resps, resp) - case err := <-errChan: - if firstErr == nil { - cancel() - firstErr = err - } - } - } - - return resps, firstErr -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go deleted file mode 100644 index 435e86932..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go +++ /dev/null @@ -1,125 +0,0 @@ -package queryrange - -import ( - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/querier/series" -) - -// FromResult transforms a promql query result into a samplestream -func FromResult(res *promql.Result) ([]SampleStream, error) { - if res.Err != nil { - // The error could be wrapped by the PromQL engine. We get the error's cause in order to - // correctly parse the error in parent callers (eg. gRPC response status code extraction). 
- return nil, errors.Cause(res.Err) - } - switch v := res.Value.(type) { - case promql.Scalar: - return []SampleStream{ - { - Samples: []cortexpb.Sample{ - { - Value: v.V, - TimestampMs: v.T, - }, - }, - }, - }, nil - - case promql.Vector: - res := make([]SampleStream, 0, len(v)) - for _, sample := range v { - res = append(res, SampleStream{ - Labels: mapLabels(sample.Metric), - Samples: mapPoints(sample.Point), - }) - } - return res, nil - - case promql.Matrix: - res := make([]SampleStream, 0, len(v)) - for _, series := range v { - res = append(res, SampleStream{ - Labels: mapLabels(series.Metric), - Samples: mapPoints(series.Points...), - }) - } - return res, nil - - } - - return nil, errors.Errorf("Unexpected value type: [%s]", res.Value.Type()) -} - -func mapLabels(ls labels.Labels) []cortexpb.LabelAdapter { - result := make([]cortexpb.LabelAdapter, 0, len(ls)) - for _, l := range ls { - result = append(result, cortexpb.LabelAdapter(l)) - } - - return result -} - -func mapPoints(pts ...promql.Point) []cortexpb.Sample { - result := make([]cortexpb.Sample, 0, len(pts)) - - for _, pt := range pts { - result = append(result, cortexpb.Sample{ - Value: pt.V, - TimestampMs: pt.T, - }) - } - - return result -} - -// ResponseToSamples is needed to map back from api response to the underlying series data -func ResponseToSamples(resp Response) ([]SampleStream, error) { - promRes, ok := resp.(*PrometheusResponse) - if !ok { - return nil, errors.Errorf("error invalid response type: %T, expected: %T", resp, &PrometheusResponse{}) - } - if promRes.Error != "" { - return nil, errors.New(promRes.Error) - } - switch promRes.Data.ResultType { - case string(parser.ValueTypeVector), string(parser.ValueTypeMatrix): - return promRes.Data.Result, nil - } - - return nil, errors.Errorf( - "Invalid promql.Value type: [%s]. Only %s and %s supported", - promRes.Data.ResultType, - parser.ValueTypeVector, - parser.ValueTypeMatrix, - ) -} - -// NewSeriesSet returns an in memory storage.SeriesSet from a []SampleStream -// As NewSeriesSet uses NewConcreteSeriesSet to implement SeriesSet, result will be sorted by label names. 
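// A minimal usage sketch (illustrative values), assuming the SampleStream and
// cortexpb types referenced in this package:
//
//	set := NewSeriesSet([]SampleStream{{
//		Labels:  []cortexpb.LabelAdapter{{Name: "__name__", Value: "up"}},
//		Samples: []cortexpb.Sample{{TimestampMs: 1000, Value: 1}},
//	}})
//	for set.Next() {
//		_ = set.At() // storage.Series, ordered by label names
//	}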
-func NewSeriesSet(results []SampleStream) storage.SeriesSet { - set := make([]storage.Series, 0, len(results)) - - for _, stream := range results { - samples := make([]model.SamplePair, 0, len(stream.Samples)) - for _, sample := range stream.Samples { - samples = append(samples, model.SamplePair{ - Timestamp: model.Time(sample.TimestampMs), - Value: model.SampleValue(sample.Value), - }) - } - - ls := make([]labels.Label, 0, len(stream.Labels)) - for _, l := range stream.Labels { - ls = append(ls, labels.Label(l)) - } - set = append(set, series.NewConcreteSeries(ls, samples)) - } - return series.NewConcreteSeriesSet(set) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go b/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go deleted file mode 100644 index 65dbc96e9..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go +++ /dev/null @@ -1,102 +0,0 @@ -package querier - -import ( - "net/http" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/util" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -// Queries are a set of matchers with time ranges - should not get into megabytes -const maxRemoteReadQuerySize = 1024 * 1024 - -// RemoteReadHandler handles Prometheus remote read requests. -func RemoteReadHandler(q storage.Queryable, logger log.Logger) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var req client.ReadRequest - logger := util_log.WithContext(r.Context(), logger) - if err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRemoteReadQuerySize, &req, util.RawSnappy); err != nil { - level.Error(logger).Log("msg", "failed to parse proto", "err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // Fetch samples for all queries in parallel. - resp := client.ReadResponse{ - Results: make([]*client.QueryResponse, len(req.Queries)), - } - errors := make(chan error) - for i, qr := range req.Queries { - go func(i int, qr *client.QueryRequest) { - from, to, matchers, err := client.FromQueryRequest(qr) - if err != nil { - errors <- err - return - } - - querier, err := q.Querier(ctx, int64(from), int64(to)) - if err != nil { - errors <- err - return - } - - params := &storage.SelectHints{ - Start: int64(from), - End: int64(to), - } - seriesSet := querier.Select(false, params, matchers...) 
- resp.Results[i], err = seriesSetToQueryResponse(seriesSet) - errors <- err - }(i, qr) - } - - var lastErr error - for range req.Queries { - err := <-errors - if err != nil { - lastErr = err - } - } - if lastErr != nil { - http.Error(w, lastErr.Error(), http.StatusBadRequest) - return - } - w.Header().Add("Content-Type", "application/x-protobuf") - if err := util.SerializeProtoResponse(w, &resp, util.RawSnappy); err != nil { - level.Error(logger).Log("msg", "error sending remote read response", "err", err) - } - }) -} - -func seriesSetToQueryResponse(s storage.SeriesSet) (*client.QueryResponse, error) { - result := &client.QueryResponse{} - - for s.Next() { - series := s.At() - samples := []cortexpb.Sample{} - it := series.Iterator() - for it.Next() { - t, v := it.At() - samples = append(samples, cortexpb.Sample{ - TimestampMs: t, - Value: v, - }) - } - if err := it.Err(); err != nil { - return nil, err - } - result.Timeseries = append(result.Timeseries, cortexpb.TimeSeries{ - Labels: cortexpb.FromLabelsToLabelAdapters(series.Labels()), - Samples: samples, - }) - } - - return result, s.Err() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go deleted file mode 100644 index 76896c6e8..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/series/series_set.go +++ /dev/null @@ -1,385 +0,0 @@ -// Some of the code in this file was adapted from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package series - -import ( - "sort" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/prom1/storage/metric" -) - -// ConcreteSeriesSet implements storage.SeriesSet. -type ConcreteSeriesSet struct { - cur int - series []storage.Series -} - -// NewConcreteSeriesSet instantiates an in-memory series set from a series -// Series will be sorted by labels. -func NewConcreteSeriesSet(series []storage.Series) storage.SeriesSet { - sort.Sort(byLabels(series)) - return &ConcreteSeriesSet{ - cur: -1, - series: series, - } -} - -// Next iterates through a series set and implements storage.SeriesSet. -func (c *ConcreteSeriesSet) Next() bool { - c.cur++ - return c.cur < len(c.series) -} - -// At returns the current series and implements storage.SeriesSet. -func (c *ConcreteSeriesSet) At() storage.Series { - return c.series[c.cur] -} - -// Err implements storage.SeriesSet. -func (c *ConcreteSeriesSet) Err() error { - return nil -} - -// Warnings implements storage.SeriesSet. -func (c *ConcreteSeriesSet) Warnings() storage.Warnings { - return nil -} - -// ConcreteSeries implements storage.Series. 
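The remote-read handler above fans each query out to its own goroutine and funnels failures through an unbuffered errors channel, keeping only the last one. A stripped-down sketch of that fan-out/collect pattern, with stand-in job functions; every goroutine sends exactly one value and the collector receives exactly len(jobs) values, so nothing leaks.

package main

import (
	"errors"
	"fmt"
)

// runAll executes one goroutine per job and returns the last error seen,
// mirroring the errors-channel pattern in RemoteReadHandler above.
func runAll(jobs []func() error) error {
	errs := make(chan error)
	for _, job := range jobs {
		go func(job func() error) {
			errs <- job()
		}(job)
	}

	var lastErr error
	for range jobs {
		if err := <-errs; err != nil {
			lastErr = err
		}
	}
	return lastErr
}

func main() {
	err := runAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("query 1 failed") },
	})
	fmt.Println(err) // query 1 failed
}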
-type ConcreteSeries struct { - labels labels.Labels - samples []model.SamplePair -} - -// NewConcreteSeries instantiates an in memory series from a list of samples & labels -func NewConcreteSeries(ls labels.Labels, samples []model.SamplePair) *ConcreteSeries { - return &ConcreteSeries{ - labels: ls, - samples: samples, - } -} - -// Labels implements storage.Series -func (c *ConcreteSeries) Labels() labels.Labels { - return c.labels -} - -// Iterator implements storage.Series -func (c *ConcreteSeries) Iterator() chunkenc.Iterator { - return NewConcreteSeriesIterator(c) -} - -// concreteSeriesIterator implements chunkenc.Iterator. -type concreteSeriesIterator struct { - cur int - series *ConcreteSeries -} - -// NewConcreteSeriesIterator instaniates an in memory chunkenc.Iterator -func NewConcreteSeriesIterator(series *ConcreteSeries) chunkenc.Iterator { - return &concreteSeriesIterator{ - cur: -1, - series: series, - } -} - -func (c *concreteSeriesIterator) Seek(t int64) bool { - c.cur = sort.Search(len(c.series.samples), func(n int) bool { - return c.series.samples[n].Timestamp >= model.Time(t) - }) - return c.cur < len(c.series.samples) -} - -func (c *concreteSeriesIterator) At() (t int64, v float64) { - s := c.series.samples[c.cur] - return int64(s.Timestamp), float64(s.Value) -} - -func (c *concreteSeriesIterator) Next() bool { - c.cur++ - return c.cur < len(c.series.samples) -} - -func (c *concreteSeriesIterator) Err() error { - return nil -} - -// NewErrIterator instantiates an errIterator -func NewErrIterator(err error) chunkenc.Iterator { - return errIterator{err} -} - -// errIterator implements chunkenc.Iterator, just returning an error. -type errIterator struct { - err error -} - -func (errIterator) Seek(int64) bool { - return false -} - -func (errIterator) Next() bool { - return false -} - -func (errIterator) At() (t int64, v float64) { - return 0, 0 -} - -func (e errIterator) Err() error { - return e.err -} - -// MatrixToSeriesSet creates a storage.SeriesSet from a model.Matrix -// Series will be sorted by labels. -func MatrixToSeriesSet(m model.Matrix) storage.SeriesSet { - series := make([]storage.Series, 0, len(m)) - for _, ss := range m { - series = append(series, &ConcreteSeries{ - labels: metricToLabels(ss.Metric), - samples: ss.Values, - }) - } - return NewConcreteSeriesSet(series) -} - -// MetricsToSeriesSet creates a storage.SeriesSet from a []metric.Metric -func MetricsToSeriesSet(ms []metric.Metric) storage.SeriesSet { - series := make([]storage.Series, 0, len(ms)) - for _, m := range ms { - series = append(series, &ConcreteSeries{ - labels: metricToLabels(m.Metric), - samples: nil, - }) - } - return NewConcreteSeriesSet(series) -} - -func metricToLabels(m model.Metric) labels.Labels { - ls := make(labels.Labels, 0, len(m)) - for k, v := range m { - ls = append(ls, labels.Label{ - Name: string(k), - Value: string(v), - }) - } - // PromQL expects all labels to be sorted! In general, anyone constructing - // a labels.Labels list is responsible for sorting it during construction time. 
- sort.Sort(ls) - return ls -} - -type byLabels []storage.Series - -func (b byLabels) Len() int { return len(b) } -func (b byLabels) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i].Labels(), b[j].Labels()) < 0 } - -type DeletedSeriesSet struct { - seriesSet storage.SeriesSet - tombstones *purger.TombstonesSet - queryInterval model.Interval -} - -func NewDeletedSeriesSet(seriesSet storage.SeriesSet, tombstones *purger.TombstonesSet, queryInterval model.Interval) storage.SeriesSet { - return &DeletedSeriesSet{ - seriesSet: seriesSet, - tombstones: tombstones, - queryInterval: queryInterval, - } -} - -func (d DeletedSeriesSet) Next() bool { - return d.seriesSet.Next() -} - -func (d DeletedSeriesSet) At() storage.Series { - series := d.seriesSet.At() - deletedIntervals := d.tombstones.GetDeletedIntervals(series.Labels(), d.queryInterval.Start, d.queryInterval.End) - - // series is deleted for whole query range so return empty series - if len(deletedIntervals) == 1 && deletedIntervals[0] == d.queryInterval { - return NewEmptySeries(series.Labels()) - } - - return NewDeletedSeries(series, deletedIntervals) -} - -func (d DeletedSeriesSet) Err() error { - return d.seriesSet.Err() -} - -func (d DeletedSeriesSet) Warnings() storage.Warnings { - return nil -} - -type DeletedSeries struct { - series storage.Series - deletedIntervals []model.Interval -} - -func NewDeletedSeries(series storage.Series, deletedIntervals []model.Interval) storage.Series { - return &DeletedSeries{ - series: series, - deletedIntervals: deletedIntervals, - } -} - -func (d DeletedSeries) Labels() labels.Labels { - return d.series.Labels() -} - -func (d DeletedSeries) Iterator() chunkenc.Iterator { - return NewDeletedSeriesIterator(d.series.Iterator(), d.deletedIntervals) -} - -type DeletedSeriesIterator struct { - itr chunkenc.Iterator - deletedIntervals []model.Interval -} - -func NewDeletedSeriesIterator(itr chunkenc.Iterator, deletedIntervals []model.Interval) chunkenc.Iterator { - return &DeletedSeriesIterator{ - itr: itr, - deletedIntervals: deletedIntervals, - } -} - -func (d DeletedSeriesIterator) Seek(t int64) bool { - if found := d.itr.Seek(t); !found { - return false - } - - seekedTs, _ := d.itr.At() - if d.isDeleted(seekedTs) { - // point we have seeked into is deleted, Next() should find a new non-deleted sample which is after t and seekedTs - return d.Next() - } - - return true -} - -func (d DeletedSeriesIterator) At() (t int64, v float64) { - return d.itr.At() -} - -func (d DeletedSeriesIterator) Next() bool { - for d.itr.Next() { - ts, _ := d.itr.At() - - if d.isDeleted(ts) { - continue - } - return true - } - return false -} - -func (d DeletedSeriesIterator) Err() error { - return d.itr.Err() -} - -// isDeleted removes intervals which are past ts while checking for whether ts happens to be in one of the deleted intervals -func (d *DeletedSeriesIterator) isDeleted(ts int64) bool { - mts := model.Time(ts) - - for _, interval := range d.deletedIntervals { - if mts > interval.End { - d.deletedIntervals = d.deletedIntervals[1:] - continue - } else if mts < interval.Start { - return false - } - - return true - } - - return false -} - -type emptySeries struct { - labels labels.Labels -} - -func NewEmptySeries(labels labels.Labels) storage.Series { - return emptySeries{labels} -} - -func (e emptySeries) Labels() labels.Labels { - return e.labels -} - -func (emptySeries) Iterator() chunkenc.Iterator { - return NewEmptySeriesIterator() -} - -type 
emptySeriesIterator struct { -} - -func NewEmptySeriesIterator() chunkenc.Iterator { - return emptySeriesIterator{} -} - -func (emptySeriesIterator) Seek(t int64) bool { - return false -} - -func (emptySeriesIterator) At() (t int64, v float64) { - return 0, 0 -} - -func (emptySeriesIterator) Next() bool { - return false -} - -func (emptySeriesIterator) Err() error { - return nil -} - -type seriesSetWithWarnings struct { - wrapped storage.SeriesSet - warnings storage.Warnings -} - -func NewSeriesSetWithWarnings(wrapped storage.SeriesSet, warnings storage.Warnings) storage.SeriesSet { - return seriesSetWithWarnings{ - wrapped: wrapped, - warnings: warnings, - } -} - -func (s seriesSetWithWarnings) Next() bool { - return s.wrapped.Next() -} - -func (s seriesSetWithWarnings) At() storage.Series { - return s.wrapped.At() -} - -func (s seriesSetWithWarnings) Err() error { - return s.wrapped.Err() -} - -func (s seriesSetWithWarnings) Warnings() storage.Warnings { - return append(s.wrapped.Warnings(), s.warnings...) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/series_with_chunks.go b/vendor/github.com/cortexproject/cortex/pkg/querier/series_with_chunks.go deleted file mode 100644 index fa5d8c7dd..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/series_with_chunks.go +++ /dev/null @@ -1,15 +0,0 @@ -package querier - -import ( - "github.com/prometheus/prometheus/storage" - - "github.com/cortexproject/cortex/pkg/chunk" -) - -// SeriesWithChunks extends storage.Series interface with direct access to Cortex chunks. -type SeriesWithChunks interface { - storage.Series - - // Returns all chunks with series data. - Chunks() []chunk.Chunk -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go deleted file mode 100644 index 1a39b3206..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go +++ /dev/null @@ -1,103 +0,0 @@ -package stats - -import ( - "context" - "sync/atomic" //lint:ignore faillint we can't use go.uber.org/atomic with a protobuf struct without wrapping it. - "time" - - "github.com/weaveworks/common/httpgrpc" -) - -type contextKey int - -var ctxKey = contextKey(0) - -// ContextWithEmptyStats returns a context with empty stats. -func ContextWithEmptyStats(ctx context.Context) (*Stats, context.Context) { - stats := &Stats{} - ctx = context.WithValue(ctx, ctxKey, stats) - return stats, ctx -} - -// FromContext gets the Stats out of the Context. Returns nil if stats have not -// been initialised in the context. -func FromContext(ctx context.Context) *Stats { - o := ctx.Value(ctxKey) - if o == nil { - return nil - } - return o.(*Stats) -} - -// IsEnabled returns whether stats tracking is enabled in the context. -func IsEnabled(ctx context.Context) bool { - // When query statistics are enabled, the stats object is already initialised - // within the context, so we can just check it. - return FromContext(ctx) != nil -} - -// AddWallTime adds some time to the counter. -func (s *Stats) AddWallTime(t time.Duration) { - if s == nil { - return - } - - atomic.AddInt64((*int64)(&s.WallTime), int64(t)) -} - -// LoadWallTime returns current wall time. 
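The stats package above threads a Stats object through the request context and makes every method safe on a nil receiver, so call sites never have to branch on whether tracking is enabled. A compact sketch of the same pattern with a hypothetical tracker type:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
)

type ctxKey struct{}

// tracker is a hypothetical stand-in for stats.Stats.
type tracker struct {
	series uint64
}

// AddSeries is safe on a nil receiver: when tracking is disabled the
// context carries no tracker and the call is a no-op.
func (t *tracker) AddSeries(n uint64) {
	if t == nil {
		return
	}
	atomic.AddUint64(&t.series, n)
}

func withTracker(ctx context.Context) (*tracker, context.Context) {
	t := &tracker{}
	return t, context.WithValue(ctx, ctxKey{}, t)
}

func fromContext(ctx context.Context) *tracker {
	t, _ := ctx.Value(ctxKey{}).(*tracker)
	return t // nil when tracking was never enabled
}

func main() {
	t, ctx := withTracker(context.Background())
	fromContext(ctx).AddSeries(3)
	fromContext(context.Background()).AddSeries(5) // no-op, no panic
	fmt.Println(atomic.LoadUint64(&t.series))      // 3
}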
-func (s *Stats) LoadWallTime() time.Duration { - if s == nil { - return 0 - } - - return time.Duration(atomic.LoadInt64((*int64)(&s.WallTime))) -} - -func (s *Stats) AddFetchedSeries(series uint64) { - if s == nil { - return - } - - atomic.AddUint64(&s.FetchedSeriesCount, series) -} - -func (s *Stats) LoadFetchedSeries() uint64 { - if s == nil { - return 0 - } - - return atomic.LoadUint64(&s.FetchedSeriesCount) -} - -func (s *Stats) AddFetchedChunkBytes(bytes uint64) { - if s == nil { - return - } - - atomic.AddUint64(&s.FetchedChunkBytes, bytes) -} - -func (s *Stats) LoadFetchedChunkBytes() uint64 { - if s == nil { - return 0 - } - - return atomic.LoadUint64(&s.FetchedChunkBytes) -} - -// Merge the provide Stats into this one. -func (s *Stats) Merge(other *Stats) { - if s == nil || other == nil { - return - } - - s.AddWallTime(other.LoadWallTime()) - s.AddFetchedSeries(other.LoadFetchedSeries()) - s.AddFetchedChunkBytes(other.LoadFetchedChunkBytes()) -} - -func ShouldTrackHTTPGRPCResponse(r *httpgrpc.HTTPResponse) bool { - // Do no track statistics for requests failed because of a server error. - return r.Code < 500 -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go deleted file mode 100644 index 9fd4affc1..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go +++ /dev/null @@ -1,500 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: stats.proto - -package stats - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/duration" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Stats struct { - // The sum of all wall time spent in the querier to execute the query. 
- WallTime time.Duration `protobuf:"bytes,1,opt,name=wall_time,json=wallTime,proto3,stdduration" json:"wall_time"` - // The number of series fetched for the query - FetchedSeriesCount uint64 `protobuf:"varint,2,opt,name=fetched_series_count,json=fetchedSeriesCount,proto3" json:"fetched_series_count,omitempty"` - // The number of bytes of the chunks fetched for the query - FetchedChunkBytes uint64 `protobuf:"varint,3,opt,name=fetched_chunk_bytes,json=fetchedChunkBytes,proto3" json:"fetched_chunk_bytes,omitempty"` -} - -func (m *Stats) Reset() { *m = Stats{} } -func (*Stats) ProtoMessage() {} -func (*Stats) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{0} -} -func (m *Stats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stats) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stats.Merge(m, src) -} -func (m *Stats) XXX_Size() int { - return m.Size() -} -func (m *Stats) XXX_DiscardUnknown() { - xxx_messageInfo_Stats.DiscardUnknown(m) -} - -var xxx_messageInfo_Stats proto.InternalMessageInfo - -func (m *Stats) GetWallTime() time.Duration { - if m != nil { - return m.WallTime - } - return 0 -} - -func (m *Stats) GetFetchedSeriesCount() uint64 { - if m != nil { - return m.FetchedSeriesCount - } - return 0 -} - -func (m *Stats) GetFetchedChunkBytes() uint64 { - if m != nil { - return m.FetchedChunkBytes - } - return 0 -} - -func init() { - proto.RegisterType((*Stats)(nil), "stats.Stats") -} - -func init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } - -var fileDescriptor_b4756a0aec8b9d44 = []byte{ - // 281 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0xd0, 0xb1, 0x4e, 0x83, 0x40, - 0x1c, 0xc7, 0xf1, 0xfb, 0xab, 0x35, 0x95, 0x4e, 0xa2, 0x03, 0x76, 0xf8, 0xb7, 0x71, 0xea, 0xe2, - 0xd5, 0xe8, 0xe8, 0x62, 0xa8, 0x4f, 0xd0, 0x3a, 0xb9, 0x10, 0xa0, 0x57, 0x20, 0x02, 0x67, 0xe0, - 0x2e, 0xc6, 0xcd, 0x47, 0x70, 0xf4, 0x11, 0x4c, 0x7c, 0x91, 0x8e, 0x8c, 0x9d, 0x54, 0x8e, 0xc5, - 0xb1, 0x8f, 0x60, 0xee, 0xa0, 0x71, 0xe3, 0x97, 0x0f, 0xdf, 0x4b, 0xee, 0xac, 0x41, 0x29, 0x7c, - 0x51, 0xd2, 0xa7, 0x82, 0x0b, 0x6e, 0xf7, 0xcc, 0x18, 0x5e, 0x44, 0x89, 0x88, 0x65, 0x40, 0x43, - 0x9e, 0x4d, 0x23, 0x1e, 0xf1, 0xa9, 0xd1, 0x40, 0xae, 0xcc, 0x32, 0xc3, 0x7c, 0xb5, 0xd5, 0x10, - 0x23, 0xce, 0xa3, 0x94, 0xfd, 0xff, 0xb5, 0x94, 0x85, 0x2f, 0x12, 0x9e, 0xb7, 0x7e, 0xfe, 0x09, - 0x56, 0x6f, 0xa1, 0x0f, 0xb6, 0x6f, 0xad, 0xa3, 0x67, 0x3f, 0x4d, 0x3d, 0x91, 0x64, 0xcc, 0x81, - 0x31, 0x4c, 0x06, 0x57, 0x67, 0xb4, 0xad, 0xe9, 0xae, 0xa6, 0x77, 0x5d, 0xed, 0xf6, 0xd7, 0x5f, - 0x23, 0xf2, 0xfe, 0x3d, 0x82, 0x79, 0x5f, 0x57, 0xf7, 0x49, 0xc6, 0xec, 0x4b, 0xeb, 0x74, 0xc5, - 0x44, 0x18, 0xb3, 0xa5, 0x57, 0xb2, 0x22, 0x61, 0xa5, 0x17, 0x72, 0x99, 0x0b, 0x67, 0x6f, 0x0c, - 0x93, 0x83, 0xb9, 0xdd, 0xd9, 0xc2, 0xd0, 0x4c, 0x8b, 0x4d, 0xad, 0x93, 0x5d, 0x11, 0xc6, 0x32, - 0x7f, 0xf4, 0x82, 0x17, 0xc1, 0x4a, 0x67, 0xdf, 0x04, 0xc7, 0x1d, 0xcd, 0xb4, 0xb8, 0x1a, 0xdc, - 0x9b, 0xaa, 0x46, 0xb2, 0xa9, 0x91, 0x6c, 0x6b, 0x84, 0x57, 0x85, 0xf0, 0xa1, 0x10, 0xd6, 0x0a, - 0xa1, 0x52, 0x08, 0x3f, 0x0a, 0xe1, 0x57, 0x21, 0xd9, 0x2a, 0x84, 0xb7, 0x06, 0x49, 0xd5, 0x20, - 0xd9, 0x34, 0x48, 0x1e, 0xda, 0x97, 0x0b, 0x0e, 
0xcd, 0x2d, 0xae, 0xff, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x9d, 0xf1, 0x86, 0xb8, 0x56, 0x01, 0x00, 0x00, -} - -func (this *Stats) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Stats) - if !ok { - that2, ok := that.(Stats) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.WallTime != that1.WallTime { - return false - } - if this.FetchedSeriesCount != that1.FetchedSeriesCount { - return false - } - if this.FetchedChunkBytes != that1.FetchedChunkBytes { - return false - } - return true -} -func (this *Stats) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&stats.Stats{") - s = append(s, "WallTime: "+fmt.Sprintf("%#v", this.WallTime)+",\n") - s = append(s, "FetchedSeriesCount: "+fmt.Sprintf("%#v", this.FetchedSeriesCount)+",\n") - s = append(s, "FetchedChunkBytes: "+fmt.Sprintf("%#v", this.FetchedChunkBytes)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringStats(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Stats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Stats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.FetchedChunkBytes != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.FetchedChunkBytes)) - i-- - dAtA[i] = 0x18 - } - if m.FetchedSeriesCount != 0 { - i = encodeVarintStats(dAtA, i, uint64(m.FetchedSeriesCount)) - i-- - dAtA[i] = 0x10 - } - n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.WallTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.WallTime):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintStats(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintStats(dAtA []byte, offset int, v uint64) int { - offset -= sovStats(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Stats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.WallTime) - n += 1 + l + sovStats(uint64(l)) - if m.FetchedSeriesCount != 0 { - n += 1 + sovStats(uint64(m.FetchedSeriesCount)) - } - if m.FetchedChunkBytes != 0 { - n += 1 + sovStats(uint64(m.FetchedChunkBytes)) - } - return n -} - -func sovStats(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStats(x uint64) (n int) { - return sovStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Stats) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Stats{`, - `WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `FetchedSeriesCount:` + fmt.Sprintf("%v", this.FetchedSeriesCount) + `,`, - `FetchedChunkBytes:` + fmt.Sprintf("%v", this.FetchedChunkBytes) + `,`, - `}`, - }, "") - return s -} -func 
valueToStringStats(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Stats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Stats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Stats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WallTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.WallTime, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FetchedSeriesCount", wireType) - } - m.FetchedSeriesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FetchedSeriesCount |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FetchedChunkBytes", wireType) - } - m.FetchedChunkBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FetchedChunkBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStats(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= 
l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStats - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipStats(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthStats = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStats = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto deleted file mode 100644 index 765dd9958..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package stats; - -option go_package = "stats"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -message Stats { - // The sum of all wall time spent in the querier to execute the query. - google.protobuf.Duration wall_time = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; - // The number of series fetched for the query - uint64 fetched_series_count = 2; - // The number of bytes of the chunks fetched for the query - uint64 fetched_chunk_bytes = 3; -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go deleted file mode 100644 index a34697a66..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go +++ /dev/null @@ -1,30 +0,0 @@ -package stats - -import ( - "net/http" - "time" -) - -// WallTimeMiddleware tracks the wall time. -type WallTimeMiddleware struct{} - -// NewWallTimeMiddleware makes a new WallTimeMiddleware. -func NewWallTimeMiddleware() WallTimeMiddleware { - return WallTimeMiddleware{} -} - -// Wrap implements middleware.Interface. 
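The generated stats.pb.go code above is largely hand-inlined protobuf varint plumbing: field keys, lengths, and integer fields are base-128 varints decoded seven bits at a time, with the same overflow and EOF guards repeated in Unmarshal and skipStats. A small sketch of that decode loop, cross-checked against encoding/binary; readUvarint here is illustrative, not part of the generated API.

package main

import (
	"encoding/binary"
	"fmt"
)

// readUvarint decodes a base-128 varint, the same loop the generated
// Unmarshal and skip helpers above inline by hand.
func readUvarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflow")
		}
		if n >= len(b) {
			return 0, 0, fmt.Errorf("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // high bit clear: last byte of the varint
			return v, n, nil
		}
	}
}

func main() {
	buf := binary.AppendUvarint(nil, 300)
	v, n, err := readUvarint(buf)
	fmt.Println(v, n, err) // 300 2 <nil>
}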
-func (m WallTimeMiddleware) Wrap(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !IsEnabled(r.Context()) { - next.ServeHTTP(w, r) - return - } - - startTime := time.Now() - next.ServeHTTP(w, r) - - stats := FromContext(r.Context()) - stats.AddWallTime(time.Since(startTime)) - }) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go b/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go deleted file mode 100644 index b84a87846..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go +++ /dev/null @@ -1,106 +0,0 @@ -package querier - -import ( - "flag" - "time" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/tls" -) - -func newStoreGatewayClientFactory(clientCfg grpcclient.Config, reg prometheus.Registerer) client.PoolFactory { - requestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "storegateway_client_request_duration_seconds", - Help: "Time spent executing requests to the store-gateway.", - Buckets: prometheus.ExponentialBuckets(0.008, 4, 7), - ConstLabels: prometheus.Labels{"client": "querier"}, - }, []string{"operation", "status_code"}) - - return func(addr string) (client.PoolClient, error) { - return dialStoreGatewayClient(clientCfg, addr, requestDuration) - } -} - -func dialStoreGatewayClient(clientCfg grpcclient.Config, addr string, requestDuration *prometheus.HistogramVec) (*storeGatewayClient, error) { - opts, err := clientCfg.DialOption(grpcclient.Instrument(requestDuration)) - if err != nil { - return nil, err - } - - conn, err := grpc.Dial(addr, opts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to dial store-gateway %s", addr) - } - - return &storeGatewayClient{ - StoreGatewayClient: storegatewaypb.NewStoreGatewayClient(conn), - HealthClient: grpc_health_v1.NewHealthClient(conn), - conn: conn, - }, nil -} - -type storeGatewayClient struct { - storegatewaypb.StoreGatewayClient - grpc_health_v1.HealthClient - conn *grpc.ClientConn -} - -func (c *storeGatewayClient) Close() error { - return c.conn.Close() -} - -func (c *storeGatewayClient) String() string { - return c.RemoteAddress() -} - -func (c *storeGatewayClient) RemoteAddress() string { - return c.conn.Target() -} - -func newStoreGatewayClientPool(discovery client.PoolServiceDiscovery, clientConfig ClientConfig, logger log.Logger, reg prometheus.Registerer) *client.Pool { - // We prefer sane defaults instead of exposing further config options. 
- clientCfg := grpcclient.Config{ - MaxRecvMsgSize: 100 << 20, - MaxSendMsgSize: 16 << 20, - GRPCCompression: "", - RateLimit: 0, - RateLimitBurst: 0, - BackoffOnRatelimits: false, - TLSEnabled: clientConfig.TLSEnabled, - TLS: clientConfig.TLS, - } - poolCfg := client.PoolConfig{ - CheckInterval: time.Minute, - HealthCheckEnabled: true, - HealthCheckTimeout: 10 * time.Second, - } - - clientsCount := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "storegateway_clients", - Help: "The current number of store-gateway clients in the pool.", - ConstLabels: map[string]string{"client": "querier"}, - }) - - return client.NewPool("store-gateway", poolCfg, discovery, newStoreGatewayClientFactory(clientCfg, reg), clientsCount, logger) -} - -type ClientConfig struct { - TLSEnabled bool `yaml:"tls_enabled"` - TLS tls.ClientConfig `yaml:",inline"` -} - -func (cfg *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.BoolVar(&cfg.TLSEnabled, prefix+".tls-enabled", cfg.TLSEnabled, "Enable TLS for gRPC client connecting to store-gateway.") - cfg.TLS.RegisterFlagsWithPrefix(prefix, f) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go deleted file mode 100644 index ebade343d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go +++ /dev/null @@ -1,474 +0,0 @@ -package tenantfederation - -import ( - "context" - "fmt" - "sort" - "strings" - - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/weaveworks/common/user" - - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util/concurrency" - "github.com/cortexproject/cortex/pkg/util/spanlogger" -) - -const ( - defaultTenantLabel = "__tenant_id__" - retainExistingPrefix = "original_" - maxConcurrency = 16 -) - -// NewQueryable returns a queryable that iterates through all the tenant IDs -// that are part of the request and aggregates the results from each tenant's -// Querier by sending of subsequent requests. -// By setting byPassWithSingleQuerier to true the mergeQuerier gets by-passed -// and results for request with a single querier will not contain the -// "__tenant_id__" label. This allows a smoother transition, when enabling -// tenant federation in a cluster. -// The result contains a label "__tenant_id__" to identify the tenant ID that -// it originally resulted from. -// If the label "__tenant_id__" is already existing, its value is overwritten -// by the tenant ID and the previous value is exposed through a new label -// prefixed with "original_". This behaviour is not implemented recursively. 
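As the tenant-federation comment above describes, every federated result is stamped with a `__tenant_id__` label, and a pre-existing value of that label is retained under an `original_` prefix rather than silently overwritten. A sketch of that retention rule over a plain map; setRetainExisting is a hypothetical helper mirroring the behavior, not the vendored implementation.

package main

import "fmt"

const tenantLabel = "__tenant_id__"

// setRetainExisting stamps lbls with the tenant ID, moving any pre-existing
// value to "original_" + name. Not recursive, matching the comment above.
func setRetainExisting(lbls map[string]string, tenantID string) {
	if old, ok := lbls[tenantLabel]; ok && old != "" {
		lbls["original_"+tenantLabel] = old
	}
	lbls[tenantLabel] = tenantID
}

func main() {
	lbls := map[string]string{"job": "api", tenantLabel: "spoofed"}
	setRetainExisting(lbls, "team-a")
	fmt.Println(lbls)
	// map[__tenant_id__:team-a job:api original___tenant_id__:spoofed]
}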
-func NewQueryable(upstream storage.Queryable, byPassWithSingleQuerier bool) storage.Queryable { - return NewMergeQueryable(defaultTenantLabel, tenantQuerierCallback(upstream), byPassWithSingleQuerier) -} - -func tenantQuerierCallback(queryable storage.Queryable) MergeQuerierCallback { - return func(ctx context.Context, mint int64, maxt int64) ([]string, []storage.Querier, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, nil, err - } - - var queriers = make([]storage.Querier, len(tenantIDs)) - for pos, tenantID := range tenantIDs { - q, err := queryable.Querier( - user.InjectOrgID(ctx, tenantID), - mint, - maxt, - ) - if err != nil { - return nil, nil, err - } - queriers[pos] = q - } - - return tenantIDs, queriers, nil - } -} - -// MergeQuerierCallback returns the underlying queriers and their IDs relevant -// for the query. -type MergeQuerierCallback func(ctx context.Context, mint int64, maxt int64) (ids []string, queriers []storage.Querier, err error) - -// NewMergeQueryable returns a queryable that merges results from multiple -// underlying Queryables. The underlying queryables and its label values to be -// considered are returned by a MergeQuerierCallback. -// By setting byPassWithSingleQuerier to true the mergeQuerier gets by-passed -// and results for request with a single querier will not contain the id label. -// This allows a smoother transition, when enabling tenant federation in a -// cluster. -// Results contain a label `idLabelName` to identify the underlying queryable -// that it originally resulted from. -// If the label `idLabelName` is already existing, its value is overwritten and -// the previous value is exposed through a new label prefixed with "original_". -// This behaviour is not implemented recursively. -func NewMergeQueryable(idLabelName string, callback MergeQuerierCallback, byPassWithSingleQuerier bool) storage.Queryable { - return &mergeQueryable{ - idLabelName: idLabelName, - callback: callback, - byPassWithSingleQuerier: byPassWithSingleQuerier, - } -} - -type mergeQueryable struct { - idLabelName string - byPassWithSingleQuerier bool - callback MergeQuerierCallback -} - -// Querier returns a new mergeQuerier, which aggregates results from multiple -// underlying queriers into a single result. -func (m *mergeQueryable) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { - // TODO: it's necessary to think how to override context inside querier - // to mark spans created inside querier as child of a span created inside - // methods of merged querier. - ids, queriers, err := m.callback(ctx, mint, maxt) - if err != nil { - return nil, err - } - - // by pass when only single querier is returned - if m.byPassWithSingleQuerier && len(queriers) == 1 { - return queriers[0], nil - } - - return &mergeQuerier{ - ctx: ctx, - idLabelName: m.idLabelName, - queriers: queriers, - ids: ids, - }, nil -} - -// mergeQuerier aggregates the results from underlying queriers and adds a -// label `idLabelName` to identify the queryable that the metric resulted -// from. -// If the label `idLabelName` is already existing, its value is overwritten and -// the previous value is exposed through a new label prefixed with "original_". -// This behaviour is not implemented recursively -type mergeQuerier struct { - ctx context.Context - queriers []storage.Querier - idLabelName string - ids []string -} - -// LabelValues returns all potential values for a label name. 
It is not safe -// to use the strings beyond the lifefime of the querier. -// For the label `idLabelName` it will return all the underlying ids available. -// For the label "original_" + `idLabelName it will return all the values -// of the underlying queriers for `idLabelName`. -func (m *mergeQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - log, _ := spanlogger.New(m.ctx, "mergeQuerier.LabelValues") - defer log.Span.Finish() - - matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...) - - if name == m.idLabelName { - var labelValues = make([]string, 0, len(matchedTenants)) - for _, id := range m.ids { - if _, matched := matchedTenants[id]; matched { - labelValues = append(labelValues, id) - } - } - return labelValues, nil, nil - } - - // ensure the name of a retained label gets handled under the original - // label name - if name == retainExistingPrefix+m.idLabelName { - name = m.idLabelName - } - - return m.mergeDistinctStringSliceWithTenants(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) { - return q.LabelValues(name, filteredMatchers...) - }, matchedTenants) -} - -// LabelNames returns all the unique label names present in the underlying -// queriers. It also adds the `idLabelName` and if present in the original -// results the original `idLabelName`. -func (m *mergeQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - log, _ := spanlogger.New(m.ctx, "mergeQuerier.LabelNames") - defer log.Span.Finish() - - matchedTenants, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...) - - labelNames, warnings, err := m.mergeDistinctStringSliceWithTenants(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) { - return q.LabelNames(filteredMatchers...) - }, matchedTenants) - if err != nil { - return nil, nil, err - } - - // check if the `idLabelName` exists in the original result - var idLabelNameExists bool - labelPos := sort.SearchStrings(labelNames, m.idLabelName) - if labelPos < len(labelNames) && labelNames[labelPos] == m.idLabelName { - idLabelNameExists = true - } - - labelToAdd := m.idLabelName - - // if `idLabelName` already exists, we need to add the name prefix with - // retainExistingPrefix. - if idLabelNameExists { - labelToAdd = retainExistingPrefix + m.idLabelName - labelPos = sort.SearchStrings(labelNames, labelToAdd) - } - - // insert label at the correct position - labelNames = append(labelNames, "") - copy(labelNames[labelPos+1:], labelNames[labelPos:]) - labelNames[labelPos] = labelToAdd - - return labelNames, warnings, nil -} - -type stringSliceFunc func(context.Context, storage.Querier) ([]string, storage.Warnings, error) - -type stringSliceFuncJob struct { - querier storage.Querier - id string - result []string - warnings storage.Warnings -} - -// mergeDistinctStringSliceWithTenants aggregates stringSliceFunc call -// results from queriers whose tenant ids match the tenants map. If a nil map is -// provided, all queriers are used. It removes duplicates and sorts the result. -// It doesn't require the output of the stringSliceFunc to be sorted, as results -// of LabelValues are not sorted. 
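The merge helper documented above (and defined just below) fans a callback out across the matched queriers, then deduplicates and sorts the union of their results. The same shape can be sketched with errgroup from golang.org/x/sync, which is already a direct dependency; the fetch functions here are stand-ins for the per-querier calls.

package main

import (
	"context"
	"fmt"
	"sort"
	"sync"

	"golang.org/x/sync/errgroup"
)

// mergeDistinct runs every fetch concurrently and returns the sorted,
// deduplicated union of their results, failing fast on the first error.
func mergeDistinct(ctx context.Context, fetches []func(context.Context) ([]string, error)) ([]string, error) {
	var (
		mu  sync.Mutex
		set = map[string]struct{}{}
	)
	g, ctx := errgroup.WithContext(ctx)
	for _, fetch := range fetches {
		fetch := fetch
		g.Go(func() error {
			vals, err := fetch(ctx)
			if err != nil {
				return err
			}
			mu.Lock()
			defer mu.Unlock()
			for _, v := range vals {
				set[v] = struct{}{}
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	out := make([]string, 0, len(set))
	for v := range set {
		out = append(out, v)
	}
	sort.Strings(out)
	return out, nil
}

func main() {
	got, err := mergeDistinct(context.Background(), []func(context.Context) ([]string, error){
		func(context.Context) ([]string, error) { return []string{"b", "a"}, nil },
		func(context.Context) ([]string, error) { return []string{"a", "c"}, nil },
	})
	fmt.Println(got, err) // [a b c] <nil>
}

The vendored code additionally caps fan-out at 16 via concurrency.ForEach; with a newer errgroup release, g.SetLimit(16) would play that role.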
-func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(f stringSliceFunc, tenants map[string]struct{}) ([]string, storage.Warnings, error) { - var jobs []interface{} - - for pos, id := range m.ids { - if tenants != nil { - if _, matched := tenants[id]; !matched { - continue - } - } - - jobs = append(jobs, &stringSliceFuncJob{ - querier: m.queriers[pos], - id: m.ids[pos], - }) - } - - run := func(ctx context.Context, jobIntf interface{}) error { - job, ok := jobIntf.(*stringSliceFuncJob) - if !ok { - return fmt.Errorf("unexpected type %T", jobIntf) - } - - var err error - job.result, job.warnings, err = f(ctx, job.querier) - if err != nil { - return errors.Wrapf(err, "error querying %s %s", rewriteLabelName(m.idLabelName), job.id) - } - - return nil - } - - err := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run) - if err != nil { - return nil, nil, err - } - - // aggregate warnings and deduplicate string results - var warnings storage.Warnings - resultMap := make(map[string]struct{}) - for _, jobIntf := range jobs { - job, ok := jobIntf.(*stringSliceFuncJob) - if !ok { - return nil, nil, fmt.Errorf("unexpected type %T", jobIntf) - } - - for _, e := range job.result { - resultMap[e] = struct{}{} - } - - for _, w := range job.warnings { - warnings = append(warnings, errors.Wrapf(w, "warning querying %s %s", rewriteLabelName(m.idLabelName), job.id)) - } - } - - var result = make([]string, 0, len(resultMap)) - for e := range resultMap { - result = append(result, e) - } - sort.Strings(result) - return result, warnings, nil -} - -// Close releases the resources of the Querier. -func (m *mergeQuerier) Close() error { - errs := tsdb_errors.NewMulti() - for pos, id := range m.ids { - errs.Add(errors.Wrapf(m.queriers[pos].Close(), "failed to close querier for %s %s", rewriteLabelName(m.idLabelName), id)) - } - return errs.Err() -} - -type selectJob struct { - pos int - querier storage.Querier - id string -} - -// Select returns a set of series that matches the given label matchers. If the -// `idLabelName` is matched on, it only considers those queriers -// matching. The forwarded labelSelector is not containing those that operate -// on `idLabelName`. -func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - log, ctx := spanlogger.New(m.ctx, "mergeQuerier.Select") - defer log.Span.Finish() - matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, m.ids, matchers...) - var jobs = make([]interface{}, len(matchedValues)) - var seriesSets = make([]storage.SeriesSet, len(matchedValues)) - var jobPos int - for labelPos := range m.ids { - if _, matched := matchedValues[m.ids[labelPos]]; !matched { - continue - } - jobs[jobPos] = &selectJob{ - pos: jobPos, - querier: m.queriers[labelPos], - id: m.ids[labelPos], - } - jobPos++ - } - - run := func(ctx context.Context, jobIntf interface{}) error { - job, ok := jobIntf.(*selectJob) - if !ok { - return fmt.Errorf("unexpected type %T", jobIntf) - } - seriesSets[job.pos] = &addLabelsSeriesSet{ - upstream: job.querier.Select(sortSeries, hints, filteredMatchers...), - labels: labels.Labels{ - { - Name: m.idLabelName, - Value: job.id, - }, - }, - } - return nil - } - - err := concurrency.ForEach(ctx, jobs, maxConcurrency, run) - if err != nil { - return storage.ErrSeriesSet(err) - } - - return storage.NewMergeSeriesSet(seriesSets, storage.ChainedSeriesMerge) -} - -// filterValuesByMatchers applies matchers to inputed `idLabelName` and -// `ids`. 
A map of matched values is returned and also all label matchers not -// matching the `idLabelName`. -// In case a label matcher is set on a label conflicting with `idLabelName`, we -// need to rename this labelMatcher's name to its original name. This is used -// to as part of Select in the mergeQueryable, to ensure only relevant queries -// are considered and the forwarded matchers do not contain matchers on the -// `idLabelName`. -func filterValuesByMatchers(idLabelName string, ids []string, matchers ...*labels.Matcher) (matchedIDs map[string]struct{}, unrelatedMatchers []*labels.Matcher) { - // this contains the matchers which are not related to idLabelName - unrelatedMatchers = make([]*labels.Matcher, 0, len(matchers)) - - // build map of values to consider for the matchers - matchedIDs = make(map[string]struct{}, len(ids)) - for _, value := range ids { - matchedIDs[value] = struct{}{} - } - - for _, m := range matchers { - switch m.Name { - // matcher has idLabelName to target a specific tenant(s) - case idLabelName: - for value := range matchedIDs { - if !m.Matches(value) { - delete(matchedIDs, value) - } - } - - // check if has the retained label name - case retainExistingPrefix + idLabelName: - // rewrite label to the original name, by copying matcher and - // replacing the label name - rewrittenM := *m - rewrittenM.Name = idLabelName - unrelatedMatchers = append(unrelatedMatchers, &rewrittenM) - - default: - unrelatedMatchers = append(unrelatedMatchers, m) - } - } - - return matchedIDs, unrelatedMatchers -} - -type addLabelsSeriesSet struct { - upstream storage.SeriesSet - labels labels.Labels - currSeries storage.Series -} - -func (m *addLabelsSeriesSet) Next() bool { - m.currSeries = nil - return m.upstream.Next() -} - -// At returns full series. Returned series should be iteratable even after Next is called. -func (m *addLabelsSeriesSet) At() storage.Series { - if m.currSeries == nil { - upstream := m.upstream.At() - m.currSeries = &addLabelsSeries{ - upstream: upstream, - labels: setLabelsRetainExisting(upstream.Labels(), m.labels...), - } - } - return m.currSeries -} - -// The error that iteration as failed with. -// When an error occurs, set cannot continue to iterate. -func (m *addLabelsSeriesSet) Err() error { - return errors.Wrapf(m.upstream.Err(), "error querying %s", labelsToString(m.labels)) -} - -// A collection of warnings for the whole set. -// Warnings could be return even iteration has not failed with error. -func (m *addLabelsSeriesSet) Warnings() storage.Warnings { - upstream := m.upstream.Warnings() - warnings := make(storage.Warnings, len(upstream)) - for pos := range upstream { - warnings[pos] = errors.Wrapf(upstream[pos], "warning querying %s", labelsToString(m.labels)) - } - return warnings -} - -// rewrite label name to be more readable in error output -func rewriteLabelName(s string) string { - return strings.TrimRight(strings.TrimLeft(s, "_"), "_") -} - -// this outputs a more readable error format -func labelsToString(labels labels.Labels) string { - parts := make([]string, len(labels)) - for pos, l := range labels { - parts[pos] = rewriteLabelName(l.Name) + " " + l.Value - } - return strings.Join(parts, ", ") -} - -type addLabelsSeries struct { - upstream storage.Series - labels labels.Labels -} - -// Labels returns the complete set of labels. For series it means all labels identifying the series. -func (a *addLabelsSeries) Labels() labels.Labels { - return a.labels -} - -// Iterator returns a new, independent iterator of the data of the series. 
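filterValuesByMatchers above plays two roles: matchers on `idLabelName` shrink the set of tenants to query, while `original_`-prefixed matchers are rewritten back to the real label name and forwarded. The shrinking step, sketched with plain predicates standing in for *labels.Matcher:

package main

import "fmt"

// filterIDs keeps only the ids accepted by every predicate, mirroring how
// matchers on the tenant label shrink the set of queried tenants.
func filterIDs(ids []string, preds ...func(string) bool) map[string]struct{} {
	matched := make(map[string]struct{}, len(ids))
	for _, id := range ids {
		matched[id] = struct{}{}
	}
	for _, pred := range preds {
		for id := range matched {
			if !pred(id) {
				delete(matched, id) // deleting during range is safe in Go
			}
		}
	}
	return matched
}

func main() {
	matched := filterIDs(
		[]string{"team-a", "team-b", "ops"},
		func(id string) bool { return id != "ops" }, // e.g. __tenant_id__!="ops"
	)
	fmt.Println(len(matched)) // 2
}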
-func (a *addLabelsSeries) Iterator() chunkenc.Iterator { - return a.upstream.Iterator() -} - -// this sets a label and preserves an existing value a new label prefixed with -// original_. It doesn't do this recursively. -func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label) labels.Labels { - lb := labels.NewBuilder(src) - - for _, additionalL := range additionalLabels { - if oldValue := src.Get(additionalL.Name); oldValue != "" { - lb.Set( - retainExistingPrefix+additionalL.Name, - oldValue, - ) - } - lb.Set(additionalL.Name, additionalL.Value) - } - - return lb.Labels() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go b/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go deleted file mode 100644 index af5bd7b92..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go +++ /dev/null @@ -1,14 +0,0 @@ -package tenantfederation - -import ( - "flag" -) - -type Config struct { - // Enabled switches on support for multi tenant query federation - Enabled bool `yaml:"enabled"` -} - -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.BoolVar(&cfg.Enabled, "tenant-federation.enabled", false, "If enabled on all Cortex services, queries can be federated across multiple tenants. The tenant IDs involved need to be specified separated by a `|` character in the `X-Scope-OrgID` header (experimental).") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/querier/testutils.go deleted file mode 100644 index 2489f712a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/testutils.go +++ /dev/null @@ -1,79 +0,0 @@ -package querier - -import ( - "context" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/scrape" - "github.com/stretchr/testify/mock" - - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/prom1/storage/metric" - "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -type MockDistributor struct { - mock.Mock -} - -func (m *MockDistributor) Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) { - args := m.Called(ctx, from, to, matchers) - return args.Get(0).(model.Matrix), args.Error(1) -} -func (m *MockDistributor) QueryExemplars(ctx context.Context, from, to model.Time, matchers ...[]*labels.Matcher) (*client.ExemplarQueryResponse, error) { - args := m.Called(ctx, from, to, matchers) - return args.Get(0).(*client.ExemplarQueryResponse), args.Error(1) -} -func (m *MockDistributor) QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (*client.QueryStreamResponse, error) { - args := m.Called(ctx, from, to, matchers) - return args.Get(0).(*client.QueryStreamResponse), args.Error(1) -} -func (m *MockDistributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, lbl model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - args := m.Called(ctx, from, to, lbl, matchers) - return args.Get(0).([]string), args.Error(1) -} -func (m *MockDistributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, lbl model.LabelName, matchers ...*labels.Matcher) ([]string, error) { - args := m.Called(ctx, from, to, lbl, matchers) - return args.Get(0).([]string), 
args.Error(1) -} -func (m *MockDistributor) LabelNames(ctx context.Context, from, to model.Time) ([]string, error) { - args := m.Called(ctx, from, to) - return args.Get(0).([]string), args.Error(1) -} -func (m *MockDistributor) LabelNamesStream(ctx context.Context, from, to model.Time) ([]string, error) { - args := m.Called(ctx, from, to) - return args.Get(0).([]string), args.Error(1) -} -func (m *MockDistributor) MetricsForLabelMatchers(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) { - args := m.Called(ctx, from, to, matchers) - return args.Get(0).([]metric.Metric), args.Error(1) -} -func (m *MockDistributor) MetricsForLabelMatchersStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) { - args := m.Called(ctx, from, to, matchers) - return args.Get(0).([]metric.Metric), args.Error(1) -} - -func (m *MockDistributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { - args := m.Called(ctx) - return args.Get(0).([]scrape.MetricMetadata), args.Error(1) -} - -type TestConfig struct { - Cfg Config - Distributor Distributor - Stores []QueryableWithFilter -} - -func DefaultQuerierConfig() Config { - querierCfg := Config{} - flagext.DefaultValues(&querierCfg) - return querierCfg -} - -func DefaultLimitsConfig() validation.Limits { - limits := validation.Limits{} - flagext.DefaultValues(&limits) - return limits -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/timeseries_series_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/timeseries_series_set.go deleted file mode 100644 index ac1480e03..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/timeseries_series_set.go +++ /dev/null @@ -1,103 +0,0 @@ -package querier - -import ( - "sort" - - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/cortexproject/cortex/pkg/cortexpb" -) - -// timeSeriesSeriesSet is a wrapper around a cortexpb.TimeSeries slice to implement to SeriesSet interface -type timeSeriesSeriesSet struct { - ts []cortexpb.TimeSeries - i int -} - -func newTimeSeriesSeriesSet(series []cortexpb.TimeSeries) *timeSeriesSeriesSet { - sort.Sort(byTimeSeriesLabels(series)) - return &timeSeriesSeriesSet{ - ts: series, - i: -1, - } -} - -// Next implements storage.SeriesSet interface. -func (t *timeSeriesSeriesSet) Next() bool { t.i++; return t.i < len(t.ts) } - -// At implements storage.SeriesSet interface. -func (t *timeSeriesSeriesSet) At() storage.Series { - if t.i < 0 { - return nil - } - return ×eries{series: t.ts[t.i]} -} - -// Err implements storage.SeriesSet interface. -func (t *timeSeriesSeriesSet) Err() error { return nil } - -// Warnings implements storage.SeriesSet interface. 
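MockDistributor above follows the standard testify pattern: each method records its arguments via Called and replays whatever a test primed with On(...).Return(...). A minimal standalone sketch with a hypothetical one-method mock, not the full Distributor interface:

package main

import (
	"context"
	"fmt"

	"github.com/stretchr/testify/mock"
)

type mockStore struct {
	mock.Mock
}

// LabelNames records the call and returns whatever the expectation primed.
func (m *mockStore) LabelNames(ctx context.Context) ([]string, error) {
	args := m.Called(ctx)
	return args.Get(0).([]string), args.Error(1)
}

func main() {
	m := &mockStore{}
	// Prime the expectation; the code under test then calls the method normally.
	m.On("LabelNames", mock.Anything).Return([]string{"job", "instance"}, nil)

	names, err := m.LabelNames(context.Background())
	fmt.Println(names, err) // [job instance] <nil>
}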
-func (t *timeSeriesSeriesSet) Warnings() storage.Warnings { return nil } - -// timeseries is a type wrapper that implements the storage.Series interface -type timeseries struct { - series cortexpb.TimeSeries -} - -// timeSeriesSeriesIterator is a wrapper around a cortexpb.TimeSeries to implement the SeriesIterator interface -type timeSeriesSeriesIterator struct { - ts *timeseries - i int -} - -type byTimeSeriesLabels []cortexpb.TimeSeries - -func (b byTimeSeriesLabels) Len() int { return len(b) } -func (b byTimeSeriesLabels) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byTimeSeriesLabels) Less(i, j int) bool { - return labels.Compare(cortexpb.FromLabelAdaptersToLabels(b[i].Labels), cortexpb.FromLabelAdaptersToLabels(b[j].Labels)) < 0 -} - -// Labels implements the storage.Series interface. -// Conversion is safe because ingester sets these by calling client.FromLabelsToLabelAdapters which guarantees labels are sorted. -func (t *timeseries) Labels() labels.Labels { - return cortexpb.FromLabelAdaptersToLabels(t.series.Labels) -} - -// Iterator implements the storage.Series interface -func (t *timeseries) Iterator() chunkenc.Iterator { - return &timeSeriesSeriesIterator{ - ts: t, - i: -1, - } -} - -// Seek implements SeriesIterator interface -func (t *timeSeriesSeriesIterator) Seek(s int64) bool { - offset := 0 - if t.i > 0 { - offset = t.i // only advance via Seek - } - - t.i = sort.Search(len(t.ts.series.Samples[offset:]), func(i int) bool { - return t.ts.series.Samples[offset+i].TimestampMs >= s - }) + offset - - return t.i < len(t.ts.series.Samples) -} - -// At implements the SeriesIterator interface -func (t *timeSeriesSeriesIterator) At() (int64, float64) { - if t.i < 0 || t.i >= len(t.ts.series.Samples) { - return 0, 0 - } - return t.ts.series.Samples[t.i].TimestampMs, t.ts.series.Samples[t.i].Value -} - -// Next implements the SeriesIterator interface -func (t *timeSeriesSeriesIterator) Next() bool { t.i++; return t.i < len(t.ts.series.Samples) } - -// Err implements the SeriesIterator interface -func (t *timeSeriesSeriesIterator) Err() error { return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go deleted file mode 100644 index 9189ff1dd..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go +++ /dev/null @@ -1,148 +0,0 @@ -package worker - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/weaveworks/common/httpgrpc" - "google.golang.org/grpc" - - "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" - "github.com/cortexproject/cortex/pkg/querier/stats" - querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/util/backoff" -) - -var ( - processorBackoffConfig = backoff.Config{ - MinBackoff: 50 * time.Millisecond, - MaxBackoff: 1 * time.Second, - } -) - -func newFrontendProcessor(cfg Config, handler RequestHandler, log log.Logger) processor { - return &frontendProcessor{ - log: log, - handler: handler, - maxMessageSize: cfg.GRPCClientConfig.MaxSendMsgSize, - querierID: cfg.QuerierID, - } -} - -// Handles incoming queries from frontend. -type frontendProcessor struct { - handler RequestHandler - maxMessageSize int - querierID string - - log log.Logger -} - -// notifyShutdown implements processor. 
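timeSeriesSeriesIterator.Seek above binary-searches only the samples at or after the current position, so Seek can advance but never rewind. The same idea on a bare slice of timestamps:

package main

import (
	"fmt"
	"sort"
)

type iter struct {
	ts []int64 // sorted sample timestamps
	i  int     // current position, -1 before the first Next
}

// seek advances (never rewinds) to the first timestamp >= t, searching only
// the samples at or after the current position, as the iterator above does.
func (it *iter) seek(t int64) bool {
	offset := 0
	if it.i > 0 {
		offset = it.i // only advance via seek
	}
	it.i = offset + sort.Search(len(it.ts[offset:]), func(n int) bool {
		return it.ts[offset+n] >= t
	})
	return it.i < len(it.ts)
}

func main() {
	it := &iter{ts: []int64{10, 20, 30, 40}, i: -1}
	fmt.Println(it.seek(25), it.ts[it.i]) // true 30
	fmt.Println(it.seek(15), it.ts[it.i]) // true 30 (no rewind)
}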
-func (fp *frontendProcessor) notifyShutdown(ctx context.Context, conn *grpc.ClientConn, address string) { - client := frontendv1pb.NewFrontendClient(conn) - - req := &frontendv1pb.NotifyClientShutdownRequest{ClientID: fp.querierID} - if _, err := client.NotifyClientShutdown(ctx, req); err != nil { - // Since we're shutting down there's nothing we can do except logging it. - level.Warn(fp.log).Log("msg", "failed to notify querier shutdown to query-frontend", "address", address, "err", err) - } -} - -// runOne loops, trying to establish a stream to the frontend to begin request processing. -func (fp *frontendProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) { - client := frontendv1pb.NewFrontendClient(conn) - - backoff := backoff.New(ctx, processorBackoffConfig) - for backoff.Ongoing() { - c, err := client.Process(ctx) - if err != nil { - level.Error(fp.log).Log("msg", "error contacting frontend", "address", address, "err", err) - backoff.Wait() - continue - } - - if err := fp.process(c); err != nil { - level.Error(fp.log).Log("msg", "error processing requests", "address", address, "err", err) - backoff.Wait() - continue - } - - backoff.Reset() - } -} - -// process loops processing requests on an established stream. -func (fp *frontendProcessor) process(c frontendv1pb.Frontend_ProcessClient) error { - // Build a child context so we can cancel a query when the stream is closed. - ctx, cancel := context.WithCancel(c.Context()) - defer cancel() - - for { - request, err := c.Recv() - if err != nil { - return err - } - - switch request.Type { - case frontendv1pb.HTTP_REQUEST: - // Handle the request on a "background" goroutine, so we go back to - // blocking on c.Recv(). This allows us to detect the stream closing - // and cancel the query. We don't actually handle queries in parallel - // here, as we're running in lock step with the server - each Recv is - // paired with a Send. - go fp.runRequest(ctx, request.HttpRequest, request.StatsEnabled, func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error { - return c.Send(&frontendv1pb.ClientToFrontend{ - HttpResponse: response, - Stats: stats, - }) - }) - - case frontendv1pb.GET_ID: - err := c.Send(&frontendv1pb.ClientToFrontend{ClientID: fp.querierID}) - if err != nil { - return err - } - - default: - return fmt.Errorf("unknown request type: %v", request.Type) - } - } -} - -func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, statsEnabled bool, sendHTTPResponse func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error) { - var stats *querier_stats.Stats - if statsEnabled { - stats, ctx = querier_stats.ContextWithEmptyStats(ctx) - } - - response, err := fp.handler.Handle(ctx, request) - if err != nil { - var ok bool - response, ok = httpgrpc.HTTPResponseFromError(err) - if !ok { - response = &httpgrpc.HTTPResponse{ - Code: http.StatusInternalServerError, - Body: []byte(err.Error()), - } - } - } - - // Ensure responses that are too big are not retried. 
- if len(response.Body) >= fp.maxMessageSize { - errMsg := fmt.Sprintf("response larger than the max (%d vs %d)", len(response.Body), fp.maxMessageSize) - response = &httpgrpc.HTTPResponse{ - Code: http.StatusRequestEntityTooLarge, - Body: []byte(errMsg), - } - level.Error(fp.log).Log("msg", "error processing query", "err", errMsg) - } - - if err := sendHTTPResponse(response, stats); err != nil { - level.Error(fp.log).Log("msg", "error processing requests", "err", err) - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go deleted file mode 100644 index 5d675c88a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go +++ /dev/null @@ -1,86 +0,0 @@ -package worker - -import ( - "context" - "sync" - "time" - - "go.uber.org/atomic" - "google.golang.org/grpc" -) - -const ( - notifyShutdownTimeout = 5 * time.Second -) - -// Manages processor goroutines for single grpc connection. -type processorManager struct { - p processor - conn *grpc.ClientConn - address string - - // Main context to control all goroutines. - ctx context.Context - wg sync.WaitGroup - - // Cancel functions for individual goroutines. - cancelsMu sync.Mutex - cancels []context.CancelFunc - - currentProcessors *atomic.Int32 -} - -func newProcessorManager(ctx context.Context, p processor, conn *grpc.ClientConn, address string) *processorManager { - return &processorManager{ - p: p, - ctx: ctx, - conn: conn, - address: address, - currentProcessors: atomic.NewInt32(0), - } -} - -func (pm *processorManager) stop() { - // Notify the remote query-frontend or query-scheduler we're shutting down. - // We use a new context to make sure it's not cancelled. - notifyCtx, cancel := context.WithTimeout(context.Background(), notifyShutdownTimeout) - defer cancel() - pm.p.notifyShutdown(notifyCtx, pm.conn, pm.address) - - // Stop all goroutines. - pm.concurrency(0) - - // Wait until they finish. 
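// ---- editor's sketch (illustrative, not part of the vendored file) ----
// The guard above replaces any response body that reaches maxMessageSize
// with a small 413, so the frontend never retries a payload it cannot accept
// over gRPC. A minimal version of the same check, with assumed sizes:
package main

import (
	"fmt"
	"net/http"
)

func capResponse(body []byte, maxMessageSize int) (code int, out []byte) {
	if len(body) >= maxMessageSize {
		msg := fmt.Sprintf("response larger than the max (%d vs %d)", len(body), maxMessageSize)
		return http.StatusRequestEntityTooLarge, []byte(msg)
	}
	return http.StatusOK, body
}

func main() {
	code, out := capResponse(make([]byte, 16), 8)
	fmt.Println(code, string(out)) // 413 response larger than the max (16 vs 8)
}
// ---- end sketch ----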
- pm.wg.Wait() - - _ = pm.conn.Close() -} - -func (pm *processorManager) concurrency(n int) { - pm.cancelsMu.Lock() - defer pm.cancelsMu.Unlock() - - if n < 0 { - n = 0 - } - - for len(pm.cancels) < n { - ctx, cancel := context.WithCancel(pm.ctx) - pm.cancels = append(pm.cancels, cancel) - - pm.wg.Add(1) - go func() { - defer pm.wg.Done() - - pm.currentProcessors.Inc() - defer pm.currentProcessors.Dec() - - pm.p.processQueriesOnSingleStream(ctx, pm.conn, pm.address) - }() - } - - for len(pm.cancels) > n { - pm.cancels[0]() - pm.cancels = pm.cancels[1:] - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go deleted file mode 100644 index ec9b52e6d..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go +++ /dev/null @@ -1,228 +0,0 @@ -package worker - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - otgrpc "github.com/opentracing-contrib/go-grpc" - "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/middleware" - "github.com/weaveworks/common/user" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" - querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" - "github.com/cortexproject/cortex/pkg/util/backoff" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" - util_log "github.com/cortexproject/cortex/pkg/util/log" - cortexmiddleware "github.com/cortexproject/cortex/pkg/util/middleware" - "github.com/cortexproject/cortex/pkg/util/services" -) - -func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, reg prometheus.Registerer) (*schedulerProcessor, []services.Service) { - p := &schedulerProcessor{ - log: log, - handler: handler, - maxMessageSize: cfg.GRPCClientConfig.MaxSendMsgSize, - querierID: cfg.QuerierID, - grpcConfig: cfg.GRPCClientConfig, - - frontendClientRequestDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Name: "cortex_querier_query_frontend_request_duration_seconds", - Help: "Time spend doing requests to frontend.", - Buckets: prometheus.ExponentialBuckets(0.001, 4, 6), - }, []string{"operation", "status_code"}), - } - - frontendClientsGauge := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_querier_query_frontend_clients", - Help: "The current number of clients connected to query-frontend.", - }) - - poolConfig := client.PoolConfig{ - CheckInterval: 5 * time.Second, - HealthCheckEnabled: true, - HealthCheckTimeout: 1 * time.Second, - } - - p.frontendPool = client.NewPool("frontend", poolConfig, nil, p.createFrontendClient, frontendClientsGauge, log) - return p, []services.Service{p.frontendPool} -} - -// Handles incoming queries from query-scheduler. -type schedulerProcessor struct { - log log.Logger - handler RequestHandler - grpcConfig grpcclient.Config - maxMessageSize int - querierID string - - frontendPool *client.Pool - frontendClientRequestDuration *prometheus.HistogramVec -} - -// notifyShutdown implements processor. 
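// ---- editor's sketch (illustrative, not part of the vendored file) ----
// concurrency(n) above scales the worker set by appending one cancel
// function per new goroutine and cancelling the oldest entries to shrink.
// The same pattern in a self-contained form:
package main

import (
	"context"
	"fmt"
	"sync"
)

func main() {
	root := context.Background()
	var wg sync.WaitGroup
	var cancels []context.CancelFunc

	setConcurrency := func(n int) {
		for len(cancels) < n { // grow: one goroutine per missing slot
			ctx, cancel := context.WithCancel(root)
			cancels = append(cancels, cancel)
			wg.Add(1)
			go func() {
				defer wg.Done()
				<-ctx.Done() // stand-in for processQueriesOnSingleStream
			}()
		}
		for len(cancels) > n { // shrink: cancel the oldest workers first
			cancels[0]()
			cancels = cancels[1:]
		}
	}

	setConcurrency(3)
	setConcurrency(0)
	wg.Wait()
	fmt.Println("all workers stopped")
}
// ---- end sketch ----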
-func (sp *schedulerProcessor) notifyShutdown(ctx context.Context, conn *grpc.ClientConn, address string) { - client := schedulerpb.NewSchedulerForQuerierClient(conn) - - req := &schedulerpb.NotifyQuerierShutdownRequest{QuerierID: sp.querierID} - if _, err := client.NotifyQuerierShutdown(ctx, req); err != nil { - // Since we're shutting down there's nothing we can do except logging it. - level.Warn(sp.log).Log("msg", "failed to notify querier shutdown to query-scheduler", "address", address, "err", err) - } -} - -func (sp *schedulerProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) { - schedulerClient := schedulerpb.NewSchedulerForQuerierClient(conn) - - backoff := backoff.New(ctx, processorBackoffConfig) - for backoff.Ongoing() { - c, err := schedulerClient.QuerierLoop(ctx) - if err == nil { - err = c.Send(&schedulerpb.QuerierToScheduler{QuerierID: sp.querierID}) - } - - if err != nil { - level.Error(sp.log).Log("msg", "error contacting scheduler", "err", err, "addr", address) - backoff.Wait() - continue - } - - if err := sp.querierLoop(c, address); err != nil { - level.Error(sp.log).Log("msg", "error processing requests from scheduler", "err", err, "addr", address) - backoff.Wait() - continue - } - - backoff.Reset() - } -} - -// process loops processing requests on an established stream. -func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_QuerierLoopClient, address string) error { - // Build a child context so we can cancel a query when the stream is closed. - ctx, cancel := context.WithCancel(c.Context()) - defer cancel() - - for { - request, err := c.Recv() - if err != nil { - return err - } - - // Handle the request on a "background" goroutine, so we go back to - // blocking on c.Recv(). This allows us to detect the stream closing - // and cancel the query. We don't actually handle queries in parallel - // here, as we're running in lock step with the server - each Recv is - // paired with a Send. - go func() { - // We need to inject user into context for sending response back. - ctx := user.InjectOrgID(ctx, request.UserID) - - tracer := opentracing.GlobalTracer() - // Ignore errors here. If we cannot get parent span, we just don't create new one. - parentSpanContext, _ := httpgrpcutil.GetParentSpanForRequest(tracer, request.HttpRequest) - if parentSpanContext != nil { - queueSpan, spanCtx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "querier_processor_runRequest", opentracing.ChildOf(parentSpanContext)) - defer queueSpan.Finish() - - ctx = spanCtx - } - logger := util_log.WithContext(ctx, sp.log) - - sp.runRequest(ctx, logger, request.QueryID, request.FrontendAddress, request.StatsEnabled, request.HttpRequest) - - // Report back to scheduler that processing of the query has finished. 
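// ---- editor's sketch (illustrative, not part of the vendored file) ----
// Each scheduler request carries the tenant in request.UserID, and the
// goroutine above re-injects it into the context so the response sent back
// to the frontend is attributed to that tenant. The tenant ID below is an
// assumption; the example relies on the vendored weaveworks/common module:
package main

import (
	"context"
	"fmt"

	"github.com/weaveworks/common/user"
)

func main() {
	ctx := user.InjectOrgID(context.Background(), "tenant-1")
	orgID, err := user.ExtractOrgID(ctx)
	fmt.Println(orgID, err) // tenant-1 <nil>
}
// ---- end sketch ----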
- if err := c.Send(&schedulerpb.QuerierToScheduler{}); err != nil { - level.Error(logger).Log("msg", "error notifying scheduler about finished query", "err", err, "addr", address) - } - }() - } -} - -func (sp *schedulerProcessor) runRequest(ctx context.Context, logger log.Logger, queryID uint64, frontendAddress string, statsEnabled bool, request *httpgrpc.HTTPRequest) { - var stats *querier_stats.Stats - if statsEnabled { - stats, ctx = querier_stats.ContextWithEmptyStats(ctx) - } - - response, err := sp.handler.Handle(ctx, request) - if err != nil { - var ok bool - response, ok = httpgrpc.HTTPResponseFromError(err) - if !ok { - response = &httpgrpc.HTTPResponse{ - Code: http.StatusInternalServerError, - Body: []byte(err.Error()), - } - } - } - - // Ensure responses that are too big are not retried. - if len(response.Body) >= sp.maxMessageSize { - level.Error(logger).Log("msg", "response larger than max message size", "size", len(response.Body), "maxMessageSize", sp.maxMessageSize) - - errMsg := fmt.Sprintf("response larger than the max message size (%d vs %d)", len(response.Body), sp.maxMessageSize) - response = &httpgrpc.HTTPResponse{ - Code: http.StatusRequestEntityTooLarge, - Body: []byte(errMsg), - } - } - - c, err := sp.frontendPool.GetClientFor(frontendAddress) - if err == nil { - // Response is empty and uninteresting. - _, err = c.(frontendv2pb.FrontendForQuerierClient).QueryResult(ctx, &frontendv2pb.QueryResultRequest{ - QueryID: queryID, - HttpResponse: response, - Stats: stats, - }) - } - if err != nil { - level.Error(logger).Log("msg", "error notifying frontend about finished query", "err", err, "frontend", frontendAddress) - } -} - -func (sp *schedulerProcessor) createFrontendClient(addr string) (client.PoolClient, error) { - opts, err := sp.grpcConfig.DialOption([]grpc.UnaryClientInterceptor{ - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), - middleware.ClientUserHeaderInterceptor, - cortexmiddleware.PrometheusGRPCUnaryInstrumentation(sp.frontendClientRequestDuration), - }, nil) - - if err != nil { - return nil, err - } - - conn, err := grpc.Dial(addr, opts...) 
- if err != nil { - return nil, err - } - - return &frontendClient{ - FrontendForQuerierClient: frontendv2pb.NewFrontendForQuerierClient(conn), - HealthClient: grpc_health_v1.NewHealthClient(conn), - conn: conn, - }, nil -} - -type frontendClient struct { - frontendv2pb.FrontendForQuerierClient - grpc_health_v1.HealthClient - conn *grpc.ClientConn -} - -func (fc *frontendClient) Close() error { - return fc.conn.Close() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go deleted file mode 100644 index 49c807bce..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go +++ /dev/null @@ -1,272 +0,0 @@ -package worker - -import ( - "context" - "flag" - "os" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/httpgrpc" - "google.golang.org/grpc" - - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/services" -) - -type Config struct { - FrontendAddress string `yaml:"frontend_address"` - SchedulerAddress string `yaml:"scheduler_address"` - DNSLookupPeriod time.Duration `yaml:"dns_lookup_duration"` - - Parallelism int `yaml:"parallelism"` - MatchMaxConcurrency bool `yaml:"match_max_concurrent"` - MaxConcurrentRequests int `yaml:"-"` // Must be same as passed to PromQL Engine. - - QuerierID string `yaml:"id"` - - GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` -} - -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.SchedulerAddress, "querier.scheduler-address", "", "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.") - f.StringVar(&cfg.FrontendAddress, "querier.frontend-address", "", "Address of query frontend service, in host:port format. If -querier.scheduler-address is set as well, querier will use scheduler instead. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.") - - f.DurationVar(&cfg.DNSLookupPeriod, "querier.dns-lookup-period", 10*time.Second, "How often to query DNS for query-frontend or query-scheduler address.") - - f.IntVar(&cfg.Parallelism, "querier.worker-parallelism", 10, "Number of simultaneous queries to process per query-frontend or query-scheduler.") - f.BoolVar(&cfg.MatchMaxConcurrency, "querier.worker-match-max-concurrent", false, "Force worker concurrency to match the -querier.max-concurrent option. Overrides querier.worker-parallelism.") - f.StringVar(&cfg.QuerierID, "querier.id", "", "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.") - - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) -} - -func (cfg *Config) Validate(log log.Logger) error { - if cfg.FrontendAddress != "" && cfg.SchedulerAddress != "" { - return errors.New("frontend address and scheduler address are mutually exclusive, please use only one") - } - return cfg.GRPCClientConfig.Validate(log) -} - -// Handler for HTTP requests wrapped in protobuf messages. 
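// ---- editor's sketch (illustrative, not part of the vendored file) ----
// Validate above rejects configs that set both -querier.frontend-address and
// -querier.scheduler-address; at most one may be used, and setting neither
// leaves the querier in HTTP-only mode. A stripped-down version of the check
// with made-up addresses:
package main

import (
	"errors"
	"fmt"
)

func validate(frontendAddr, schedulerAddr string) error {
	if frontendAddr != "" && schedulerAddr != "" {
		return errors.New("frontend address and scheduler address are mutually exclusive, please use only one")
	}
	return nil
}

func main() {
	fmt.Println(validate("frontend:9095", "scheduler:9095")) // error
	fmt.Println(validate("", "scheduler:9095"))              // <nil>
}
// ---- end sketch ----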
-type RequestHandler interface { - Handle(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) -} - -// Single processor handles all streaming operations to query-frontend or query-scheduler to fetch queries -// and process them. -type processor interface { - // Each invocation of processQueriesOnSingleStream starts new streaming operation to query-frontend - // or query-scheduler to fetch queries and execute them. - // - // This method must react on context being finished, and stop when that happens. - // - // processorManager (not processor) is responsible for starting as many goroutines as needed for each connection. - processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) - - // notifyShutdown notifies the remote query-frontend or query-scheduler that the querier is - // shutting down. - notifyShutdown(ctx context.Context, conn *grpc.ClientConn, address string) -} - -type querierWorker struct { - *services.BasicService - - cfg Config - log log.Logger - - processor processor - - subservices *services.Manager - - mu sync.Mutex - // Set to nil when stop is called... no more managers are created afterwards. - managers map[string]*processorManager -} - -func NewQuerierWorker(cfg Config, handler RequestHandler, log log.Logger, reg prometheus.Registerer) (services.Service, error) { - if cfg.QuerierID == "" { - hostname, err := os.Hostname() - if err != nil { - return nil, errors.Wrap(err, "failed to get hostname for configuring querier ID") - } - cfg.QuerierID = hostname - } - - var processor processor - var servs []services.Service - var address string - - switch { - case cfg.SchedulerAddress != "": - level.Info(log).Log("msg", "Starting querier worker connected to query-scheduler", "scheduler", cfg.SchedulerAddress) - - address = cfg.SchedulerAddress - processor, servs = newSchedulerProcessor(cfg, handler, log, reg) - - case cfg.FrontendAddress != "": - level.Info(log).Log("msg", "Starting querier worker connected to query-frontend", "frontend", cfg.FrontendAddress) - - address = cfg.FrontendAddress - processor = newFrontendProcessor(cfg, handler, log) - - default: - return nil, errors.New("no query-scheduler or query-frontend address") - } - - return newQuerierWorkerWithProcessor(cfg, log, processor, address, servs) -} - -func newQuerierWorkerWithProcessor(cfg Config, log log.Logger, processor processor, address string, servs []services.Service) (*querierWorker, error) { - f := &querierWorker{ - cfg: cfg, - log: log, - managers: map[string]*processorManager{}, - processor: processor, - } - - // Empty address is only used in tests, where individual targets are added manually. - if address != "" { - w, err := util.NewDNSWatcher(address, cfg.DNSLookupPeriod, f) - if err != nil { - return nil, err - } - - servs = append(servs, w) - } - - if len(servs) > 0 { - subservices, err := services.NewManager(servs...) - if err != nil { - return nil, errors.Wrap(err, "querier worker subservices") - } - - f.subservices = subservices - } - - f.BasicService = services.NewIdleService(f.starting, f.stopping) - return f, nil -} - -func (w *querierWorker) starting(ctx context.Context) error { - if w.subservices == nil { - return nil - } - return services.StartManagerAndAwaitHealthy(ctx, w.subservices) -} - -func (w *querierWorker) stopping(_ error) error { - // Stop all goroutines fetching queries. Note that in Stopping state, - // worker no longer creates new managers in AddressAdded method. 
- w.mu.Lock() - for _, m := range w.managers { - m.stop() - } - w.mu.Unlock() - - if w.subservices == nil { - return nil - } - - // Stop DNS watcher and services used by processor. - return services.StopManagerAndAwaitStopped(context.Background(), w.subservices) -} - -func (w *querierWorker) AddressAdded(address string) { - ctx := w.ServiceContext() - if ctx == nil || ctx.Err() != nil { - return - } - - w.mu.Lock() - defer w.mu.Unlock() - - if m := w.managers[address]; m != nil { - return - } - - level.Info(w.log).Log("msg", "adding connection", "addr", address) - conn, err := w.connect(context.Background(), address) - if err != nil { - level.Error(w.log).Log("msg", "error connecting", "addr", address, "err", err) - return - } - - w.managers[address] = newProcessorManager(ctx, w.processor, conn, address) - // Called with lock. - w.resetConcurrency() -} - -func (w *querierWorker) AddressRemoved(address string) { - level.Info(w.log).Log("msg", "removing connection", "addr", address) - - w.mu.Lock() - p := w.managers[address] - delete(w.managers, address) - // Called with lock. - w.resetConcurrency() - w.mu.Unlock() - - if p != nil { - p.stop() - } -} - -// Must be called with lock. -func (w *querierWorker) resetConcurrency() { - totalConcurrency := 0 - index := 0 - - for _, m := range w.managers { - concurrency := 0 - - if w.cfg.MatchMaxConcurrency { - concurrency = w.cfg.MaxConcurrentRequests / len(w.managers) - - // If max concurrency does not evenly divide into our frontends a subset will be chosen - // to receive an extra connection. Frontend addresses were shuffled above so this will be a - // random selection of frontends. - if index < w.cfg.MaxConcurrentRequests%len(w.managers) { - level.Warn(w.log).Log("msg", "max concurrency is not evenly divisible across targets, adding an extra connection", "addr", m.address) - concurrency++ - } - } else { - concurrency = w.cfg.Parallelism - } - - // If concurrency is 0 then MaxConcurrentRequests is less than the total number of - // frontends/schedulers. In order to prevent accidentally starving a frontend or scheduler we are just going to - // always connect once to every target. This is dangerous b/c we may start exceeding PromQL - // max concurrency. - if concurrency == 0 { - concurrency = 1 - } - - totalConcurrency += concurrency - m.concurrency(concurrency) - index++ - } - - if totalConcurrency > w.cfg.MaxConcurrentRequests { - level.Warn(w.log).Log("msg", "total worker concurrency is greater than promql max concurrency. Queries may be queued in the querier which reduces QOS") - } -} - -func (w *querierWorker) connect(ctx context.Context, address string) (*grpc.ClientConn, error) { - // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. - opts, err := w.cfg.GRPCClientConfig.DialOption(nil, nil) - if err != nil { - return nil, err - } - - conn, err := grpc.DialContext(ctx, address, opts...) 
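// ---- editor's sketch (illustrative, not part of the vendored file) ----
// Worked example of the resetConcurrency split above, with assumed numbers:
// MatchMaxConcurrency on, MaxConcurrentRequests = 10 and 3 managers yields
// 4 + 3 + 3 connections; the remainder goes to the lowest index positions.
package main

import "fmt"

func main() {
	maxConcurrent, managers := 10, 3
	total := 0
	for index := 0; index < managers; index++ {
		concurrency := maxConcurrent / managers
		if index < maxConcurrent%managers {
			concurrency++ // a subset of targets gets one extra connection
		}
		total += concurrency
		fmt.Printf("manager %d -> %d\n", index, concurrency)
	}
	fmt.Println("total:", total) // total: 10, matching the PromQL max concurrency
}
// ---- end sketch ----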
- if err != nil { - return nil, err - } - return conn, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go deleted file mode 100644 index 4d912f402..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go +++ /dev/null @@ -1,205 +0,0 @@ -package client - -import ( - "context" - "fmt" - "io" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/user" - "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/cortexproject/cortex/pkg/ring/util" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// PoolClient is the interface that should be implemented by a -// client managed by the pool. -type PoolClient interface { - grpc_health_v1.HealthClient - io.Closer -} - -// PoolFactory defines the signature for a client factory. -type PoolFactory func(addr string) (PoolClient, error) - -// PoolServiceDiscovery defines the signature of a function returning the list -// of known service endpoints. This function is used to remove stale clients from -// the pool (a stale client is a client connected to a service endpoint no more -// active). -type PoolServiceDiscovery func() ([]string, error) - -// PoolConfig is config for creating a Pool. -type PoolConfig struct { - CheckInterval time.Duration - HealthCheckEnabled bool - HealthCheckTimeout time.Duration -} - -// Pool holds a cache of grpc_health_v1 clients. -type Pool struct { - services.Service - - cfg PoolConfig - discovery PoolServiceDiscovery - factory PoolFactory - logger log.Logger - clientName string - - sync.RWMutex - clients map[string]PoolClient - - clientsMetric prometheus.Gauge -} - -// NewPool creates a new Pool. -func NewPool(clientName string, cfg PoolConfig, discovery PoolServiceDiscovery, factory PoolFactory, clientsMetric prometheus.Gauge, logger log.Logger) *Pool { - p := &Pool{ - cfg: cfg, - discovery: discovery, - factory: factory, - logger: logger, - clientName: clientName, - clients: map[string]PoolClient{}, - clientsMetric: clientsMetric, - } - - p.Service = services. - NewTimerService(cfg.CheckInterval, nil, p.iteration, nil). - WithName(fmt.Sprintf("%s client pool", p.clientName)) - return p -} - -func (p *Pool) iteration(ctx context.Context) error { - p.removeStaleClients() - if p.cfg.HealthCheckEnabled { - p.cleanUnhealthy() - } - return nil -} - -func (p *Pool) fromCache(addr string) (PoolClient, bool) { - p.RLock() - defer p.RUnlock() - client, ok := p.clients[addr] - return client, ok -} - -// GetClientFor gets the client for the specified address. 
If it does not exist it will make a new client -// at that address -func (p *Pool) GetClientFor(addr string) (PoolClient, error) { - client, ok := p.fromCache(addr) - if ok { - return client, nil - } - - p.Lock() - defer p.Unlock() - client, ok = p.clients[addr] - if ok { - return client, nil - } - - client, err := p.factory(addr) - if err != nil { - return nil, err - } - p.clients[addr] = client - if p.clientsMetric != nil { - p.clientsMetric.Add(1) - } - return client, nil -} - -// RemoveClientFor removes the client with the specified address -func (p *Pool) RemoveClientFor(addr string) { - p.Lock() - defer p.Unlock() - client, ok := p.clients[addr] - if ok { - delete(p.clients, addr) - if p.clientsMetric != nil { - p.clientsMetric.Add(-1) - } - // Close in the background since this operation may take awhile and we have a mutex - go func(addr string, closer PoolClient) { - if err := closer.Close(); err != nil { - level.Error(p.logger).Log("msg", fmt.Sprintf("error closing connection to %s", p.clientName), "addr", addr, "err", err) - } - }(addr, client) - } -} - -// RegisteredAddresses returns all the service addresses for which there's an active client. -func (p *Pool) RegisteredAddresses() []string { - result := []string{} - p.RLock() - defer p.RUnlock() - for addr := range p.clients { - result = append(result, addr) - } - return result -} - -// Count returns how many clients are in the cache -func (p *Pool) Count() int { - p.RLock() - defer p.RUnlock() - return len(p.clients) -} - -func (p *Pool) removeStaleClients() { - // Only if service discovery has been configured. - if p.discovery == nil { - return - } - - serviceAddrs, err := p.discovery() - if err != nil { - level.Error(p.logger).Log("msg", "error removing stale clients", "err", err) - return - } - - for _, addr := range p.RegisteredAddresses() { - if util.StringsContain(serviceAddrs, addr) { - continue - } - level.Info(p.logger).Log("msg", "removing stale client", "addr", addr) - p.RemoveClientFor(addr) - } -} - -// cleanUnhealthy loops through all servers and deletes any that fails a healthcheck. 
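// ---- editor's sketch (illustrative, not part of the vendored file) ----
// GetClientFor above uses check-under-read-lock followed by a re-check under
// the write lock, so concurrent callers for one address build exactly one
// client. The same pattern with a hypothetical string "client" type:
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	sync.RWMutex
	clients map[string]string
}

func (c *cache) getClientFor(addr string, factory func(string) string) string {
	c.RLock()
	cl, ok := c.clients[addr]
	c.RUnlock()
	if ok {
		return cl
	}

	c.Lock()
	defer c.Unlock()
	if cl, ok := c.clients[addr]; ok { // re-check: another goroutine may have won the race
		return cl
	}
	cl = factory(addr)
	c.clients[addr] = cl
	return cl
}

func main() {
	c := &cache{clients: map[string]string{}}
	fmt.Println(c.getClientFor("10.0.0.1:9095", func(a string) string { return "client-for-" + a }))
}
// ---- end sketch ----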
-func (p *Pool) cleanUnhealthy() { - for _, addr := range p.RegisteredAddresses() { - client, ok := p.fromCache(addr) - // not ok means someone removed a client between the start of this loop and now - if ok { - err := healthCheck(client, p.cfg.HealthCheckTimeout) - if err != nil { - level.Warn(p.logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err) - p.RemoveClientFor(addr) - } - } - } -} - -// healthCheck will check if the client is still healthy, returning an error if it is not -func healthCheck(client PoolClient, timeout time.Duration) error { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - ctx = user.InjectOrgID(ctx, "0") - - resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) - if err != nil { - return err - } - if resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { - return fmt.Errorf("failing healthcheck status: %s", resp.Status) - } - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go deleted file mode 100644 index 797b171c0..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go +++ /dev/null @@ -1,25 +0,0 @@ -package client - -import ( - "errors" - - "github.com/cortexproject/cortex/pkg/ring" -) - -func NewRingServiceDiscovery(r ring.ReadRing) PoolServiceDiscovery { - return func() ([]string, error) { - replicationSet, err := r.GetAllHealthy(ring.Reporting) - if errors.Is(err, ring.ErrEmptyRing) { - return nil, nil - } - if err != nil { - return nil, err - } - - var addrs []string - for _, instance := range replicationSet.Instances { - addrs = append(addrs, instance.Addr) - } - return addrs, nil - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go deleted file mode 100644 index e8dde218f..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go +++ /dev/null @@ -1,553 +0,0 @@ -package ruler - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gorilla/mux" - "github.com/pkg/errors" - v1 "github.com/prometheus/client_golang/api/prometheus/v1" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/rulefmt" - "github.com/weaveworks/common/user" - "gopkg.in/yaml.v3" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ruler/rulespb" - "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/tenant" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -// In order to reimplement the prometheus rules API, a large amount of code was copied over -// This is required because the prometheus api implementation does not allow us to return errors -// on rule lookups, which might fail in Cortex's case. - -type response struct { - Status string `json:"status"` - Data interface{} `json:"data"` - ErrorType v1.ErrorType `json:"errorType"` - Error string `json:"error"` -} - -// AlertDiscovery has info for all active alerts. -type AlertDiscovery struct { - Alerts []*Alert `json:"alerts"` -} - -// Alert has info for an alert. 
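// ---- editor's sketch (illustrative, not part of the vendored file) ----
// removeStaleClients above keeps only addresses still reported by service
// discovery, so a PoolServiceDiscovery such as NewRingServiceDiscovery
// shrinks the pool as instances leave the ring; note the empty-ring case
// returns (nil, nil), which drops every remaining client rather than
// surfacing an error. The addresses below are assumptions:
package main

import "fmt"

func main() {
	registered := []string{"10.0.0.1:9095", "10.0.0.2:9095"}
	discovered := map[string]bool{"10.0.0.1:9095": true} // 10.0.0.2 left the ring

	for _, addr := range registered {
		if !discovered[addr] {
			fmt.Println("removing stale client", addr)
		}
	}
}
// ---- end sketch ----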
-type Alert struct { - Labels labels.Labels `json:"labels"` - Annotations labels.Labels `json:"annotations"` - State string `json:"state"` - ActiveAt *time.Time `json:"activeAt"` - Value string `json:"value"` -} - -// RuleDiscovery has info for all rules -type RuleDiscovery struct { - RuleGroups []*RuleGroup `json:"groups"` -} - -// RuleGroup has info for rules which are part of a group -type RuleGroup struct { - Name string `json:"name"` - File string `json:"file"` - // In order to preserve rule ordering, while exposing type (alerting or recording) - // specific properties, both alerting and recording rules are exposed in the - // same array. - Rules []rule `json:"rules"` - Interval float64 `json:"interval"` - LastEvaluation time.Time `json:"lastEvaluation"` - EvaluationTime float64 `json:"evaluationTime"` -} - -type rule interface{} - -type alertingRule struct { - // State can be "pending", "firing", "inactive". - State string `json:"state"` - Name string `json:"name"` - Query string `json:"query"` - Duration float64 `json:"duration"` - Labels labels.Labels `json:"labels"` - Annotations labels.Labels `json:"annotations"` - Alerts []*Alert `json:"alerts"` - Health string `json:"health"` - LastError string `json:"lastError"` - Type v1.RuleType `json:"type"` - LastEvaluation time.Time `json:"lastEvaluation"` - EvaluationTime float64 `json:"evaluationTime"` -} - -type recordingRule struct { - Name string `json:"name"` - Query string `json:"query"` - Labels labels.Labels `json:"labels"` - Health string `json:"health"` - LastError string `json:"lastError"` - Type v1.RuleType `json:"type"` - LastEvaluation time.Time `json:"lastEvaluation"` - EvaluationTime float64 `json:"evaluationTime"` -} - -func respondError(logger log.Logger, w http.ResponseWriter, msg string) { - b, err := json.Marshal(&response{ - Status: "error", - ErrorType: v1.ErrServer, - Error: msg, - Data: nil, - }) - - if err != nil { - level.Error(logger).Log("msg", "error marshaling json response", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusInternalServerError) - if n, err := w.Write(b); err != nil { - level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) - } -} - -// API is used to handle HTTP requests for the ruler service -type API struct { - ruler *Ruler - store rulestore.RuleStore - - logger log.Logger -} - -// NewAPI returns a new API struct with the provided ruler and rule store -func NewAPI(r *Ruler, s rulestore.RuleStore, logger log.Logger) *API { - return &API{ - ruler: r, - store: s, - logger: logger, - } -} - -func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), a.logger) - userID, err := tenant.TenantID(req.Context()) - if err != nil || userID == "" { - level.Error(logger).Log("msg", "error extracting org id from context", "err", err) - respondError(logger, w, "no valid org id found") - return - } - - w.Header().Set("Content-Type", "application/json") - rgs, err := a.ruler.GetRules(req.Context()) - - if err != nil { - respondError(logger, w, err.Error()) - return - } - - groups := make([]*RuleGroup, 0, len(rgs)) - - for _, g := range rgs { - grp := RuleGroup{ - Name: g.Group.Name, - File: g.Group.Namespace, - Rules: make([]rule, len(g.ActiveRules)), - Interval: g.Group.Interval.Seconds(), - LastEvaluation: g.GetEvaluationTimestamp(), - EvaluationTime: g.GetEvaluationDuration().Seconds(), - } - - for i, rl := range g.ActiveRules { - if 
g.ActiveRules[i].Rule.Alert != "" { - alerts := make([]*Alert, 0, len(rl.Alerts)) - for _, a := range rl.Alerts { - alerts = append(alerts, &Alert{ - Labels: cortexpb.FromLabelAdaptersToLabels(a.Labels), - Annotations: cortexpb.FromLabelAdaptersToLabels(a.Annotations), - State: a.GetState(), - ActiveAt: &a.ActiveAt, - Value: strconv.FormatFloat(a.Value, 'e', -1, 64), - }) - } - grp.Rules[i] = alertingRule{ - State: rl.GetState(), - Name: rl.Rule.GetAlert(), - Query: rl.Rule.GetExpr(), - Duration: rl.Rule.For.Seconds(), - Labels: cortexpb.FromLabelAdaptersToLabels(rl.Rule.Labels), - Annotations: cortexpb.FromLabelAdaptersToLabels(rl.Rule.Annotations), - Alerts: alerts, - Health: rl.GetHealth(), - LastError: rl.GetLastError(), - LastEvaluation: rl.GetEvaluationTimestamp(), - EvaluationTime: rl.GetEvaluationDuration().Seconds(), - Type: v1.RuleTypeAlerting, - } - } else { - grp.Rules[i] = recordingRule{ - Name: rl.Rule.GetRecord(), - Query: rl.Rule.GetExpr(), - Labels: cortexpb.FromLabelAdaptersToLabels(rl.Rule.Labels), - Health: rl.GetHealth(), - LastError: rl.GetLastError(), - LastEvaluation: rl.GetEvaluationTimestamp(), - EvaluationTime: rl.GetEvaluationDuration().Seconds(), - Type: v1.RuleTypeRecording, - } - } - } - groups = append(groups, &grp) - } - - // keep data.groups are in order - sort.Slice(groups, func(i, j int) bool { - return groups[i].File < groups[j].File - }) - - b, err := json.Marshal(&response{ - Status: "success", - Data: &RuleDiscovery{RuleGroups: groups}, - }) - if err != nil { - level.Error(logger).Log("msg", "error marshaling json response", "err", err) - respondError(logger, w, "unable to marshal the requested data") - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if n, err := w.Write(b); err != nil { - level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) - } -} - -func (a *API) PrometheusAlerts(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), a.logger) - userID, err := tenant.TenantID(req.Context()) - if err != nil || userID == "" { - level.Error(logger).Log("msg", "error extracting org id from context", "err", err) - respondError(logger, w, "no valid org id found") - return - } - - w.Header().Set("Content-Type", "application/json") - rgs, err := a.ruler.GetRules(req.Context()) - - if err != nil { - respondError(logger, w, err.Error()) - return - } - - alerts := []*Alert{} - - for _, g := range rgs { - for _, rl := range g.ActiveRules { - if rl.Rule.Alert != "" { - for _, a := range rl.Alerts { - alerts = append(alerts, &Alert{ - Labels: cortexpb.FromLabelAdaptersToLabels(a.Labels), - Annotations: cortexpb.FromLabelAdaptersToLabels(a.Annotations), - State: a.GetState(), - ActiveAt: &a.ActiveAt, - Value: strconv.FormatFloat(a.Value, 'e', -1, 64), - }) - } - } - } - } - - b, err := json.Marshal(&response{ - Status: "success", - Data: &AlertDiscovery{Alerts: alerts}, - }) - if err != nil { - level.Error(logger).Log("msg", "error marshaling json response", "err", err) - respondError(logger, w, "unable to marshal the requested data") - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if n, err := w.Write(b); err != nil { - level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) - } -} - -var ( - // ErrNoNamespace signals that no namespace was specified in the request - ErrNoNamespace = errors.New("a namespace must be provided in the request") - // ErrNoGroupName 
signals a group name url parameter was not found - ErrNoGroupName = errors.New("a matching group name must be provided in the request") - // ErrNoRuleGroups signals the rule group requested does not exist - ErrNoRuleGroups = errors.New("no rule groups found") - // ErrBadRuleGroup is returned when the provided rule group can not be unmarshalled - ErrBadRuleGroup = errors.New("unable to decoded rule group") -) - -func marshalAndSend(output interface{}, w http.ResponseWriter, logger log.Logger) { - d, err := yaml.Marshal(&output) - if err != nil { - level.Error(logger).Log("msg", "error marshalling yaml rule groups", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/yaml") - if _, err := w.Write(d); err != nil { - level.Error(logger).Log("msg", "error writing yaml response", "err", err) - return - } -} - -func respondAccepted(w http.ResponseWriter, logger log.Logger) { - b, err := json.Marshal(&response{ - Status: "success", - }) - if err != nil { - level.Error(logger).Log("msg", "error marshaling json response", "err", err) - respondError(logger, w, "unable to marshal the requested data") - return - } - w.Header().Set("Content-Type", "application/json") - - // Return a status accepted because the rule has been stored and queued for polling, but is not currently active - w.WriteHeader(http.StatusAccepted) - if n, err := w.Write(b); err != nil { - level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) - } -} - -// parseNamespace parses the namespace from the provided set of params, in this -// api these params are derived from the url path -func parseNamespace(params map[string]string) (string, error) { - namespace, exists := params["namespace"] - if !exists { - return "", ErrNoNamespace - } - - namespace, err := url.PathUnescape(namespace) - if err != nil { - return "", err - } - - return namespace, nil -} - -// parseGroupName parses the group name from the provided set of params, in this -// api these params are derived from the url path -func parseGroupName(params map[string]string) (string, error) { - groupName, exists := params["groupName"] - if !exists { - return "", ErrNoGroupName - } - - groupName, err := url.PathUnescape(groupName) - if err != nil { - return "", err - } - - return groupName, nil -} - -// parseRequest parses the incoming request to parse out the userID, rules namespace, and rule group name -// and returns them in that order. It also allows users to require a namespace or group name and return -// an error if it they can not be parsed. 
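// ---- editor's sketch (illustrative, not part of the vendored file) ----
// Namespaces and group names arrive percent-encoded in the URL path, which
// is why both parsers above call url.PathUnescape. With a hypothetical
// namespace value:
package main

import (
	"fmt"
	"net/url"
)

func main() {
	ns, err := url.PathUnescape("team%2Fproduction")
	fmt.Println(ns, err) // team/production <nil>
}
// ---- end sketch ----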
-func parseRequest(req *http.Request, requireNamespace, requireGroup bool) (string, string, string, error) { - userID, err := tenant.TenantID(req.Context()) - if err != nil { - return "", "", "", user.ErrNoOrgID - } - - vars := mux.Vars(req) - - namespace, err := parseNamespace(vars) - if err != nil { - if err != ErrNoNamespace || requireNamespace { - return "", "", "", err - } - } - - group, err := parseGroupName(vars) - if err != nil { - if err != ErrNoGroupName || requireGroup { - return "", "", "", err - } - } - - return userID, namespace, group, nil -} - -func (a *API) ListRules(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), a.logger) - - userID, namespace, _, err := parseRequest(req, false, false) - if err != nil { - respondError(logger, w, err.Error()) - return - } - - level.Debug(logger).Log("msg", "retrieving rule groups with namespace", "userID", userID, "namespace", namespace) - rgs, err := a.store.ListRuleGroupsForUserAndNamespace(req.Context(), userID, namespace) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - if len(rgs) == 0 { - level.Info(logger).Log("msg", "no rule groups found", "userID", userID) - http.Error(w, ErrNoRuleGroups.Error(), http.StatusNotFound) - return - } - - err = a.store.LoadRuleGroups(req.Context(), map[string]rulespb.RuleGroupList{userID: rgs}) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - level.Debug(logger).Log("msg", "retrieved rule groups from rule store", "userID", userID, "num_namespaces", len(rgs)) - - formatted := rgs.Formatted() - marshalAndSend(formatted, w, logger) -} - -func (a *API) GetRuleGroup(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), a.logger) - userID, namespace, groupName, err := parseRequest(req, true, true) - if err != nil { - respondError(logger, w, err.Error()) - return - } - - rg, err := a.store.GetRuleGroup(req.Context(), userID, namespace, groupName) - if err != nil { - if errors.Is(err, rulestore.ErrGroupNotFound) { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - formatted := rulespb.FromProto(rg) - marshalAndSend(formatted, w, logger) -} - -func (a *API) CreateRuleGroup(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), a.logger) - userID, namespace, _, err := parseRequest(req, true, false) - if err != nil { - respondError(logger, w, err.Error()) - return - } - - payload, err := ioutil.ReadAll(req.Body) - if err != nil { - level.Error(logger).Log("msg", "unable to read rule group payload", "err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - level.Debug(logger).Log("msg", "attempting to unmarshal rulegroup", "userID", userID, "group", string(payload)) - - rg := rulefmt.RuleGroup{} - err = yaml.Unmarshal(payload, &rg) - if err != nil { - level.Error(logger).Log("msg", "unable to unmarshal rule group payload", "err", err.Error()) - http.Error(w, ErrBadRuleGroup.Error(), http.StatusBadRequest) - return - } - - errs := a.ruler.manager.ValidateRuleGroup(rg) - if len(errs) > 0 { - e := []string{} - for _, err := range errs { - level.Error(logger).Log("msg", "unable to validate rule group payload", "err", err.Error()) - e = append(e, err.Error()) - } - - http.Error(w, strings.Join(e, ", "), http.StatusBadRequest) - return - } - - if err := a.ruler.AssertMaxRulesPerRuleGroup(userID, len(rg.Rules)); err != 
nil { - level.Error(logger).Log("msg", "limit validation failure", "err", err.Error(), "user", userID) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - rgs, err := a.store.ListRuleGroupsForUserAndNamespace(req.Context(), userID, "") - if err != nil { - level.Error(logger).Log("msg", "unable to fetch current rule groups for validation", "err", err.Error(), "user", userID) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if err := a.ruler.AssertMaxRuleGroups(userID, len(rgs)+1); err != nil { - level.Error(logger).Log("msg", "limit validation failure", "err", err.Error(), "user", userID) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - rgProto := rulespb.ToProto(userID, namespace, rg) - - level.Debug(logger).Log("msg", "attempting to store rulegroup", "userID", userID, "group", rgProto.String()) - err = a.store.SetRuleGroup(req.Context(), userID, namespace, rgProto) - if err != nil { - level.Error(logger).Log("msg", "unable to store rule group", "err", err.Error()) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - respondAccepted(w, logger) -} - -func (a *API) DeleteNamespace(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), a.logger) - - userID, namespace, _, err := parseRequest(req, true, false) - if err != nil { - respondError(logger, w, err.Error()) - return - } - - err = a.store.DeleteNamespace(req.Context(), userID, namespace) - if err != nil { - if err == rulestore.ErrGroupNamespaceNotFound { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - respondError(logger, w, err.Error()) - return - } - - respondAccepted(w, logger) -} - -func (a *API) DeleteRuleGroup(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), a.logger) - - userID, namespace, groupName, err := parseRequest(req, true, true) - if err != nil { - respondError(logger, w, err.Error()) - return - } - - err = a.store.DeleteRuleGroup(req.Context(), userID, namespace, groupName) - if err != nil { - if err == rulestore.ErrGroupNotFound { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - respondError(logger, w, err.Error()) - return - } - - respondAccepted(w, logger) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/client_pool.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/client_pool.go deleted file mode 100644 index 238adfdb7..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/client_pool.go +++ /dev/null @@ -1,101 +0,0 @@ -package ruler - -import ( - "time" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// ClientsPool is the interface used to get the client from the pool for a specified address. -type ClientsPool interface { - services.Service - // GetClientFor returns the ruler client for the given address. 
- GetClientFor(addr string) (RulerClient, error) -} - -type rulerClientsPool struct { - *client.Pool -} - -func (p *rulerClientsPool) GetClientFor(addr string) (RulerClient, error) { - c, err := p.Pool.GetClientFor(addr) - if err != nil { - return nil, err - } - return c.(RulerClient), nil -} - -func newRulerClientPool(clientCfg grpcclient.Config, logger log.Logger, reg prometheus.Registerer) ClientsPool { - // We prefer sane defaults instead of exposing further config options. - poolCfg := client.PoolConfig{ - CheckInterval: time.Minute, - HealthCheckEnabled: true, - HealthCheckTimeout: 10 * time.Second, - } - - clientsCount := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ruler_clients", - Help: "The current number of ruler clients in the pool.", - }) - - return &rulerClientsPool{ - client.NewPool("ruler", poolCfg, nil, newRulerClientFactory(clientCfg, reg), clientsCount, logger), - } -} - -func newRulerClientFactory(clientCfg grpcclient.Config, reg prometheus.Registerer) client.PoolFactory { - requestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Name: "cortex_ruler_client_request_duration_seconds", - Help: "Time spent executing requests to the ruler.", - Buckets: prometheus.ExponentialBuckets(0.008, 4, 7), - }, []string{"operation", "status_code"}) - - return func(addr string) (client.PoolClient, error) { - return dialRulerClient(clientCfg, addr, requestDuration) - } -} - -func dialRulerClient(clientCfg grpcclient.Config, addr string, requestDuration *prometheus.HistogramVec) (*rulerExtendedClient, error) { - opts, err := clientCfg.DialOption(grpcclient.Instrument(requestDuration)) - if err != nil { - return nil, err - } - - conn, err := grpc.Dial(addr, opts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to dial ruler %s", addr) - } - - return &rulerExtendedClient{ - RulerClient: NewRulerClient(conn), - HealthClient: grpc_health_v1.NewHealthClient(conn), - conn: conn, - }, nil -} - -type rulerExtendedClient struct { - RulerClient - grpc_health_v1.HealthClient - conn *grpc.ClientConn -} - -func (c *rulerExtendedClient) Close() error { - return c.conn.Close() -} - -func (c *rulerExtendedClient) String() string { - return c.RemoteAddress() -} - -func (c *rulerExtendedClient) RemoteAddress() string { - return c.conn.Target() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go deleted file mode 100644 index c355f1596..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go +++ /dev/null @@ -1,296 +0,0 @@ -package ruler - -import ( - "context" - "errors" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/notifier" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/rules" - "github.com/prometheus/prometheus/storage" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/querier" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -// Pusher is an ingester server that accepts pushes. 
-type Pusher interface { - Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) -} - -type PusherAppender struct { - failedWrites prometheus.Counter - totalWrites prometheus.Counter - - ctx context.Context - pusher Pusher - labels []labels.Labels - samples []cortexpb.Sample - userID string - evaluationDelay time.Duration -} - -func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - a.labels = append(a.labels, l) - - // Adapt staleness markers for ruler evaluation delay. As the upstream code - // is using the actual time, when there is a no longer available series. - // This then causes 'out of order' append failures once the series is - // becoming available again. - // see https://github.com/prometheus/prometheus/blob/6c56a1faaaad07317ff585bda75b99bdba0517ad/rules/manager.go#L647-L660 - // Similar to staleness markers, the rule manager also appends actual time to the ALERTS and ALERTS_FOR_STATE series. - // See: https://github.com/prometheus/prometheus/blob/ae086c73cb4d6db9e8b67d5038d3704fea6aec4a/rules/alerting.go#L414-L417 - metricName := l.Get(labels.MetricName) - if a.evaluationDelay > 0 && (value.IsStaleNaN(v) || metricName == "ALERTS" || metricName == "ALERTS_FOR_STATE") { - t -= a.evaluationDelay.Milliseconds() - } - - a.samples = append(a.samples, cortexpb.Sample{ - TimestampMs: t, - Value: v, - }) - return 0, nil -} - -func (a *PusherAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { - return 0, errors.New("exemplars are unsupported") -} - -func (a *PusherAppender) Commit() error { - a.totalWrites.Inc() - - // Since a.pusher is distributor, client.ReuseSlice will be called in a.pusher.Push. - // We shouldn't call client.ReuseSlice here. - _, err := a.pusher.Push(user.InjectOrgID(a.ctx, a.userID), cortexpb.ToWriteRequest(a.labels, a.samples, nil, cortexpb.RULE)) - - if err != nil { - // Don't report errors that ended with 4xx HTTP status code (series limits, duplicate samples, out of order, etc.) - if resp, ok := httpgrpc.HTTPResponseFromError(err); !ok || resp.Code/100 != 4 { - a.failedWrites.Inc() - } - } - - a.labels = nil - a.samples = nil - return err -} - -func (a *PusherAppender) Rollback() error { - a.labels = nil - a.samples = nil - return nil -} - -// PusherAppendable fulfills the storage.Appendable interface for prometheus manager -type PusherAppendable struct { - pusher Pusher - userID string - rulesLimits RulesLimits - - totalWrites prometheus.Counter - failedWrites prometheus.Counter -} - -func NewPusherAppendable(pusher Pusher, userID string, limits RulesLimits, totalWrites, failedWrites prometheus.Counter) *PusherAppendable { - return &PusherAppendable{ - pusher: pusher, - userID: userID, - rulesLimits: limits, - totalWrites: totalWrites, - failedWrites: failedWrites, - } -} - -// Appender returns a storage.Appender -func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender { - return &PusherAppender{ - failedWrites: t.failedWrites, - totalWrites: t.totalWrites, - - ctx: ctx, - pusher: t.pusher, - userID: t.userID, - evaluationDelay: t.rulesLimits.EvaluationDelay(t.userID), - } -} - -// RulesLimits defines limits used by Ruler. 
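// ---- editor's sketch (illustrative, not part of the vendored file) ----
// Commit above increments failedWrites only when the push error does not map
// to a 4xx status: limits, duplicate samples and out-of-order errors are
// tenant-side data problems, not ruler write failures. The status codes
// below are assumptions:
package main

import "fmt"

func countsAsFailedWrite(httpCode int32) bool {
	return httpCode/100 != 4
}

func main() {
	fmt.Println(countsAsFailedWrite(429)) // false: per-tenant limit hit
	fmt.Println(countsAsFailedWrite(500)) // true: server-side failure
}
// ---- end sketch ----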
-type RulesLimits interface { - EvaluationDelay(userID string) time.Duration - RulerTenantShardSize(userID string) int - RulerMaxRuleGroupsPerTenant(userID string) int - RulerMaxRulesPerRuleGroup(userID string) int -} - -// EngineQueryFunc returns a new query function using the rules.EngineQueryFunc function -// and passing an altered timestamp. -func EngineQueryFunc(engine *promql.Engine, q storage.Queryable, overrides RulesLimits, userID string) rules.QueryFunc { - return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - orig := rules.EngineQueryFunc(engine, q) - // Delay the evaluation of all rules by a set interval to give a buffer - // to metric that haven't been forwarded to cortex yet. - evaluationDelay := overrides.EvaluationDelay(userID) - return orig(ctx, qs, t.Add(-evaluationDelay)) - } -} - -func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Counter) rules.QueryFunc { - return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - queries.Inc() - result, err := qf(ctx, qs, t) - - // We only care about errors returned by underlying Queryable. Errors returned by PromQL engine are "user-errors", - // and not interesting here. - qerr := QueryableError{} - if err != nil && errors.As(err, &qerr) { - origErr := qerr.Unwrap() - - // Not all errors returned by Queryable are interesting, only those that would result in 500 status code. - // - // We rely on TranslateToPromqlApiError to do its job here... it returns nil, if err is nil. - // It returns promql.ErrStorage, if error should be reported back as 500. - // Other errors it returns are either for canceled or timed-out queriers (we're not reporting those as failures), - // or various user-errors (limits, duplicate samples, etc. ... also not failures). - // - // All errors will still be counted towards "evaluation failures" metrics and logged by Prometheus Ruler, - // but we only want internal errors here. - if _, ok := querier.TranslateToPromqlAPIError(origErr).(promql.ErrStorage); ok { - failedQueries.Inc() - } - - // Return unwrapped error. - return result, origErr - } - - return result, err - } -} - -func RecordAndReportRuleQueryMetrics(qf rules.QueryFunc, queryTime prometheus.Counter, logger log.Logger) rules.QueryFunc { - if queryTime == nil { - return qf - } - - return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - // If we've been passed a counter we want to record the wall time spent executing this request. - timer := prometheus.NewTimer(nil) - defer func() { - querySeconds := timer.ObserveDuration().Seconds() - queryTime.Add(querySeconds) - - // Log ruler query stats. - logMessage := []interface{}{ - "msg", "query stats", - "component", "ruler", - "cortex_ruler_query_seconds_total", querySeconds, - "query", qs, - } - level.Info(util_log.WithContext(ctx, logger)).Log(logMessage...) - }() - - result, err := qf(ctx, qs, t) - return result, err - } -} - -// This interface mimicks rules.Manager API. Interface is used to simplify tests. -type RulesManager interface { - // Starts rules manager. Blocks until Stop is called. - Run() - - // Stops rules manager. (Unblocks Run.) - Stop() - - // Updates rules manager state. - Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc rules.RuleGroupPostProcessFunc) error - - // Returns current rules groups. 
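// ---- editor's sketch (illustrative, not part of the vendored file) ----
// EngineQueryFunc above evaluates every rule at t minus the per-tenant
// evaluation delay, buffering against samples still in flight to Cortex.
// With an assumed 1m delay and a made-up evaluation time:
package main

import (
	"fmt"
	"time"
)

func main() {
	evaluationDelay := time.Minute
	t := time.Date(2024, 4, 30, 12, 0, 30, 0, time.UTC)
	fmt.Println(t.Add(-evaluationDelay)) // 2024-04-30 11:59:30 +0000 UTC
}
// ---- end sketch ----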
- RuleGroups() []*rules.Group -} - -// ManagerFactory is a function that creates new RulesManager for given user and notifier.Manager. -type ManagerFactory func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager - -func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engine *promql.Engine, overrides RulesLimits, reg prometheus.Registerer) ManagerFactory { - totalWrites := promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ruler_write_requests_total", - Help: "Number of write requests to ingesters.", - }) - failedWrites := promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ruler_write_requests_failed_total", - Help: "Number of failed write requests to ingesters.", - }) - - totalQueries := promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ruler_queries_total", - Help: "Number of queries executed by ruler.", - }) - failedQueries := promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ruler_queries_failed_total", - Help: "Number of failed queries by ruler.", - }) - var rulerQuerySeconds *prometheus.CounterVec - if cfg.EnableQueryStats { - rulerQuerySeconds = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ruler_query_seconds_total", - Help: "Total amount of wall clock time spent processing queries by the ruler.", - }, []string{"user"}) - } - - // Wrap errors returned by Queryable to our wrapper, so that we can distinguish between those errors - // and errors returned by PromQL engine. Errors from Queryable can be either caused by user (limits) or internal errors. - // Errors from PromQL are always "user" errors. - q = querier.NewErrorTranslateQueryableWithFn(q, WrapQueryableErrors) - - return func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager { - var queryTime prometheus.Counter = nil - if rulerQuerySeconds != nil { - queryTime = rulerQuerySeconds.WithLabelValues(userID) - } - - return rules.NewManager(&rules.ManagerOptions{ - Appendable: NewPusherAppendable(p, userID, overrides, totalWrites, failedWrites), - Queryable: q, - QueryFunc: RecordAndReportRuleQueryMetrics(MetricsQueryFunc(EngineQueryFunc(engine, q, overrides, userID), totalQueries, failedQueries), queryTime, logger), - Context: user.InjectOrgID(ctx, userID), - ExternalURL: cfg.ExternalURL.URL, - NotifyFunc: SendAlerts(notifier, cfg.ExternalURL.URL.String()), - Logger: log.With(logger, "user", userID), - Registerer: reg, - OutageTolerance: cfg.OutageTolerance, - ForGracePeriod: cfg.ForGracePeriod, - ResendDelay: cfg.ResendDelay, - }) - } -} - -type QueryableError struct { - err error -} - -func (q QueryableError) Unwrap() error { - return q.err -} - -func (q QueryableError) Error() string { - return q.err.Error() -} - -func WrapQueryableErrors(err error) error { - if err == nil { - return err - } - - return QueryableError{err: err} -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go deleted file mode 100644 index 65bb4bf71..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go +++ /dev/null @@ -1,28 +0,0 @@ -package ruler - -import ( - "github.com/cortexproject/cortex/pkg/ring" -) - -func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) 
(ring.InstanceState, ring.Tokens) { - // When we initialize the ruler instance in the ring we want to start from - // a clean situation, so whatever is the state we set it ACTIVE, while we keep existing - // tokens (if any). - var tokens []uint32 - if instanceExists { - tokens = instanceDesc.GetTokens() - } - - takenTokens := ringDesc.GetTokens() - newTokens := ring.GenerateTokens(r.cfg.Ring.NumTokens-len(tokens), takenTokens) - - // Tokens sorting will be enforced by the parent caller. - tokens = append(tokens, newTokens...) - - return ring.ACTIVE, tokens -} - -func (r *Ruler) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} -func (r *Ruler) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go deleted file mode 100644 index 4527c562c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go +++ /dev/null @@ -1,301 +0,0 @@ -package ruler - -import ( - "context" - "fmt" - "net/http" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - ot "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/model/rulefmt" - "github.com/prometheus/prometheus/notifier" - promRules "github.com/prometheus/prometheus/rules" - "github.com/weaveworks/common/user" - "golang.org/x/net/context/ctxhttp" - - "github.com/cortexproject/cortex/pkg/ruler/rulespb" -) - -type DefaultMultiTenantManager struct { - cfg Config - notifierCfg *config.Config - managerFactory ManagerFactory - - mapper *mapper - - // Structs for holding per-user Prometheus rules Managers - // and a corresponding metrics struct - userManagerMtx sync.Mutex - userManagers map[string]RulesManager - userManagerMetrics *ManagerMetrics - - // Per-user notifiers with separate queues. 
- notifiersMtx sync.Mutex - notifiers map[string]*rulerNotifier - - managersTotal prometheus.Gauge - lastReloadSuccessful *prometheus.GaugeVec - lastReloadSuccessfulTimestamp *prometheus.GaugeVec - configUpdatesTotal *prometheus.CounterVec - registry prometheus.Registerer - logger log.Logger -} - -func NewDefaultMultiTenantManager(cfg Config, managerFactory ManagerFactory, reg prometheus.Registerer, logger log.Logger) (*DefaultMultiTenantManager, error) { - ncfg, err := buildNotifierConfig(&cfg) - if err != nil { - return nil, err - } - - userManagerMetrics := NewManagerMetrics(cfg.DisableRuleGroupLabel) - if reg != nil { - reg.MustRegister(userManagerMetrics) - } - - return &DefaultMultiTenantManager{ - cfg: cfg, - notifierCfg: ncfg, - managerFactory: managerFactory, - notifiers: map[string]*rulerNotifier{}, - mapper: newMapper(cfg.RulePath, logger), - userManagers: map[string]RulesManager{}, - userManagerMetrics: userManagerMetrics, - managersTotal: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "ruler_managers_total", - Help: "Total number of managers registered and running in the ruler", - }), - lastReloadSuccessful: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "ruler_config_last_reload_successful", - Help: "Boolean set to 1 whenever the last configuration reload attempt was successful.", - }, []string{"user"}), - lastReloadSuccessfulTimestamp: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "ruler_config_last_reload_successful_seconds", - Help: "Timestamp of the last successful configuration reload.", - }, []string{"user"}), - configUpdatesTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "ruler_config_updates_total", - Help: "Total number of config updates triggered by a user", - }, []string{"user"}), - registry: reg, - logger: logger, - }, nil -} - -func (r *DefaultMultiTenantManager) SyncRuleGroups(ctx context.Context, ruleGroups map[string]rulespb.RuleGroupList) { - // A lock is taken to ensure if this function is called concurrently, then each call - // returns after the call map files and check for updates - r.userManagerMtx.Lock() - defer r.userManagerMtx.Unlock() - - for userID, ruleGroup := range ruleGroups { - r.syncRulesToManager(ctx, userID, ruleGroup) - } - - // Check for deleted users and remove them - for userID, mngr := range r.userManagers { - if _, exists := ruleGroups[userID]; !exists { - go mngr.Stop() - delete(r.userManagers, userID) - - r.removeNotifier(userID) - r.mapper.cleanupUser(userID) - r.lastReloadSuccessful.DeleteLabelValues(userID) - r.lastReloadSuccessfulTimestamp.DeleteLabelValues(userID) - r.configUpdatesTotal.DeleteLabelValues(userID) - r.userManagerMetrics.RemoveUserRegistry(userID) - level.Info(r.logger).Log("msg", "deleted rule manager and local rule files", "user", userID) - } - } - - r.managersTotal.Set(float64(len(r.userManagers))) -} - -// syncRulesToManager maps the rule files to disk, detects any changes and will create/update the -// the users Prometheus Rules Manager. 
-func (r *DefaultMultiTenantManager) syncRulesToManager(ctx context.Context, user string, groups rulespb.RuleGroupList) { - // Map the files to disk and return the file names to be passed to the users manager if they - // have been updated - update, files, err := r.mapper.MapRules(user, groups.Formatted()) - if err != nil { - r.lastReloadSuccessful.WithLabelValues(user).Set(0) - level.Error(r.logger).Log("msg", "unable to map rule files", "user", user, "err", err) - return - } - - manager, exists := r.userManagers[user] - if !exists || update { - level.Debug(r.logger).Log("msg", "updating rules", "user", user) - r.configUpdatesTotal.WithLabelValues(user).Inc() - if !exists { - level.Debug(r.logger).Log("msg", "creating rule manager for user", "user", user) - manager, err = r.newManager(ctx, user) - if err != nil { - r.lastReloadSuccessful.WithLabelValues(user).Set(0) - level.Error(r.logger).Log("msg", "unable to create rule manager", "user", user, "err", err) - return - } - // manager.Run() starts running the manager and blocks until Stop() is called. - // Hence run it as another goroutine. - go manager.Run() - r.userManagers[user] = manager - } - err = manager.Update(r.cfg.EvaluationInterval, files, r.cfg.ExternalLabels, r.cfg.ExternalURL.String(), nil) - if err != nil { - r.lastReloadSuccessful.WithLabelValues(user).Set(0) - level.Error(r.logger).Log("msg", "unable to update rule manager", "user", user, "err", err) - return - } - - r.lastReloadSuccessful.WithLabelValues(user).Set(1) - r.lastReloadSuccessfulTimestamp.WithLabelValues(user).SetToCurrentTime() - } -} - -// newManager creates a prometheus rule manager wrapped with a user id -// configured storage, appendable, notifier, and instrumentation -func (r *DefaultMultiTenantManager) newManager(ctx context.Context, userID string) (RulesManager, error) { - // Create a new Prometheus registry and register it within - // our metrics struct for the provided user if it doesn't already exist. - reg := prometheus.NewRegistry() - r.userManagerMetrics.AddUserRegistry(userID, reg) - - notifier, err := r.getOrCreateNotifier(userID, reg) - if err != nil { - return nil, err - } - - return r.managerFactory(ctx, userID, notifier, r.logger, reg), nil -} - -func (r *DefaultMultiTenantManager) removeNotifier(userID string) { - r.notifiersMtx.Lock() - defer r.notifiersMtx.Unlock() - - if n, ok := r.notifiers[userID]; ok { - n.stop() - } - - delete(r.notifiers, userID) -} - -func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string, userManagerRegistry prometheus.Registerer) (*notifier.Manager, error) { - r.notifiersMtx.Lock() - defer r.notifiersMtx.Unlock() - - n, ok := r.notifiers[userID] - if ok { - // When there is a stale user, we stop the notifier but do not remove it - n.run() - return n.notifier, nil - } - - n = newRulerNotifier(¬ifier.Options{ - QueueCapacity: r.cfg.NotificationQueueCapacity, - Registerer: userManagerRegistry, - Do: func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - // Note: The passed-in context comes from the Prometheus notifier - // and does *not* contain the userID. So it needs to be added to the context - // here before using the context to inject the userID into the HTTP request. 
- ctx = user.InjectOrgID(ctx, userID) - if err := user.InjectOrgIDIntoHTTPRequest(ctx, req); err != nil { - return nil, err - } - // Jaeger complains the passed-in context has an invalid span ID, so start a new root span - sp := ot.GlobalTracer().StartSpan("notify", ot.Tag{Key: "organization", Value: userID}) - defer sp.Finish() - ctx = ot.ContextWithSpan(ctx, sp) - _ = ot.GlobalTracer().Inject(sp.Context(), ot.HTTPHeaders, ot.HTTPHeadersCarrier(req.Header)) - return ctxhttp.Do(ctx, client, req) - }, - }, log.With(r.logger, "user", userID)) - - n.run() - - // This should never fail, unless there's a programming mistake. - if err := n.applyConfig(r.notifierCfg); err != nil { - return nil, err - } - - r.notifiers[userID] = n - return n.notifier, nil -} - -func (r *DefaultMultiTenantManager) GetRules(userID string) []*promRules.Group { - var groups []*promRules.Group - r.userManagerMtx.Lock() - if mngr, exists := r.userManagers[userID]; exists { - groups = mngr.RuleGroups() - } - r.userManagerMtx.Unlock() - return groups -} - -func (r *DefaultMultiTenantManager) Stop() { - r.notifiersMtx.Lock() - for _, n := range r.notifiers { - n.stop() - } - r.notifiersMtx.Unlock() - - level.Info(r.logger).Log("msg", "stopping user managers") - wg := sync.WaitGroup{} - r.userManagerMtx.Lock() - for user, manager := range r.userManagers { - level.Debug(r.logger).Log("msg", "shutting down user manager", "user", user) - wg.Add(1) - go func(manager RulesManager, user string) { - manager.Stop() - wg.Done() - level.Debug(r.logger).Log("msg", "user manager shut down", "user", user) - }(manager, user) - } - wg.Wait() - r.userManagerMtx.Unlock() - level.Info(r.logger).Log("msg", "all user managers stopped") - - // cleanup user rules directories - r.mapper.cleanup() -} - -func (*DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []error { - var errs []error - - if g.Name == "" { - errs = append(errs, errors.New("invalid rules config: rule group name must not be empty")) - return errs - } - - if len(g.Rules) == 0 { - errs = append(errs, fmt.Errorf("invalid rules config: rule group '%s' has no rules", g.Name)) - return errs - } - - for i, r := range g.Rules { - for _, err := range r.Validate() { - var ruleName string - if r.Alert.Value != "" { - ruleName = r.Alert.Value - } else { - ruleName = r.Record.Value - } - errs = append(errs, &rulefmt.Error{ - Group: g.Name, - Rule: i, - RuleName: ruleName, - Err: err, - }) - } - } - - return errs -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go deleted file mode 100644 index 7bb3d43c9..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go +++ /dev/null @@ -1,224 +0,0 @@ -package ruler - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/util" -) - -// ManagerMetrics aggregates metrics exported by the Prometheus -// rules package and returns them as Cortex metrics -type ManagerMetrics struct { - regs *util.UserRegistries - disableRuleGroupLabel bool - - EvalDuration *prometheus.Desc - IterationDuration *prometheus.Desc - IterationsMissed *prometheus.Desc - IterationsScheduled *prometheus.Desc - EvalTotal *prometheus.Desc - EvalFailures *prometheus.Desc - GroupInterval *prometheus.Desc - GroupLastEvalTime *prometheus.Desc - GroupLastDuration *prometheus.Desc - GroupRules *prometheus.Desc - GroupLastEvalSamples *prometheus.Desc - - NotificationLatency *prometheus.Desc - 
NotificationErrors *prometheus.Desc - NotificationSent *prometheus.Desc - NotificationDropped *prometheus.Desc - NotificationQueueLength *prometheus.Desc - NotificationQueueCapacity *prometheus.Desc - AlertmanagersDiscovered *prometheus.Desc -} - -// NewManagerMetrics returns a ManagerMetrics struct -func NewManagerMetrics(disableRuleGroupLabel bool) *ManagerMetrics { - commonLabels := []string{"user"} - if !disableRuleGroupLabel { - commonLabels = append(commonLabels, "rule_group") - } - return &ManagerMetrics{ - regs: util.NewUserRegistries(), - disableRuleGroupLabel: disableRuleGroupLabel, - - EvalDuration: prometheus.NewDesc( - "cortex_prometheus_rule_evaluation_duration_seconds", - "The duration for a rule to execute.", - []string{"user"}, - nil, - ), - IterationDuration: prometheus.NewDesc( - "cortex_prometheus_rule_group_duration_seconds", - "The duration of rule group evaluations.", - []string{"user"}, - nil, - ), - IterationsMissed: prometheus.NewDesc( - "cortex_prometheus_rule_group_iterations_missed_total", - "The total number of rule group evaluations missed due to slow rule group evaluation.", - commonLabels, - nil, - ), - IterationsScheduled: prometheus.NewDesc( - "cortex_prometheus_rule_group_iterations_total", - "The total number of scheduled rule group evaluations, whether executed or missed.", - commonLabels, - nil, - ), - EvalTotal: prometheus.NewDesc( - "cortex_prometheus_rule_evaluations_total", - "The total number of rule evaluations.", - commonLabels, - nil, - ), - EvalFailures: prometheus.NewDesc( - "cortex_prometheus_rule_evaluation_failures_total", - "The total number of rule evaluation failures.", - commonLabels, - nil, - ), - GroupInterval: prometheus.NewDesc( - "cortex_prometheus_rule_group_interval_seconds", - "The interval of a rule group.", - commonLabels, - nil, - ), - GroupLastEvalTime: prometheus.NewDesc( - "cortex_prometheus_rule_group_last_evaluation_timestamp_seconds", - "The timestamp of the last rule group evaluation in seconds.", - commonLabels, - nil, - ), - GroupLastDuration: prometheus.NewDesc( - "cortex_prometheus_rule_group_last_duration_seconds", - "The duration of the last rule group evaluation.", - commonLabels, - nil, - ), - GroupRules: prometheus.NewDesc( - "cortex_prometheus_rule_group_rules", - "The number of rules.", - commonLabels, - nil, - ), - GroupLastEvalSamples: prometheus.NewDesc( - "cortex_prometheus_last_evaluation_samples", - "The number of samples returned during the last rule group evaluation.", - commonLabels, - nil, - ), - - // Prometheus' ruler's notification metrics - NotificationLatency: prometheus.NewDesc( - "cortex_prometheus_notifications_latency_seconds", - "Latency quantiles for sending alert notifications.", - []string{"user"}, - nil, - ), - - NotificationErrors: prometheus.NewDesc( - "cortex_prometheus_notifications_errors_total", - "Total number of errors sending alert notifications.", - []string{"user", "alertmanager"}, - nil, - ), - NotificationSent: prometheus.NewDesc( - "cortex_prometheus_notifications_sent_total", - "Total number of alerts sent.", - []string{"user", "alertmanager"}, - nil, - ), - NotificationDropped: prometheus.NewDesc( - "cortex_prometheus_notifications_dropped_total", - "Total number of alerts dropped due to errors when sending to Alertmanager.", - []string{"user"}, - nil, - ), - NotificationQueueLength: prometheus.NewDesc( - "cortex_prometheus_notifications_queue_length", - "The number of alert notifications in the queue.", - []string{"user"}, - nil, - ), - NotificationQueueCapacity: 
prometheus.NewDesc( - "cortex_prometheus_notifications_queue_capacity", - "The capacity of the alert notifications queue.", - []string{"user"}, - nil, - ), - AlertmanagersDiscovered: prometheus.NewDesc( - "cortex_prometheus_notifications_alertmanagers_discovered", - "The number of alertmanagers discovered and active.", - []string{"user"}, - nil, - ), - } -} - -// AddUserRegistry adds a user-specific Prometheus registry. -func (m *ManagerMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { - m.regs.AddUserRegistry(user, reg) -} - -// RemoveUserRegistry removes user-specific Prometheus registry. -func (m *ManagerMetrics) RemoveUserRegistry(user string) { - m.regs.RemoveUserRegistry(user, true) -} - -// Describe implements the Collector interface -func (m *ManagerMetrics) Describe(out chan<- *prometheus.Desc) { - out <- m.EvalDuration - out <- m.IterationDuration - out <- m.IterationsMissed - out <- m.IterationsScheduled - out <- m.EvalTotal - out <- m.EvalFailures - out <- m.GroupInterval - out <- m.GroupLastEvalTime - out <- m.GroupLastDuration - out <- m.GroupRules - out <- m.GroupLastEvalSamples - - out <- m.NotificationLatency - out <- m.NotificationErrors - out <- m.NotificationSent - out <- m.NotificationDropped - out <- m.NotificationQueueLength - out <- m.NotificationQueueCapacity - out <- m.AlertmanagersDiscovered -} - -// Collect implements the Collector interface -func (m *ManagerMetrics) Collect(out chan<- prometheus.Metric) { - data := m.regs.BuildMetricFamiliesPerUser() - labels := []string{} - if !m.disableRuleGroupLabel { - labels = append(labels, "rule_group") - } - // WARNING: It is important that all metrics generated in this method are "Per User". - // Thanks to that we can actually *remove* metrics for given user (see RemoveUserRegistry). - // If same user is later re-added, all metrics will start from 0, which is fine. - - data.SendSumOfSummariesPerUser(out, m.EvalDuration, "prometheus_rule_evaluation_duration_seconds") - data.SendSumOfSummariesPerUser(out, m.IterationDuration, "prometheus_rule_group_duration_seconds") - - data.SendSumOfCountersPerUserWithLabels(out, m.IterationsMissed, "prometheus_rule_group_iterations_missed_total", labels...) - data.SendSumOfCountersPerUserWithLabels(out, m.IterationsScheduled, "prometheus_rule_group_iterations_total", labels...) - data.SendSumOfCountersPerUserWithLabels(out, m.EvalTotal, "prometheus_rule_evaluations_total", labels...) - data.SendSumOfCountersPerUserWithLabels(out, m.EvalFailures, "prometheus_rule_evaluation_failures_total", labels...) - data.SendSumOfGaugesPerUserWithLabels(out, m.GroupInterval, "prometheus_rule_group_interval_seconds", labels...) - data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastEvalTime, "prometheus_rule_group_last_evaluation_timestamp_seconds", labels...) - data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastDuration, "prometheus_rule_group_last_duration_seconds", labels...) - data.SendSumOfGaugesPerUserWithLabels(out, m.GroupRules, "prometheus_rule_group_rules", labels...) - data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastEvalSamples, "prometheus_rule_group_last_evaluation_samples", labels...) 
- - data.SendSumOfSummariesPerUser(out, m.NotificationLatency, "prometheus_notifications_latency_seconds") - data.SendSumOfCountersPerUserWithLabels(out, m.NotificationErrors, "prometheus_notifications_errors_total", "alertmanager") - data.SendSumOfCountersPerUserWithLabels(out, m.NotificationSent, "prometheus_notifications_sent_total", "alertmanager") - data.SendSumOfCountersPerUser(out, m.NotificationDropped, "prometheus_notifications_dropped_total") - data.SendSumOfGaugesPerUser(out, m.NotificationQueueLength, "prometheus_notifications_queue_length") - data.SendSumOfGaugesPerUser(out, m.NotificationQueueCapacity, "prometheus_notifications_queue_capacity") - data.SendSumOfGaugesPerUser(out, m.AlertmanagersDiscovered, "prometheus_notifications_alertmanagers_discovered") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go deleted file mode 100644 index c3b715b7e..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go +++ /dev/null @@ -1,161 +0,0 @@ -package ruler - -import ( - "crypto/md5" - "net/url" - "path/filepath" - "sort" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/rulefmt" - "github.com/spf13/afero" - "gopkg.in/yaml.v3" -) - -// mapper is designed to enusre the provided rule sets are identical -// to the on-disk rules tracked by the prometheus manager -type mapper struct { - Path string // Path specifies the directory in which rule files will be mapped. - - FS afero.Fs - logger log.Logger -} - -func newMapper(path string, logger log.Logger) *mapper { - m := &mapper{ - Path: path, - FS: afero.NewOsFs(), - logger: logger, - } - m.cleanup() - - return m -} - -func (m *mapper) cleanupUser(userID string) { - dirPath := filepath.Join(m.Path, userID) - err := m.FS.RemoveAll(dirPath) - if err != nil { - level.Warn(m.logger).Log("msg", "unable to remove user directory", "path", dirPath, "err", err) - } -} - -// cleanup removes all of the user directories in the path of the mapper -func (m *mapper) cleanup() { - level.Info(m.logger).Log("msg", "cleaning up mapped rules directory", "path", m.Path) - - users, err := m.users() - if err != nil { - level.Error(m.logger).Log("msg", "unable to read rules directory", "path", m.Path, "err", err) - return - } - - for _, u := range users { - m.cleanupUser(u) - } -} - -func (m *mapper) users() ([]string, error) { - var result []string - - dirs, err := afero.ReadDir(m.FS, m.Path) - for _, u := range dirs { - if u.IsDir() { - result = append(result, u.Name()) - } - } - - return result, err -} - -func (m *mapper) MapRules(user string, ruleConfigs map[string][]rulefmt.RuleGroup) (bool, []string, error) { - anyUpdated := false - filenames := []string{} - - // user rule files will be stored as `///` - path := filepath.Join(m.Path, user) - err := m.FS.MkdirAll(path, 0777) - if err != nil { - return false, nil, err - } - - // write all rule configs to disk - for filename, groups := range ruleConfigs { - // Store the encoded file name to better handle `/` characters - encodedFileName := url.PathEscape(filename) - fullFileName := filepath.Join(path, encodedFileName) - - fileUpdated, err := m.writeRuleGroupsIfNewer(groups, fullFileName) - if err != nil { - return false, nil, err - } - filenames = append(filenames, fullFileName) - anyUpdated = anyUpdated || fileUpdated - } - - // and clean any up that shouldn't exist - existingFiles, err := afero.ReadDir(m.FS, path) - if err != nil { - return false, nil, 
err - } - - for _, existingFile := range existingFiles { - fullFileName := filepath.Join(path, existingFile.Name()) - - // Ensure the namespace is decoded from a url path encoding to see if it is still required - decodedNamespace, err := url.PathUnescape(existingFile.Name()) - if err != nil { - level.Warn(m.logger).Log("msg", "unable to remove rule file on disk", "file", fullFileName, "err", err) - continue - } - - ruleGroups := ruleConfigs[string(decodedNamespace)] - - if ruleGroups == nil { - err = m.FS.Remove(fullFileName) - if err != nil { - level.Warn(m.logger).Log("msg", "unable to remove rule file on disk", "file", fullFileName, "err", err) - } - anyUpdated = true - } - } - - return anyUpdated, filenames, nil -} - -func (m *mapper) writeRuleGroupsIfNewer(groups []rulefmt.RuleGroup, filename string) (bool, error) { - sort.Slice(groups, func(i, j int) bool { - return groups[i].Name > groups[j].Name - }) - - rgs := rulefmt.RuleGroups{Groups: groups} - - d, err := yaml.Marshal(&rgs) - if err != nil { - return false, err - } - - _, err = m.FS.Stat(filename) - if err == nil { - current, err := afero.ReadFile(m.FS, filename) - if err != nil { - return false, err - } - newHash := md5.New() - currentHash := md5.New() - - // bailout if there is no update - if string(currentHash.Sum(current)) == string(newHash.Sum(d)) { - return false, nil - } - } - - level.Info(m.logger).Log("msg", "updating rule file", "file", filename) - err = afero.WriteFile(m.FS, filename, d, 0777) - if err != nil { - return false, err - } - - return true, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go deleted file mode 100644 index b8fa7536c..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go +++ /dev/null @@ -1,201 +0,0 @@ -package ruler - -import ( - "context" - "flag" - "fmt" - "net/url" - "regexp" - "strings" - "sync" - - gklog "github.com/go-kit/log" - "github.com/go-kit/log/level" - config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/dns" - "github.com/prometheus/prometheus/notifier" - - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/tls" -) - -type NotifierConfig struct { - TLS tls.ClientConfig `yaml:",inline"` - BasicAuth util.BasicAuth `yaml:",inline"` -} - -func (cfg *NotifierConfig) RegisterFlags(f *flag.FlagSet) { - cfg.TLS.RegisterFlagsWithPrefix("ruler.alertmanager-client", f) - cfg.BasicAuth.RegisterFlagsWithPrefix("ruler.alertmanager-client.", f) -} - -// rulerNotifier bundles a notifier.Manager together with an associated -// Alertmanager service discovery manager and handles the lifecycle -// of both actors. -type rulerNotifier struct { - notifier *notifier.Manager - sdCancel context.CancelFunc - sdManager *discovery.Manager - wg sync.WaitGroup - logger gklog.Logger -} - -func newRulerNotifier(o *notifier.Options, l gklog.Logger) *rulerNotifier { - sdCtx, sdCancel := context.WithCancel(context.Background()) - return &rulerNotifier{ - notifier: notifier.NewManager(o, l), - sdCancel: sdCancel, - sdManager: discovery.NewManager(sdCtx, l), - logger: l, - } -} - -// run starts the notifier. This function doesn't block and returns immediately. 
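The writeRuleGroupsIfNewer method above skips rewriting a rule file whose contents have not changed. Below is a minimal sketch of that write-if-changed pattern, comparing md5 digests of the old and new bytes; writeIfChanged is an illustrative helper and deliberately simplifies the mapper's actual hash handling.

package main

import (
	"crypto/md5"
	"fmt"
	"os"
)

// writeIfChanged writes data to path only when the file is missing or its
// current contents hash differently, returning true when a write happened.
// This mirrors the "bail out if there is no update" idea above in a
// simplified form; it is not the exact logic used by the mapper.
func writeIfChanged(path string, data []byte) (bool, error) {
	if current, err := os.ReadFile(path); err == nil {
		if md5.Sum(current) == md5.Sum(data) {
			return false, nil // contents identical, skip the write
		}
	}
	if err := os.WriteFile(path, data, 0o644); err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	updated, err := writeIfChanged("/tmp/example-rules.yaml", []byte("groups: []\n"))
	fmt.Println(updated, err)
}

Avoiding no-op writes matters here because the Prometheus rule manager reloads rule files it is pointed at, and spurious rewrites would churn reloads on every sync.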
-func (rn *rulerNotifier) run() { - rn.wg.Add(2) - go func() { - if err := rn.sdManager.Run(); err != nil { - level.Error(rn.logger).Log("msg", "error starting notifier discovery manager", "err", err) - } - rn.wg.Done() - }() - go func() { - rn.notifier.Run(rn.sdManager.SyncCh()) - rn.wg.Done() - }() -} - -func (rn *rulerNotifier) applyConfig(cfg *config.Config) error { - if err := rn.notifier.ApplyConfig(cfg); err != nil { - return err - } - - sdCfgs := make(map[string]discovery.Configs) - for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() { - sdCfgs[k] = v.ServiceDiscoveryConfigs - } - return rn.sdManager.ApplyConfig(sdCfgs) -} - -func (rn *rulerNotifier) stop() { - rn.sdCancel() - rn.notifier.Stop() - rn.wg.Wait() -} - -// Builds a Prometheus config.Config from a ruler.Config with just the required -// options to configure notifications to Alertmanager. -func buildNotifierConfig(rulerConfig *Config) (*config.Config, error) { - amURLs := strings.Split(rulerConfig.AlertmanagerURL, ",") - validURLs := make([]*url.URL, 0, len(amURLs)) - - srvDNSregexp := regexp.MustCompile(`^_.+._.+`) - for _, h := range amURLs { - url, err := url.Parse(h) - if err != nil { - return nil, err - } - - if url.String() == "" { - continue - } - - // Given we only support SRV lookups as part of service discovery, we need to ensure - // hosts provided follow this specification: _service._proto.name - // e.g. _http._tcp.alertmanager.com - if rulerConfig.AlertmanagerDiscovery && !srvDNSregexp.MatchString(url.Host) { - return nil, fmt.Errorf("when alertmanager-discovery is on, host name must be of the form _portname._tcp.service.fqdn (is %q)", url.Host) - } - - validURLs = append(validURLs, url) - } - - if len(validURLs) == 0 { - return &config.Config{}, nil - } - - apiVersion := config.AlertmanagerAPIVersionV1 - if rulerConfig.AlertmanangerEnableV2API { - apiVersion = config.AlertmanagerAPIVersionV2 - } - - amConfigs := make([]*config.AlertmanagerConfig, 0, len(validURLs)) - for _, url := range validURLs { - amConfigs = append(amConfigs, amConfigFromURL(rulerConfig, url, apiVersion)) - } - - promConfig := &config.Config{ - GlobalConfig: config.GlobalConfig{ - ExternalLabels: rulerConfig.ExternalLabels, - }, - AlertingConfig: config.AlertingConfig{ - AlertmanagerConfigs: amConfigs, - }, - } - - return promConfig, nil -} - -func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.AlertmanagerAPIVersion) *config.AlertmanagerConfig { - var sdConfig discovery.Configs - if rulerConfig.AlertmanagerDiscovery { - sdConfig = discovery.Configs{ - &dns.SDConfig{ - Names: []string{url.Host}, - RefreshInterval: model.Duration(rulerConfig.AlertmanagerRefreshInterval), - Type: "SRV", - Port: 0, // Ignored, because of SRV. 
- }, - } - - } else { - sdConfig = discovery.Configs{ - discovery.StaticConfig{ - { - Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(url.Host)}}, - }, - }, - } - } - - amConfig := &config.AlertmanagerConfig{ - APIVersion: apiVersion, - Scheme: url.Scheme, - PathPrefix: url.Path, - Timeout: model.Duration(rulerConfig.NotificationTimeout), - ServiceDiscoveryConfigs: sdConfig, - HTTPClientConfig: config_util.HTTPClientConfig{ - TLSConfig: config_util.TLSConfig{ - CAFile: rulerConfig.Notifier.TLS.CAPath, - CertFile: rulerConfig.Notifier.TLS.CertPath, - KeyFile: rulerConfig.Notifier.TLS.KeyPath, - InsecureSkipVerify: rulerConfig.Notifier.TLS.InsecureSkipVerify, - ServerName: rulerConfig.Notifier.TLS.ServerName, - }, - }, - } - - // Check the URL for basic authentication information first - if url.User != nil { - amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{ - Username: url.User.Username(), - } - - if password, isSet := url.User.Password(); isSet { - amConfig.HTTPClientConfig.BasicAuth.Password = config_util.Secret(password) - } - } - - // Override URL basic authentication configs with hard coded config values if present - if rulerConfig.Notifier.BasicAuth.IsEnabled() { - amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{ - Username: rulerConfig.Notifier.BasicAuth.Username, - Password: config_util.Secret(rulerConfig.Notifier.BasicAuth.Password), - } - } - - return amConfig -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go deleted file mode 100644 index ebb8c864a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go +++ /dev/null @@ -1,909 +0,0 @@ -package ruler - -import ( - "context" - "flag" - "fmt" - "hash/fnv" - "net/http" - "net/url" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/rulefmt" - "github.com/prometheus/prometheus/notifier" - promRules "github.com/prometheus/prometheus/rules" - "github.com/prometheus/prometheus/util/strutil" - "github.com/weaveworks/common/user" - "golang.org/x/sync/errgroup" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/ruler/rulespb" - "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/concurrency" - "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -var ( - supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle} - - // Validation errors. - errInvalidShardingStrategy = errors.New("invalid sharding strategy") - errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than 0") -) - -const ( - // ringKey is the key under which we store the rulers ring in the KVStore. - ringKey = "ring" - - // Number of concurrent group list and group loads operations. 
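buildNotifierConfig above accepts a comma-separated list of Alertmanager URLs and, when DNS discovery is enabled, insists on SRV-style hostnames. A small sketch of that validation, assuming the same `^_.+._.+` pattern; parseAlertmanagerURLs is an illustrative name, not part of this repository.

package main

import (
	"fmt"
	"net/url"
	"regexp"
	"strings"
)

// srvDNS matches hosts of the form _service._proto.name, the only shape
// accepted when Alertmanager DNS discovery is enabled.
var srvDNS = regexp.MustCompile(`^_.+._.+`)

// parseAlertmanagerURLs mimics the validation above in a simplified form:
// split the comma-separated list, drop empty entries, and require SRV-style
// hostnames when discovery is enabled.
func parseAlertmanagerURLs(raw string, discovery bool) ([]*url.URL, error) {
	var out []*url.URL
	for _, h := range strings.Split(raw, ",") {
		u, err := url.Parse(h)
		if err != nil {
			return nil, err
		}
		if u.String() == "" {
			continue
		}
		if discovery && !srvDNS.MatchString(u.Host) {
			return nil, fmt.Errorf("host must look like _portname._tcp.service.fqdn, got %q", u.Host)
		}
		out = append(out, u)
	}
	return out, nil
}

func main() {
	urls, err := parseAlertmanagerURLs("http://_http._tcp.alertmanager.example.com", true)
	fmt.Println(len(urls), err) // 1 <nil>
}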
- loadRulesConcurrency = 10 - fetchRulesConcurrency = 16 - - rulerSyncReasonInitial = "initial" - rulerSyncReasonPeriodic = "periodic" - rulerSyncReasonRingChange = "ring-change" - - // Limit errors - errMaxRuleGroupsPerUserLimitExceeded = "per-user rule groups limit (limit: %d actual: %d) exceeded" - errMaxRulesPerRuleGroupPerUserLimitExceeded = "per-user rules per rule group limit (limit: %d actual: %d) exceeded" - - // errors - errListAllUser = "unable to list the ruler users" -) - -// Config is the configuration for the recording rules server. -type Config struct { - // This is used for template expansion in alerts; must be a valid URL. - ExternalURL flagext.URLValue `yaml:"external_url"` - // Labels to add to all alerts - ExternalLabels labels.Labels `yaml:"external_labels,omitempty" doc:"nocli|description=Labels to add to all alerts."` - // GRPC Client configuration. - ClientTLSConfig grpcclient.Config `yaml:"ruler_client"` - // How frequently to evaluate rules by default. - EvaluationInterval time.Duration `yaml:"evaluation_interval"` - // How frequently to poll for updated rules. - PollInterval time.Duration `yaml:"poll_interval"` - // Rule Storage and Polling configuration. - StoreConfig RuleStoreConfig `yaml:"storage" doc:"description=Deprecated. Use -ruler-storage.* CLI flags and their respective YAML config options instead."` - // Path to store rule files for prom manager. - RulePath string `yaml:"rule_path"` - - // URL of the Alertmanager to send notifications to. - // If your are configuring the ruler to send to a Cortex Alertmanager, - // ensure this includes any path set in the Alertmanager external URL. - AlertmanagerURL string `yaml:"alertmanager_url"` - // Whether to use DNS SRV records to discover Alertmanager. - AlertmanagerDiscovery bool `yaml:"enable_alertmanager_discovery"` - // How long to wait between refreshing the list of Alertmanager based on DNS service discovery. - AlertmanagerRefreshInterval time.Duration `yaml:"alertmanager_refresh_interval"` - // Enables the ruler notifier to use the Alertmananger V2 API. - AlertmanangerEnableV2API bool `yaml:"enable_alertmanager_v2"` - // Capacity of the queue for notifications to be sent to the Alertmanager. - NotificationQueueCapacity int `yaml:"notification_queue_capacity"` - // HTTP timeout duration when sending notifications to the Alertmanager. - NotificationTimeout time.Duration `yaml:"notification_timeout"` - // Client configs for interacting with the Alertmanager - Notifier NotifierConfig `yaml:"alertmanager_client"` - - // Max time to tolerate outage for restoring "for" state of alert. - OutageTolerance time.Duration `yaml:"for_outage_tolerance"` - // Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. - ForGracePeriod time.Duration `yaml:"for_grace_period"` - // Minimum amount of time to wait before resending an alert to Alertmanager. - ResendDelay time.Duration `yaml:"resend_delay"` - - // Enable sharding rule groups. 
- EnableSharding bool `yaml:"enable_sharding"` - ShardingStrategy string `yaml:"sharding_strategy"` - SearchPendingFor time.Duration `yaml:"search_pending_for"` - Ring RingConfig `yaml:"ring"` - FlushCheckPeriod time.Duration `yaml:"flush_period"` - - EnableAPI bool `yaml:"enable_api"` - - EnabledTenants flagext.StringSliceCSV `yaml:"enabled_tenants"` - DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` - - RingCheckPeriod time.Duration `yaml:"-"` - - EnableQueryStats bool `yaml:"query_stats_enabled"` - DisableRuleGroupLabel bool `yaml:"disable_rule_group_label"` -} - -// Validate config and returns error on failure -func (cfg *Config) Validate(limits validation.Limits, log log.Logger) error { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { - return errInvalidShardingStrategy - } - - if cfg.ShardingStrategy == util.ShardingStrategyShuffle && limits.RulerTenantShardSize <= 0 { - return errInvalidTenantShardSize - } - - if err := cfg.StoreConfig.Validate(); err != nil { - return errors.Wrap(err, "invalid storage config") - } - if err := cfg.ClientTLSConfig.Validate(log); err != nil { - return errors.Wrap(err, "invalid ruler gRPC client config") - } - return nil -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.ClientTLSConfig.RegisterFlagsWithPrefix("ruler.client", f) - cfg.StoreConfig.RegisterFlags(f) - cfg.Ring.RegisterFlags(f) - cfg.Notifier.RegisterFlags(f) - - // Deprecated Flags that will be maintained to avoid user disruption - - //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods - flagext.DeprecatedFlag(f, "ruler.client-timeout", "This flag has been renamed to ruler.configs.client-timeout", util_log.Logger) - //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods - flagext.DeprecatedFlag(f, "ruler.group-timeout", "This flag is no longer functional.", util_log.Logger) - //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods - flagext.DeprecatedFlag(f, "ruler.num-workers", "This flag is no longer functional. For increased concurrency horizontal sharding is recommended", util_log.Logger) - - cfg.ExternalURL.URL, _ = url.Parse("") // Must be non-nil - f.Var(&cfg.ExternalURL, "ruler.external.url", "URL of alerts return path.") - f.DurationVar(&cfg.EvaluationInterval, "ruler.evaluation-interval", 1*time.Minute, "How frequently to evaluate rules") - f.DurationVar(&cfg.PollInterval, "ruler.poll-interval", 1*time.Minute, "How frequently to poll for rule changes") - - f.StringVar(&cfg.AlertmanagerURL, "ruler.alertmanager-url", "", "Comma-separated list of URL(s) of the Alertmanager(s) to send notifications to. Each Alertmanager URL is treated as a separate group in the configuration. 
Multiple Alertmanagers in HA per group can be supported by using DNS resolution via -ruler.alertmanager-discovery.") - f.BoolVar(&cfg.AlertmanagerDiscovery, "ruler.alertmanager-discovery", false, "Use DNS SRV records to discover Alertmanager hosts.") - f.DurationVar(&cfg.AlertmanagerRefreshInterval, "ruler.alertmanager-refresh-interval", 1*time.Minute, "How long to wait between refreshing DNS resolutions of Alertmanager hosts.") - f.BoolVar(&cfg.AlertmanangerEnableV2API, "ruler.alertmanager-use-v2", false, "If enabled requests to Alertmanager will utilize the V2 API.") - f.IntVar(&cfg.NotificationQueueCapacity, "ruler.notification-queue-capacity", 10000, "Capacity of the queue for notifications to be sent to the Alertmanager.") - f.DurationVar(&cfg.NotificationTimeout, "ruler.notification-timeout", 10*time.Second, "HTTP timeout duration when sending notifications to the Alertmanager.") - - f.DurationVar(&cfg.SearchPendingFor, "ruler.search-pending-for", 5*time.Minute, "Time to spend searching for a pending ruler when shutting down.") - f.BoolVar(&cfg.EnableSharding, "ruler.enable-sharding", false, "Distribute rule evaluation using ring backend") - f.StringVar(&cfg.ShardingStrategy, "ruler.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) - f.DurationVar(&cfg.FlushCheckPeriod, "ruler.flush-period", 1*time.Minute, "Period with which to attempt to flush rule groups.") - f.StringVar(&cfg.RulePath, "ruler.rule-path", "/rules", "file path to store temporary rule files for the prometheus rule managers") - f.BoolVar(&cfg.EnableAPI, "experimental.ruler.enable-api", false, "Enable the ruler api") - f.DurationVar(&cfg.OutageTolerance, "ruler.for-outage-tolerance", time.Hour, `Max time to tolerate outage for restoring "for" state of alert.`) - f.DurationVar(&cfg.ForGracePeriod, "ruler.for-grace-period", 10*time.Minute, `Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period.`) - f.DurationVar(&cfg.ResendDelay, "ruler.resend-delay", time.Minute, `Minimum amount of time to wait before resending an alert to Alertmanager.`) - - f.Var(&cfg.EnabledTenants, "ruler.enabled-tenants", "Comma separated list of tenants whose rules this ruler can evaluate. If specified, only these tenants will be handled by ruler, otherwise this ruler can process rules from all tenants. Subject to sharding.") - f.Var(&cfg.DisabledTenants, "ruler.disabled-tenants", "Comma separated list of tenants whose rules this ruler cannot evaluate. If specified, a ruler that would normally pick the specified tenant(s) for processing will ignore them instead. Subject to sharding.") - - f.BoolVar(&cfg.EnableQueryStats, "ruler.query-stats-enabled", false, "Report the wall time for ruler queries to complete as a per user metric and as an info level log message.") - f.BoolVar(&cfg.DisableRuleGroupLabel, "ruler.disable-rule-group-label", false, "Disable the rule_group label on exported metrics") - - cfg.RingCheckPeriod = 5 * time.Second -} - -// MultiTenantManager is the interface of interaction with a Manager that is tenant aware. -type MultiTenantManager interface { - // SyncRuleGroups is used to sync the Manager with rules from the RuleStore. - // If existing user is missing in the ruleGroups map, its ruler manager will be stopped. 
- SyncRuleGroups(ctx context.Context, ruleGroups map[string]rulespb.RuleGroupList) - // GetRules fetches rules for a particular tenant (userID). - GetRules(userID string) []*promRules.Group - // Stop stops all Manager components. - Stop() - // ValidateRuleGroup validates a rulegroup - ValidateRuleGroup(rulefmt.RuleGroup) []error -} - -// Ruler evaluates rules. -// +---------------------------------------------------------------+ -// | | -// | Query +-------------+ | -// | +------------------> | | -// | | | Store | | -// | | +----------------+ | | -// | | | Rules +-------------+ | -// | | | | -// | | | | -// | | | | -// | +----+-v----+ Filter +------------+ | -// | | +-----------> | | -// | | Ruler | | Ring | | -// | | <-----------+ | | -// | +-------+---+ Rules +------------+ | -// | | | -// | | | -// | | | -// | | Load +-----------------+ | -// | +--------------> | | -// | | Manager | | -// | | | | -// | +-----------------+ | -// | | -// +---------------------------------------------------------------+ -type Ruler struct { - services.Service - - cfg Config - lifecycler *ring.BasicLifecycler - ring *ring.Ring - store rulestore.RuleStore - manager MultiTenantManager - limits RulesLimits - - subservices *services.Manager - subservicesWatcher *services.FailureWatcher - - // Pool of clients used to connect to other ruler replicas. - clientsPool ClientsPool - - ringCheckErrors prometheus.Counter - rulerSync *prometheus.CounterVec - - allowedTenants *util.AllowedTenants - - registry prometheus.Registerer - logger log.Logger -} - -// NewRuler creates a new ruler from a distributor and chunk store. -func NewRuler(cfg Config, manager MultiTenantManager, reg prometheus.Registerer, logger log.Logger, ruleStore rulestore.RuleStore, limits RulesLimits) (*Ruler, error) { - return newRuler(cfg, manager, reg, logger, ruleStore, limits, newRulerClientPool(cfg.ClientTLSConfig, logger, reg)) -} - -func newRuler(cfg Config, manager MultiTenantManager, reg prometheus.Registerer, logger log.Logger, ruleStore rulestore.RuleStore, limits RulesLimits, clientPool ClientsPool) (*Ruler, error) { - ruler := &Ruler{ - cfg: cfg, - store: ruleStore, - manager: manager, - registry: reg, - logger: logger, - limits: limits, - clientsPool: clientPool, - allowedTenants: util.NewAllowedTenants(cfg.EnabledTenants, cfg.DisabledTenants), - - ringCheckErrors: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ruler_ring_check_errors_total", - Help: "Number of errors that have occurred when checking the ring for ownership", - }), - - rulerSync: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ruler_sync_rules_total", - Help: "Total number of times the ruler sync operation triggered.", - }, []string{"reason"}), - } - - if len(cfg.EnabledTenants) > 0 { - level.Info(ruler.logger).Log("msg", "ruler using enabled users", "enabled", strings.Join(cfg.EnabledTenants, ", ")) - } - if len(cfg.DisabledTenants) > 0 { - level.Info(ruler.logger).Log("msg", "ruler using disabled users", "disabled", strings.Join(cfg.DisabledTenants, ", ")) - } - - if cfg.EnableSharding { - ringStore, err := kv.NewClient( - cfg.Ring.KVStore, - ring.GetCodec(), - kv.RegistererWithKVName(prometheus.WrapRegistererWithPrefix("cortex_", reg), "ruler"), - logger, - ) - if err != nil { - return nil, errors.Wrap(err, "create KV store client") - } - - if err = enableSharding(ruler, ringStore); err != nil { - return nil, errors.Wrap(err, "setup ruler sharding ring") - } - } - - ruler.Service = 
services.NewBasicService(ruler.starting, ruler.run, ruler.stopping) - return ruler, nil -} - -func enableSharding(r *Ruler, ringStore kv.Client) error { - lifecyclerCfg, err := r.cfg.Ring.ToLifecyclerConfig(r.logger) - if err != nil { - return errors.Wrap(err, "failed to initialize ruler's lifecycler config") - } - - // Define lifecycler delegates in reverse order (last to be called defined first because they're - // chained via "next delegate"). - delegate := ring.BasicLifecyclerDelegate(r) - delegate = ring.NewLeaveOnStoppingDelegate(delegate, r.logger) - delegate = ring.NewAutoForgetDelegate(r.cfg.Ring.HeartbeatTimeout*ringAutoForgetUnhealthyPeriods, delegate, r.logger) - - rulerRingName := "ruler" - r.lifecycler, err = ring.NewBasicLifecycler(lifecyclerCfg, rulerRingName, ringKey, ringStore, delegate, r.logger, prometheus.WrapRegistererWithPrefix("cortex_", r.registry)) - if err != nil { - return errors.Wrap(err, "failed to initialize ruler's lifecycler") - } - - r.ring, err = ring.NewWithStoreClientAndStrategy(r.cfg.Ring.ToRingConfig(), rulerRingName, ringKey, ringStore, ring.NewIgnoreUnhealthyInstancesReplicationStrategy(), prometheus.WrapRegistererWithPrefix("cortex_", r.registry), r.logger) - if err != nil { - return errors.Wrap(err, "failed to initialize ruler's ring") - } - - return nil -} - -func (r *Ruler) starting(ctx context.Context) error { - // If sharding is enabled, start the used subservices. - if r.cfg.EnableSharding { - var err error - - if r.subservices, err = services.NewManager(r.lifecycler, r.ring, r.clientsPool); err != nil { - return errors.Wrap(err, "unable to start ruler subservices") - } - - r.subservicesWatcher = services.NewFailureWatcher() - r.subservicesWatcher.WatchManager(r.subservices) - - if err = services.StartManagerAndAwaitHealthy(ctx, r.subservices); err != nil { - return errors.Wrap(err, "unable to start ruler subservices") - } - } - - // TODO: ideally, ruler would wait until its queryable is finished starting. - return nil -} - -// Stop stops the Ruler. -// Each function of the ruler is terminated before leaving the ring -func (r *Ruler) stopping(_ error) error { - r.manager.Stop() - - if r.subservices != nil { - _ = services.StopManagerAndAwaitStopped(context.Background(), r.subservices) - } - return nil -} - -type sender interface { - Send(alerts ...*notifier.Alert) -} - -// SendAlerts implements a rules.NotifyFunc for a Notifier. -// It filters any non-firing alerts from the input. -// -// Copied from Prometheus's main.go. -func SendAlerts(n sender, externalURL string) promRules.NotifyFunc { - return func(ctx context.Context, expr string, alerts ...*promRules.Alert) { - var res []*notifier.Alert - - for _, alert := range alerts { - a := ¬ifier.Alert{ - StartsAt: alert.FiredAt, - Labels: alert.Labels, - Annotations: alert.Annotations, - GeneratorURL: externalURL + strutil.TableLinkForExpression(expr), - } - if !alert.ResolvedAt.IsZero() { - a.EndsAt = alert.ResolvedAt - } else { - a.EndsAt = alert.ValidUntil - } - res = append(res, a) - } - - if len(alerts) > 0 { - n.Send(res...) - } - } -} - -var sep = []byte("/") - -func tokenForGroup(g *rulespb.RuleGroupDesc) uint32 { - ringHasher := fnv.New32a() - - // Hasher never returns err. 
- _, _ = ringHasher.Write([]byte(g.User))
- _, _ = ringHasher.Write(sep)
- _, _ = ringHasher.Write([]byte(g.Namespace))
- _, _ = ringHasher.Write(sep)
- _, _ = ringHasher.Write([]byte(g.Name))
-
- return ringHasher.Sum32()
-}
-
-func instanceOwnsRuleGroup(r ring.ReadRing, g *rulespb.RuleGroupDesc, instanceAddr string) (bool, error) {
- hash := tokenForGroup(g)
-
- rlrs, err := r.Get(hash, RingOp, nil, nil, nil)
- if err != nil {
- return false, errors.Wrap(err, "error reading ring to verify rule group ownership")
- }
-
- return rlrs.Instances[0].Addr == instanceAddr, nil
-}
-
-func (r *Ruler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- if r.cfg.EnableSharding {
- r.ring.ServeHTTP(w, req)
- } else {
- var unshardedPage = `
- <!DOCTYPE html>
- <html>
- <head>
- <meta charset="UTF-8">
- <title>Cortex Ruler Status</title>
- </head>
- <body>
- <h1>Cortex Ruler Status</h1>
- <p>Ruler running with shards disabled</p>
- </body>
- </html>
- - ` - util.WriteHTMLResponse(w, unshardedPage) - } -} - -func (r *Ruler) run(ctx context.Context) error { - level.Info(r.logger).Log("msg", "ruler up and running") - - tick := time.NewTicker(r.cfg.PollInterval) - defer tick.Stop() - - var ringTickerChan <-chan time.Time - var ringLastState ring.ReplicationSet - - if r.cfg.EnableSharding { - ringLastState, _ = r.ring.GetAllHealthy(RingOp) - ringTicker := time.NewTicker(util.DurationWithJitter(r.cfg.RingCheckPeriod, 0.2)) - defer ringTicker.Stop() - ringTickerChan = ringTicker.C - } - - r.syncRules(ctx, rulerSyncReasonInitial) - for { - select { - case <-ctx.Done(): - return nil - case <-tick.C: - r.syncRules(ctx, rulerSyncReasonPeriodic) - case <-ringTickerChan: - // We ignore the error because in case of error it will return an empty - // replication set which we use to compare with the previous state. - currRingState, _ := r.ring.GetAllHealthy(RingOp) - - if ring.HasReplicationSetChanged(ringLastState, currRingState) { - ringLastState = currRingState - r.syncRules(ctx, rulerSyncReasonRingChange) - } - case err := <-r.subservicesWatcher.Chan(): - return errors.Wrap(err, "ruler subservice failed") - } - } -} - -func (r *Ruler) syncRules(ctx context.Context, reason string) { - level.Debug(r.logger).Log("msg", "syncing rules", "reason", reason) - r.rulerSync.WithLabelValues(reason).Inc() - - configs, err := r.listRules(ctx) - if err != nil { - level.Error(r.logger).Log("msg", "unable to list rules", "err", err) - return - } - - err = r.store.LoadRuleGroups(ctx, configs) - if err != nil { - level.Error(r.logger).Log("msg", "unable to load rules owned by this ruler", "err", err) - return - } - - // This will also delete local group files for users that are no longer in 'configs' map. - r.manager.SyncRuleGroups(ctx, configs) -} - -func (r *Ruler) listRules(ctx context.Context) (result map[string]rulespb.RuleGroupList, err error) { - switch { - case !r.cfg.EnableSharding: - result, err = r.listRulesNoSharding(ctx) - - case r.cfg.ShardingStrategy == util.ShardingStrategyDefault: - result, err = r.listRulesShardingDefault(ctx) - - case r.cfg.ShardingStrategy == util.ShardingStrategyShuffle: - result, err = r.listRulesShuffleSharding(ctx) - - default: - return nil, errors.New("invalid sharding configuration") - } - - if err != nil { - return - } - - for userID := range result { - if !r.allowedTenants.IsAllowed(userID) { - level.Debug(r.logger).Log("msg", "ignoring rule groups for user, not allowed", "user", userID) - delete(result, userID) - } - } - return -} - -func (r *Ruler) listRulesNoSharding(ctx context.Context) (map[string]rulespb.RuleGroupList, error) { - return r.store.ListAllRuleGroups(ctx) -} - -func (r *Ruler) listRulesShardingDefault(ctx context.Context) (map[string]rulespb.RuleGroupList, error) { - configs, err := r.store.ListAllRuleGroups(ctx) - if err != nil { - return nil, err - } - - filteredConfigs := make(map[string]rulespb.RuleGroupList) - for userID, groups := range configs { - filtered := filterRuleGroups(userID, groups, r.ring, r.lifecycler.GetInstanceAddr(), r.logger, r.ringCheckErrors) - if len(filtered) > 0 { - filteredConfigs[userID] = filtered - } - } - return filteredConfigs, nil -} - -func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulespb.RuleGroupList, error) { - users, err := r.store.ListAllUsers(ctx) - if err != nil { - return nil, errors.Wrap(err, "unable to list users of ruler") - } - - // Only users in userRings will be used in the to load the rules. 
- userRings := map[string]ring.ReadRing{} - for _, u := range users { - if shardSize := r.limits.RulerTenantShardSize(u); shardSize > 0 { - subRing := r.ring.ShuffleShard(u, shardSize) - - // Include the user only if it belongs to this ruler shard. - if subRing.HasInstance(r.lifecycler.GetInstanceID()) { - userRings[u] = subRing - } - } else { - // A shard size of 0 means shuffle sharding is disabled for this specific user. - // In that case we use the full ring so that rule groups will be sharded across all rulers. - userRings[u] = r.ring - } - } - - if len(userRings) == 0 { - return nil, nil - } - - userCh := make(chan string, len(userRings)) - for u := range userRings { - userCh <- u - } - close(userCh) - - mu := sync.Mutex{} - result := map[string]rulespb.RuleGroupList{} - - concurrency := loadRulesConcurrency - if len(userRings) < concurrency { - concurrency = len(userRings) - } - - g, gctx := errgroup.WithContext(ctx) - for i := 0; i < concurrency; i++ { - g.Go(func() error { - for userID := range userCh { - groups, err := r.store.ListRuleGroupsForUserAndNamespace(gctx, userID, "") - if err != nil { - return errors.Wrapf(err, "failed to fetch rule groups for user %s", userID) - } - - filtered := filterRuleGroups(userID, groups, userRings[userID], r.lifecycler.GetInstanceAddr(), r.logger, r.ringCheckErrors) - if len(filtered) == 0 { - continue - } - - mu.Lock() - result[userID] = filtered - mu.Unlock() - } - return nil - }) - } - - err = g.Wait() - return result, err -} - -// filterRuleGroups returns map of rule groups that given instance "owns" based on supplied ring. -// This function only uses User, Namespace, and Name fields of individual RuleGroups. -// -// Reason why this function is not a method on Ruler is to make sure we don't accidentally use r.ring, -// but only ring passed as parameter. -func filterRuleGroups(userID string, ruleGroups []*rulespb.RuleGroupDesc, ring ring.ReadRing, instanceAddr string, log log.Logger, ringCheckErrors prometheus.Counter) []*rulespb.RuleGroupDesc { - // Prune the rule group to only contain rules that this ruler is responsible for, based on ring. 
- var result []*rulespb.RuleGroupDesc - for _, g := range ruleGroups { - owned, err := instanceOwnsRuleGroup(ring, g, instanceAddr) - if err != nil { - ringCheckErrors.Inc() - level.Error(log).Log("msg", "failed to check if the ruler replica owns the rule group", "user", userID, "namespace", g.Namespace, "group", g.Name, "err", err) - continue - } - - if owned { - level.Debug(log).Log("msg", "rule group owned", "user", g.User, "namespace", g.Namespace, "name", g.Name) - result = append(result, g) - } else { - level.Debug(log).Log("msg", "rule group not owned, ignoring", "user", g.User, "namespace", g.Namespace, "name", g.Name) - } - } - - return result -} - -// GetRules retrieves the running rules from this ruler and all running rulers in the ring if -// sharding is enabled -func (r *Ruler) GetRules(ctx context.Context) ([]*GroupStateDesc, error) { - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, fmt.Errorf("no user id found in context") - } - - if r.cfg.EnableSharding { - return r.getShardedRules(ctx, userID) - } - - return r.getLocalRules(userID) -} - -func (r *Ruler) getLocalRules(userID string) ([]*GroupStateDesc, error) { - groups := r.manager.GetRules(userID) - - groupDescs := make([]*GroupStateDesc, 0, len(groups)) - prefix := filepath.Join(r.cfg.RulePath, userID) + "/" - - for _, group := range groups { - interval := group.Interval() - - // The mapped filename is url path escaped encoded to make handling `/` characters easier - decodedNamespace, err := url.PathUnescape(strings.TrimPrefix(group.File(), prefix)) - if err != nil { - return nil, errors.Wrap(err, "unable to decode rule filename") - } - - groupDesc := &GroupStateDesc{ - Group: &rulespb.RuleGroupDesc{ - Name: group.Name(), - Namespace: string(decodedNamespace), - Interval: interval, - User: userID, - }, - - EvaluationTimestamp: group.GetLastEvaluation(), - EvaluationDuration: group.GetEvaluationTime(), - } - for _, r := range group.Rules() { - lastError := "" - if r.LastError() != nil { - lastError = r.LastError().Error() - } - - var ruleDesc *RuleStateDesc - switch rule := r.(type) { - case *promRules.AlertingRule: - rule.ActiveAlerts() - alerts := []*AlertStateDesc{} - for _, a := range rule.ActiveAlerts() { - alerts = append(alerts, &AlertStateDesc{ - State: a.State.String(), - Labels: cortexpb.FromLabelsToLabelAdapters(a.Labels), - Annotations: cortexpb.FromLabelsToLabelAdapters(a.Annotations), - Value: a.Value, - ActiveAt: a.ActiveAt, - FiredAt: a.FiredAt, - ResolvedAt: a.ResolvedAt, - LastSentAt: a.LastSentAt, - ValidUntil: a.ValidUntil, - }) - } - ruleDesc = &RuleStateDesc{ - Rule: &rulespb.RuleDesc{ - Expr: rule.Query().String(), - Alert: rule.Name(), - For: rule.HoldDuration(), - Labels: cortexpb.FromLabelsToLabelAdapters(rule.Labels()), - Annotations: cortexpb.FromLabelsToLabelAdapters(rule.Annotations()), - }, - State: rule.State().String(), - Health: string(rule.Health()), - LastError: lastError, - Alerts: alerts, - EvaluationTimestamp: rule.GetEvaluationTimestamp(), - EvaluationDuration: rule.GetEvaluationDuration(), - } - case *promRules.RecordingRule: - ruleDesc = &RuleStateDesc{ - Rule: &rulespb.RuleDesc{ - Record: rule.Name(), - Expr: rule.Query().String(), - Labels: cortexpb.FromLabelsToLabelAdapters(rule.Labels()), - }, - Health: string(rule.Health()), - LastError: lastError, - EvaluationTimestamp: rule.GetEvaluationTimestamp(), - EvaluationDuration: rule.GetEvaluationDuration(), - } - default: - return nil, errors.Errorf("failed to assert type of rule '%v'", rule.Name()) - } - 
groupDesc.ActiveRules = append(groupDesc.ActiveRules, ruleDesc) - } - groupDescs = append(groupDescs, groupDesc) - } - return groupDescs, nil -} - -func (r *Ruler) getShardedRules(ctx context.Context, userID string) ([]*GroupStateDesc, error) { - ring := ring.ReadRing(r.ring) - - if shardSize := r.limits.RulerTenantShardSize(userID); shardSize > 0 && r.cfg.ShardingStrategy == util.ShardingStrategyShuffle { - ring = r.ring.ShuffleShard(userID, shardSize) - } - - rulers, err := ring.GetReplicationSetForOperation(RingOp) - if err != nil { - return nil, err - } - - ctx, err = user.InjectIntoGRPCRequest(ctx) - if err != nil { - return nil, fmt.Errorf("unable to inject user ID into grpc request, %v", err) - } - - var ( - mergedMx sync.Mutex - merged []*GroupStateDesc - ) - - // Concurrently fetch rules from all rulers. Since rules are not replicated, - // we need all requests to succeed. - jobs := concurrency.CreateJobsFromStrings(rulers.GetAddresses()) - err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error { - addr := job.(string) - - rulerClient, err := r.clientsPool.GetClientFor(addr) - if err != nil { - return errors.Wrapf(err, "unable to get client for ruler %s", addr) - } - - newGrps, err := rulerClient.Rules(ctx, &RulesRequest{}) - if err != nil { - return errors.Wrapf(err, "unable to retrieve rules from ruler %s", addr) - } - - mergedMx.Lock() - merged = append(merged, newGrps.Groups...) - mergedMx.Unlock() - - return nil - }) - - return merged, err -} - -// Rules implements the rules service -func (r *Ruler) Rules(ctx context.Context, in *RulesRequest) (*RulesResponse, error) { - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, fmt.Errorf("no user id found in context") - } - - groupDescs, err := r.getLocalRules(userID) - if err != nil { - return nil, err - } - - return &RulesResponse{Groups: groupDescs}, nil -} - -// AssertMaxRuleGroups limit has not been reached compared to the current -// number of total rule groups in input and returns an error if so. -func (r *Ruler) AssertMaxRuleGroups(userID string, rg int) error { - limit := r.limits.RulerMaxRuleGroupsPerTenant(userID) - - if limit <= 0 { - return nil - } - - if rg <= limit { - return nil - } - - return fmt.Errorf(errMaxRuleGroupsPerUserLimitExceeded, limit, rg) -} - -// AssertMaxRulesPerRuleGroup limit has not been reached compared to the current -// number of rules in a rule group in input and returns an error if so. -func (r *Ruler) AssertMaxRulesPerRuleGroup(userID string, rules int) error { - limit := r.limits.RulerMaxRulesPerRuleGroup(userID) - - if limit <= 0 { - return nil - } - - if rules <= limit { - return nil - } - return fmt.Errorf(errMaxRulesPerRuleGroupPerUserLimitExceeded, limit, rules) -} - -func (r *Ruler) DeleteTenantConfiguration(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), r.logger) - - userID, err := tenant.TenantID(req.Context()) - if err != nil { - // When Cortex is running, it uses Auth Middleware for checking X-Scope-OrgID and injecting tenant into context. - // Auth Middleware sends http.StatusUnauthorized if X-Scope-OrgID is missing, so we do too here, for consistency. - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - err = r.store.DeleteNamespace(req.Context(), userID, "") // Empty namespace = delete all rule groups. 
- if err != nil && !errors.Is(err, rulestore.ErrGroupNamespaceNotFound) { - respondError(logger, w, err.Error()) - return - } - - level.Info(logger).Log("msg", "deleted all tenant rule groups", "user", userID) - w.WriteHeader(http.StatusOK) -} - -func (r *Ruler) ListAllRules(w http.ResponseWriter, req *http.Request) { - logger := util_log.WithContext(req.Context(), r.logger) - - userIDs, err := r.store.ListAllUsers(req.Context()) - if err != nil { - level.Error(logger).Log("msg", errListAllUser, "err", err) - http.Error(w, fmt.Sprintf("%s: %s", errListAllUser, err.Error()), http.StatusInternalServerError) - return - } - - done := make(chan struct{}) - iter := make(chan interface{}) - - go func() { - util.StreamWriteYAMLResponse(w, iter, logger) - close(done) - }() - - err = concurrency.ForEachUser(req.Context(), userIDs, fetchRulesConcurrency, func(ctx context.Context, userID string) error { - rg, err := r.store.ListRuleGroupsForUserAndNamespace(ctx, userID, "") - if err != nil { - return errors.Wrapf(err, "failed to fetch ruler config for user %s", userID) - } - userRules := map[string]rulespb.RuleGroupList{userID: rg} - if err := r.store.LoadRuleGroups(ctx, userRules); err != nil { - return errors.Wrapf(err, "failed to load ruler config for user %s", userID) - } - data := map[string]map[string][]rulefmt.RuleGroup{userID: userRules[userID].Formatted()} - - select { - case iter <- data: - case <-done: // stop early, if sending response has already finished - } - - return nil - }) - if err != nil { - level.Error(logger).Log("msg", "failed to list all ruler configs", "err", err) - } - close(iter) - <-done -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.pb.go deleted file mode 100644 index 1d3501eb2..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.pb.go +++ /dev/null @@ -1,2380 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: ruler.proto - -package ruler - -import ( - context "context" - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/cortexproject/cortex/pkg/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - rulespb "github.com/cortexproject/cortex/pkg/ruler/rulespb" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
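The `const _ = proto.GoGoProtoPackageIsVersion3` line that follows is a compile-time guard: assigning a library-exported constant to a blank package-level constant compiles only while the library still defines that symbol, so a stale generated file fails loudly at build time rather than at runtime. A tiny self-contained sketch of the idiom (names are made up):

```go
package main

// A library would export a version marker like this one.
const libraryVersion3 = true

// Generated code pins itself to the marker: if the library drops or
// renames the symbol, compilation fails, signalling that the generated
// file and the library are out of sync and need regeneration.
const _ = libraryVersion3

func main() {}
```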
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type RulesRequest struct { -} - -func (m *RulesRequest) Reset() { *m = RulesRequest{} } -func (*RulesRequest) ProtoMessage() {} -func (*RulesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{0} -} -func (m *RulesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RulesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RulesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RulesRequest.Merge(m, src) -} -func (m *RulesRequest) XXX_Size() int { - return m.Size() -} -func (m *RulesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RulesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RulesRequest proto.InternalMessageInfo - -type RulesResponse struct { - Groups []*GroupStateDesc `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` -} - -func (m *RulesResponse) Reset() { *m = RulesResponse{} } -func (*RulesResponse) ProtoMessage() {} -func (*RulesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{1} -} -func (m *RulesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RulesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RulesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RulesResponse.Merge(m, src) -} -func (m *RulesResponse) XXX_Size() int { - return m.Size() -} -func (m *RulesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RulesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RulesResponse proto.InternalMessageInfo - -func (m *RulesResponse) GetGroups() []*GroupStateDesc { - if m != nil { - return m.Groups - } - return nil -} - -// GroupStateDesc is a proto representation of a cortex rule group -type GroupStateDesc struct { - Group *rulespb.RuleGroupDesc `protobuf:"bytes,1,opt,name=group,proto3" json:"group,omitempty"` - ActiveRules []*RuleStateDesc `protobuf:"bytes,2,rep,name=active_rules,json=activeRules,proto3" json:"active_rules,omitempty"` - EvaluationTimestamp time.Time `protobuf:"bytes,3,opt,name=evaluationTimestamp,proto3,stdtime" json:"evaluationTimestamp"` - EvaluationDuration time.Duration `protobuf:"bytes,4,opt,name=evaluationDuration,proto3,stdduration" json:"evaluationDuration"` -} - -func (m *GroupStateDesc) Reset() { *m = GroupStateDesc{} } -func (*GroupStateDesc) ProtoMessage() {} -func (*GroupStateDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{2} -} -func (m *GroupStateDesc) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupStateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GroupStateDesc.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GroupStateDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupStateDesc.Merge(m, src) -} -func (m *GroupStateDesc) XXX_Size() int { - return 
m.Size() -} -func (m *GroupStateDesc) XXX_DiscardUnknown() { - xxx_messageInfo_GroupStateDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupStateDesc proto.InternalMessageInfo - -func (m *GroupStateDesc) GetGroup() *rulespb.RuleGroupDesc { - if m != nil { - return m.Group - } - return nil -} - -func (m *GroupStateDesc) GetActiveRules() []*RuleStateDesc { - if m != nil { - return m.ActiveRules - } - return nil -} - -func (m *GroupStateDesc) GetEvaluationTimestamp() time.Time { - if m != nil { - return m.EvaluationTimestamp - } - return time.Time{} -} - -func (m *GroupStateDesc) GetEvaluationDuration() time.Duration { - if m != nil { - return m.EvaluationDuration - } - return 0 -} - -// RuleStateDesc is a proto representation of a Prometheus Rule -type RuleStateDesc struct { - Rule *rulespb.RuleDesc `protobuf:"bytes,1,opt,name=rule,proto3" json:"rule,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - Health string `protobuf:"bytes,3,opt,name=health,proto3" json:"health,omitempty"` - LastError string `protobuf:"bytes,4,opt,name=lastError,proto3" json:"lastError,omitempty"` - Alerts []*AlertStateDesc `protobuf:"bytes,5,rep,name=alerts,proto3" json:"alerts,omitempty"` - EvaluationTimestamp time.Time `protobuf:"bytes,6,opt,name=evaluationTimestamp,proto3,stdtime" json:"evaluationTimestamp"` - EvaluationDuration time.Duration `protobuf:"bytes,7,opt,name=evaluationDuration,proto3,stdduration" json:"evaluationDuration"` -} - -func (m *RuleStateDesc) Reset() { *m = RuleStateDesc{} } -func (*RuleStateDesc) ProtoMessage() {} -func (*RuleStateDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{3} -} -func (m *RuleStateDesc) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuleStateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RuleStateDesc.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RuleStateDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuleStateDesc.Merge(m, src) -} -func (m *RuleStateDesc) XXX_Size() int { - return m.Size() -} -func (m *RuleStateDesc) XXX_DiscardUnknown() { - xxx_messageInfo_RuleStateDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_RuleStateDesc proto.InternalMessageInfo - -func (m *RuleStateDesc) GetRule() *rulespb.RuleDesc { - if m != nil { - return m.Rule - } - return nil -} - -func (m *RuleStateDesc) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func (m *RuleStateDesc) GetHealth() string { - if m != nil { - return m.Health - } - return "" -} - -func (m *RuleStateDesc) GetLastError() string { - if m != nil { - return m.LastError - } - return "" -} - -func (m *RuleStateDesc) GetAlerts() []*AlertStateDesc { - if m != nil { - return m.Alerts - } - return nil -} - -func (m *RuleStateDesc) GetEvaluationTimestamp() time.Time { - if m != nil { - return m.EvaluationTimestamp - } - return time.Time{} -} - -func (m *RuleStateDesc) GetEvaluationDuration() time.Duration { - if m != nil { - return m.EvaluationDuration - } - return 0 -} - -type AlertStateDesc struct { - State string `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,2,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"labels"` - Annotations 
[]github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,3,rep,name=annotations,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"annotations"` - Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` - ActiveAt time.Time `protobuf:"bytes,5,opt,name=active_at,json=activeAt,proto3,stdtime" json:"active_at"` - FiredAt time.Time `protobuf:"bytes,6,opt,name=fired_at,json=firedAt,proto3,stdtime" json:"fired_at"` - ResolvedAt time.Time `protobuf:"bytes,7,opt,name=resolved_at,json=resolvedAt,proto3,stdtime" json:"resolved_at"` - LastSentAt time.Time `protobuf:"bytes,8,opt,name=last_sent_at,json=lastSentAt,proto3,stdtime" json:"last_sent_at"` - ValidUntil time.Time `protobuf:"bytes,9,opt,name=valid_until,json=validUntil,proto3,stdtime" json:"valid_until"` -} - -func (m *AlertStateDesc) Reset() { *m = AlertStateDesc{} } -func (*AlertStateDesc) ProtoMessage() {} -func (*AlertStateDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_9ecbec0a4cfddea6, []int{4} -} -func (m *AlertStateDesc) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlertStateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlertStateDesc.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlertStateDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlertStateDesc.Merge(m, src) -} -func (m *AlertStateDesc) XXX_Size() int { - return m.Size() -} -func (m *AlertStateDesc) XXX_DiscardUnknown() { - xxx_messageInfo_AlertStateDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_AlertStateDesc proto.InternalMessageInfo - -func (m *AlertStateDesc) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func (m *AlertStateDesc) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *AlertStateDesc) GetActiveAt() time.Time { - if m != nil { - return m.ActiveAt - } - return time.Time{} -} - -func (m *AlertStateDesc) GetFiredAt() time.Time { - if m != nil { - return m.FiredAt - } - return time.Time{} -} - -func (m *AlertStateDesc) GetResolvedAt() time.Time { - if m != nil { - return m.ResolvedAt - } - return time.Time{} -} - -func (m *AlertStateDesc) GetLastSentAt() time.Time { - if m != nil { - return m.LastSentAt - } - return time.Time{} -} - -func (m *AlertStateDesc) GetValidUntil() time.Time { - if m != nil { - return m.ValidUntil - } - return time.Time{} -} - -func init() { - proto.RegisterType((*RulesRequest)(nil), "ruler.RulesRequest") - proto.RegisterType((*RulesResponse)(nil), "ruler.RulesResponse") - proto.RegisterType((*GroupStateDesc)(nil), "ruler.GroupStateDesc") - proto.RegisterType((*RuleStateDesc)(nil), "ruler.RuleStateDesc") - proto.RegisterType((*AlertStateDesc)(nil), "ruler.AlertStateDesc") -} - -func init() { proto.RegisterFile("ruler.proto", fileDescriptor_9ecbec0a4cfddea6) } - -var fileDescriptor_9ecbec0a4cfddea6 = []byte{ - // 677 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcf, 0x4f, 0x13, 0x41, - 0x14, 0xde, 0x29, 0x6c, 0x69, 0xa7, 0x88, 0xc9, 0x50, 0xcd, 0xda, 0x98, 0x29, 0xa9, 0x17, 0x62, - 0xc2, 0x92, 0x20, 0x89, 0xf1, 0x80, 0x66, 0x09, 0xe8, 0xc5, 0x83, 0x59, 0xd4, 0x2b, 0x99, 0xb6, - 0xc3, 0xb2, 0xba, 0xec, 0xac, 0x33, 0xb3, 0x0d, 0x47, 0xfe, 0x04, 0x8e, 0x9e, 0x3d, 0xf9, 0xa7, - 0x70, 0xe4, 0x48, 0x8c, 0x41, 
0x59, 0x2e, 0x1e, 0xf9, 0x13, 0xcc, 0xfc, 0x58, 0xdb, 0x2a, 0x26, - 0x6e, 0x0c, 0x97, 0x76, 0xdf, 0xbc, 0xf7, 0x7d, 0xdf, 0xbc, 0xef, 0xcd, 0x0c, 0x6c, 0xf1, 0x3c, - 0xa1, 0xdc, 0xcf, 0x38, 0x93, 0x0c, 0xb9, 0x3a, 0xe8, 0xac, 0x44, 0xb1, 0xdc, 0xcf, 0xfb, 0xfe, - 0x80, 0x1d, 0xac, 0x46, 0x2c, 0x62, 0xab, 0x3a, 0xdb, 0xcf, 0xf7, 0x74, 0xa4, 0x03, 0xfd, 0x65, - 0x50, 0x1d, 0x1c, 0x31, 0x16, 0x25, 0x74, 0x5c, 0x35, 0xcc, 0x39, 0x91, 0x31, 0x4b, 0x6d, 0xbe, - 0xfb, 0x7b, 0x5e, 0xc6, 0x07, 0x54, 0x48, 0x72, 0x90, 0xd9, 0x82, 0x27, 0x13, 0x7a, 0x03, 0xc6, - 0x25, 0x3d, 0xcc, 0x38, 0x7b, 0x47, 0x07, 0xd2, 0x46, 0xab, 0xd9, 0xfb, 0xa8, 0x4c, 0xf4, 0xed, - 0x87, 0x85, 0x6e, 0xfc, 0x0b, 0x54, 0x77, 0xa5, 0x7f, 0x45, 0xd6, 0x37, 0xff, 0x06, 0xde, 0x5b, - 0x80, 0xf3, 0xa1, 0x0a, 0x43, 0xfa, 0x21, 0xa7, 0x42, 0xf6, 0x9e, 0xc2, 0x5b, 0x36, 0x16, 0x19, - 0x4b, 0x05, 0x45, 0x2b, 0xb0, 0x1e, 0x71, 0x96, 0x67, 0xc2, 0x03, 0x4b, 0x33, 0xcb, 0xad, 0xb5, - 0x3b, 0xbe, 0xf1, 0xeb, 0x85, 0x5a, 0xdc, 0x91, 0x44, 0xd2, 0x2d, 0x2a, 0x06, 0xa1, 0x2d, 0xea, - 0x7d, 0xaa, 0xc1, 0x85, 0xe9, 0x14, 0x7a, 0x08, 0x5d, 0x9d, 0xf4, 0xc0, 0x12, 0x58, 0x6e, 0xad, - 0xb5, 0x7d, 0xa3, 0xaf, 0x64, 0x74, 0xa5, 0xc6, 0x9b, 0x12, 0xf4, 0x18, 0xce, 0x93, 0x81, 0x8c, - 0x47, 0x74, 0x57, 0x17, 0x79, 0x35, 0xad, 0xd9, 0xb6, 0x9a, 0x0a, 0x32, 0x96, 0x6c, 0x99, 0x4a, - 0xbd, 0x5d, 0xf4, 0x16, 0x2e, 0xd2, 0x11, 0x49, 0x72, 0x6d, 0xfb, 0xeb, 0xd2, 0x5e, 0x6f, 0x46, - 0x4b, 0x76, 0x7c, 0x33, 0x00, 0xbf, 0x1c, 0x80, 0xff, 0xab, 0x62, 0xb3, 0x71, 0x72, 0xde, 0x75, - 0x8e, 0xbf, 0x75, 0x41, 0x78, 0x1d, 0x01, 0xda, 0x81, 0x68, 0xbc, 0xbc, 0x65, 0xc7, 0xea, 0xcd, - 0x6a, 0xda, 0x7b, 0x7f, 0xd0, 0x96, 0x05, 0x86, 0xf5, 0xa3, 0x62, 0xbd, 0x06, 0xde, 0xfb, 0x5a, - 0x33, 0x2e, 0x8f, 0x3d, 0x7a, 0x00, 0x67, 0x55, 0x8b, 0xd6, 0xa2, 0xdb, 0x13, 0x16, 0xe9, 0x56, - 0x75, 0x12, 0xb5, 0xa1, 0x2b, 0x14, 0xc2, 0xab, 0x2d, 0x81, 0xe5, 0x66, 0x68, 0x02, 0x74, 0x17, - 0xd6, 0xf7, 0x29, 0x49, 0xe4, 0xbe, 0x6e, 0xb6, 0x19, 0xda, 0x08, 0xdd, 0x87, 0xcd, 0x84, 0x08, - 0xb9, 0xcd, 0x39, 0xe3, 0x7a, 0xc3, 0xcd, 0x70, 0xbc, 0xa0, 0xc6, 0x4a, 0x12, 0xca, 0xa5, 0xf0, - 0xdc, 0xa9, 0xb1, 0x06, 0x6a, 0x71, 0x62, 0xac, 0xa6, 0xe8, 0x6f, 0xf6, 0xd6, 0x6f, 0xc6, 0xde, - 0xb9, 0xff, 0xb3, 0xf7, 0xc8, 0x85, 0x0b, 0xd3, 0x7d, 0x8c, 0xad, 0x03, 0x93, 0xd6, 0xa5, 0xb0, - 0x9e, 0x90, 0x3e, 0x4d, 0xca, 0x73, 0xb6, 0xe8, 0x97, 0x77, 0xcc, 0x7f, 0xa9, 0xd6, 0x5f, 0x91, - 0x98, 0x6f, 0x06, 0x4a, 0xeb, 0xcb, 0x79, 0xb7, 0xd2, 0x1d, 0x35, 0xf8, 0x60, 0x48, 0x32, 0x49, - 0x79, 0x68, 0x55, 0xd0, 0x21, 0x6c, 0x91, 0x34, 0x65, 0x52, 0x6f, 0x53, 0x78, 0x33, 0x37, 0x2a, - 0x3a, 0x29, 0xa5, 0xfa, 0x57, 0x3e, 0x51, 0x7d, 0x10, 0x40, 0x68, 0x02, 0x14, 0xc0, 0xa6, 0xbd, - 0x6d, 0x44, 0x7a, 0x6e, 0x85, 0x59, 0x36, 0x0c, 0x2c, 0x90, 0xe8, 0x19, 0x6c, 0xec, 0xc5, 0x9c, - 0x0e, 0x15, 0x43, 0x95, 0xd3, 0x30, 0xa7, 0x51, 0x81, 0x44, 0xdb, 0xb0, 0xc5, 0xa9, 0x60, 0xc9, - 0xc8, 0x70, 0xcc, 0x55, 0xe0, 0x80, 0x25, 0x30, 0x90, 0xe8, 0x39, 0x9c, 0x57, 0x87, 0x7b, 0x57, - 0xd0, 0x54, 0x2a, 0x9e, 0x46, 0x15, 0x1e, 0x85, 0xdc, 0xa1, 0xa9, 0x34, 0xdb, 0x19, 0x91, 0x24, - 0x1e, 0xee, 0xe6, 0xa9, 0x8c, 0x13, 0xaf, 0x59, 0x85, 0x46, 0x03, 0xdf, 0x28, 0xdc, 0xda, 0x06, - 0x74, 0xd5, 0xe5, 0xe5, 0x68, 0xdd, 0x7c, 0x08, 0xb4, 0x38, 0xf1, 0x86, 0x95, 0xaf, 0x6d, 0xa7, - 0x3d, 0xbd, 0x68, 0x9e, 0xdc, 0x9e, 0xb3, 0xb9, 0x7e, 0x7a, 0x81, 0x9d, 0xb3, 0x0b, 0xec, 0x5c, - 0x5d, 0x60, 0x70, 0x54, 0x60, 0xf0, 0xb9, 0xc0, 0xe0, 0xa4, 0xc0, 0xe0, 0xb4, 0xc0, 0xe0, 0x7b, - 0x81, 0xc1, 0x8f, 0x02, 0x3b, 0x57, 0x05, 0x06, 0xc7, 
0x97, 0xd8, 0x39, 0xbd, 0xc4, 0xce, 0xd9, - 0x25, 0x76, 0xfa, 0x75, 0xbd, 0xbd, 0x47, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x29, 0x57, 0x9d, - 0xdd, 0xd2, 0x06, 0x00, 0x00, -} - -func (this *RulesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RulesRequest) - if !ok { - that2, ok := that.(RulesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *RulesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RulesResponse) - if !ok { - that2, ok := that.(RulesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Groups) != len(that1.Groups) { - return false - } - for i := range this.Groups { - if !this.Groups[i].Equal(that1.Groups[i]) { - return false - } - } - return true -} -func (this *GroupStateDesc) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GroupStateDesc) - if !ok { - that2, ok := that.(GroupStateDesc) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Group.Equal(that1.Group) { - return false - } - if len(this.ActiveRules) != len(that1.ActiveRules) { - return false - } - for i := range this.ActiveRules { - if !this.ActiveRules[i].Equal(that1.ActiveRules[i]) { - return false - } - } - if !this.EvaluationTimestamp.Equal(that1.EvaluationTimestamp) { - return false - } - if this.EvaluationDuration != that1.EvaluationDuration { - return false - } - return true -} -func (this *RuleStateDesc) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RuleStateDesc) - if !ok { - that2, ok := that.(RuleStateDesc) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Rule.Equal(that1.Rule) { - return false - } - if this.State != that1.State { - return false - } - if this.Health != that1.Health { - return false - } - if this.LastError != that1.LastError { - return false - } - if len(this.Alerts) != len(that1.Alerts) { - return false - } - for i := range this.Alerts { - if !this.Alerts[i].Equal(that1.Alerts[i]) { - return false - } - } - if !this.EvaluationTimestamp.Equal(that1.EvaluationTimestamp) { - return false - } - if this.EvaluationDuration != that1.EvaluationDuration { - return false - } - return true -} -func (this *AlertStateDesc) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*AlertStateDesc) - if !ok { - that2, ok := that.(AlertStateDesc) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.State != that1.State { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Annotations) != len(that1.Annotations) { - return false - } - for i := range this.Annotations { - if !this.Annotations[i].Equal(that1.Annotations[i]) { - return false - } - } - if this.Value != that1.Value { - return false - } - if !this.ActiveAt.Equal(that1.ActiveAt) { - 
return false - } - if !this.FiredAt.Equal(that1.FiredAt) { - return false - } - if !this.ResolvedAt.Equal(that1.ResolvedAt) { - return false - } - if !this.LastSentAt.Equal(that1.LastSentAt) { - return false - } - if !this.ValidUntil.Equal(that1.ValidUntil) { - return false - } - return true -} -func (this *RulesRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&ruler.RulesRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RulesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&ruler.RulesResponse{") - if this.Groups != nil { - s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GroupStateDesc) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&ruler.GroupStateDesc{") - if this.Group != nil { - s = append(s, "Group: "+fmt.Sprintf("%#v", this.Group)+",\n") - } - if this.ActiveRules != nil { - s = append(s, "ActiveRules: "+fmt.Sprintf("%#v", this.ActiveRules)+",\n") - } - s = append(s, "EvaluationTimestamp: "+fmt.Sprintf("%#v", this.EvaluationTimestamp)+",\n") - s = append(s, "EvaluationDuration: "+fmt.Sprintf("%#v", this.EvaluationDuration)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RuleStateDesc) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&ruler.RuleStateDesc{") - if this.Rule != nil { - s = append(s, "Rule: "+fmt.Sprintf("%#v", this.Rule)+",\n") - } - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - s = append(s, "Health: "+fmt.Sprintf("%#v", this.Health)+",\n") - s = append(s, "LastError: "+fmt.Sprintf("%#v", this.LastError)+",\n") - if this.Alerts != nil { - s = append(s, "Alerts: "+fmt.Sprintf("%#v", this.Alerts)+",\n") - } - s = append(s, "EvaluationTimestamp: "+fmt.Sprintf("%#v", this.EvaluationTimestamp)+",\n") - s = append(s, "EvaluationDuration: "+fmt.Sprintf("%#v", this.EvaluationDuration)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *AlertStateDesc) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 13) - s = append(s, "&ruler.AlertStateDesc{") - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "ActiveAt: "+fmt.Sprintf("%#v", this.ActiveAt)+",\n") - s = append(s, "FiredAt: "+fmt.Sprintf("%#v", this.FiredAt)+",\n") - s = append(s, "ResolvedAt: "+fmt.Sprintf("%#v", this.ResolvedAt)+",\n") - s = append(s, "LastSentAt: "+fmt.Sprintf("%#v", this.LastSentAt)+",\n") - s = append(s, "ValidUntil: "+fmt.Sprintf("%#v", this.ValidUntil)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringRuler(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
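For context, a hedged usage sketch of the generated client API defined below: dial a ruler's gRPC endpoint and issue the unary Rules RPC. The address, tenant ID, and dial options are placeholders rather than values from this codebase, and the lowercase tenant metadata key follows the weaveworks/common convention that the server-side `tenant.TenantID` lookup expects:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/cortexproject/cortex/pkg/ruler"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Placeholder address; the ruler gRPC port is deployment-specific.
	conn, err := grpc.DialContext(ctx, "ruler.example:9095",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial ruler: %v", err)
	}
	defer conn.Close()

	// The Rules handler resolves the tenant from request metadata, so a
	// tenant ID header must accompany the call ("tenant-a" is made up).
	ctx = metadata.AppendToOutgoingContext(ctx, "x-scope-orgid", "tenant-a")

	client := ruler.NewRulerClient(conn)
	resp, err := client.Rules(ctx, &ruler.RulesRequest{})
	if err != nil {
		log.Fatalf("fetch rules: %v", err)
	}
	fmt.Printf("ruler reports %d rule groups\n", len(resp.Groups))
}
```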
-const _ = grpc.SupportPackageIsVersion4 - -// RulerClient is the client API for Ruler service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type RulerClient interface { - Rules(ctx context.Context, in *RulesRequest, opts ...grpc.CallOption) (*RulesResponse, error) -} - -type rulerClient struct { - cc *grpc.ClientConn -} - -func NewRulerClient(cc *grpc.ClientConn) RulerClient { - return &rulerClient{cc} -} - -func (c *rulerClient) Rules(ctx context.Context, in *RulesRequest, opts ...grpc.CallOption) (*RulesResponse, error) { - out := new(RulesResponse) - err := c.cc.Invoke(ctx, "/ruler.Ruler/Rules", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// RulerServer is the server API for Ruler service. -type RulerServer interface { - Rules(context.Context, *RulesRequest) (*RulesResponse, error) -} - -// UnimplementedRulerServer can be embedded to have forward compatible implementations. -type UnimplementedRulerServer struct { -} - -func (*UnimplementedRulerServer) Rules(ctx context.Context, req *RulesRequest) (*RulesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Rules not implemented") -} - -func RegisterRulerServer(s *grpc.Server, srv RulerServer) { - s.RegisterService(&_Ruler_serviceDesc, srv) -} - -func _Ruler_Rules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RulesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RulerServer).Rules(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ruler.Ruler/Rules", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RulerServer).Rules(ctx, req.(*RulesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Ruler_serviceDesc = grpc.ServiceDesc{ - ServiceName: "ruler.Ruler", - HandlerType: (*RulerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Rules", - Handler: _Ruler_Rules_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "ruler.proto", -} - -func (m *RulesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RulesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RulesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RulesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RulesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRuler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m 
*GroupStateDesc) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupStateDesc) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupStateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.EvaluationDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintRuler(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EvaluationTimestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintRuler(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x1a - if len(m.ActiveRules) > 0 { - for iNdEx := len(m.ActiveRules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ActiveRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRuler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Group != nil { - { - size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRuler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RuleStateDesc) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuleStateDesc) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuleStateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.EvaluationDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintRuler(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0x3a - n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EvaluationTimestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp):]) - if err5 != nil { - return 0, err5 - } - i -= n5 - i = encodeVarintRuler(dAtA, i, uint64(n5)) - i-- - dAtA[i] = 0x32 - if len(m.Alerts) > 0 { - for iNdEx := len(m.Alerts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Alerts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRuler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.LastError) > 0 { - i -= len(m.LastError) - copy(dAtA[i:], m.LastError) - i = encodeVarintRuler(dAtA, i, uint64(len(m.LastError))) - i-- - dAtA[i] = 0x22 - } - if len(m.Health) > 0 { - i -= len(m.Health) - copy(dAtA[i:], m.Health) - i = encodeVarintRuler(dAtA, i, uint64(len(m.Health))) - i-- - dAtA[i] = 0x1a - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintRuler(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x12 - } - if m.Rule != nil { - { - size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRuler(dAtA, i, uint64(size)) - } - 
i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AlertStateDesc) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlertStateDesc) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlertStateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ValidUntil, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ValidUntil):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintRuler(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x4a - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastSentAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastSentAt):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintRuler(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0x42 - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ResolvedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ResolvedAt):]) - if err9 != nil { - return 0, err9 - } - i -= n9 - i = encodeVarintRuler(dAtA, i, uint64(n9)) - i-- - dAtA[i] = 0x3a - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.FiredAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.FiredAt):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintRuler(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x32 - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ActiveAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ActiveAt):]) - if err11 != nil { - return 0, err11 - } - i -= n11 - i = encodeVarintRuler(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0x2a - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x21 - } - if len(m.Annotations) > 0 { - for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Annotations[iNdEx].Size() - i -= size - if _, err := m.Annotations[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintRuler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintRuler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintRuler(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintRuler(dAtA []byte, offset int, v uint64) int { - offset -= sovRuler(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RulesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *RulesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovRuler(uint64(l)) - } - } - return n -} - -func (m *GroupStateDesc) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Group != nil { - l = m.Group.Size() - n += 1 + l + sovRuler(uint64(l)) - } - if 
len(m.ActiveRules) > 0 { - for _, e := range m.ActiveRules { - l = e.Size() - n += 1 + l + sovRuler(uint64(l)) - } - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp) - n += 1 + l + sovRuler(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration) - n += 1 + l + sovRuler(uint64(l)) - return n -} - -func (m *RuleStateDesc) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Rule != nil { - l = m.Rule.Size() - n += 1 + l + sovRuler(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovRuler(uint64(l)) - } - l = len(m.Health) - if l > 0 { - n += 1 + l + sovRuler(uint64(l)) - } - l = len(m.LastError) - if l > 0 { - n += 1 + l + sovRuler(uint64(l)) - } - if len(m.Alerts) > 0 { - for _, e := range m.Alerts { - l = e.Size() - n += 1 + l + sovRuler(uint64(l)) - } - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp) - n += 1 + l + sovRuler(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration) - n += 1 + l + sovRuler(uint64(l)) - return n -} - -func (m *AlertStateDesc) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.State) - if l > 0 { - n += 1 + l + sovRuler(uint64(l)) - } - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovRuler(uint64(l)) - } - } - if len(m.Annotations) > 0 { - for _, e := range m.Annotations { - l = e.Size() - n += 1 + l + sovRuler(uint64(l)) - } - } - if m.Value != 0 { - n += 9 - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ActiveAt) - n += 1 + l + sovRuler(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.FiredAt) - n += 1 + l + sovRuler(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ResolvedAt) - n += 1 + l + sovRuler(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LastSentAt) - n += 1 + l + sovRuler(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ValidUntil) - n += 1 + l + sovRuler(uint64(l)) - return n -} - -func sovRuler(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRuler(x uint64) (n int) { - return sovRuler(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RulesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RulesRequest{`, - `}`, - }, "") - return s -} -func (this *RulesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForGroups := "[]*GroupStateDesc{" - for _, f := range this.Groups { - repeatedStringForGroups += strings.Replace(f.String(), "GroupStateDesc", "GroupStateDesc", 1) + "," - } - repeatedStringForGroups += "}" - s := strings.Join([]string{`&RulesResponse{`, - `Groups:` + repeatedStringForGroups + `,`, - `}`, - }, "") - return s -} -func (this *GroupStateDesc) String() string { - if this == nil { - return "nil" - } - repeatedStringForActiveRules := "[]*RuleStateDesc{" - for _, f := range this.ActiveRules { - repeatedStringForActiveRules += strings.Replace(f.String(), "RuleStateDesc", "RuleStateDesc", 1) + "," - } - repeatedStringForActiveRules += "}" - s := strings.Join([]string{`&GroupStateDesc{`, - `Group:` + strings.Replace(fmt.Sprintf("%v", this.Group), "RuleGroupDesc", "rulespb.RuleGroupDesc", 1) + `,`, - `ActiveRules:` + repeatedStringForActiveRules + `,`, - `EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationTimestamp), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `EvaluationDuration:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RuleStateDesc) String() string { - if this == nil { - return "nil" - } - repeatedStringForAlerts := "[]*AlertStateDesc{" - for _, f := range this.Alerts { - repeatedStringForAlerts += strings.Replace(f.String(), "AlertStateDesc", "AlertStateDesc", 1) + "," - } - repeatedStringForAlerts += "}" - s := strings.Join([]string{`&RuleStateDesc{`, - `Rule:` + strings.Replace(fmt.Sprintf("%v", this.Rule), "RuleDesc", "rulespb.RuleDesc", 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `Health:` + fmt.Sprintf("%v", this.Health) + `,`, - `LastError:` + fmt.Sprintf("%v", this.LastError) + `,`, - `Alerts:` + repeatedStringForAlerts + `,`, - `EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationTimestamp), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AlertStateDesc) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AlertStateDesc{`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ActiveAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActiveAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `FiredAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FiredAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `ResolvedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResolvedAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `LastSentAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastSentAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `ValidUntil:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ValidUntil), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringRuler(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *RulesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RulesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRuler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RulesResponse) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RulesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, &GroupStateDesc{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRuler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupStateDesc) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupStateDesc: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupStateDesc: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Group == nil { - m.Group = &rulespb.RuleGroupDesc{} - } - if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveRules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ActiveRules = append(m.ActiveRules, &RuleStateDesc{}) - if err := m.ActiveRules[len(m.ActiveRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EvaluationTimestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationDuration", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.EvaluationDuration, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRuler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuleStateDesc) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleStateDesc: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleStateDesc: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Rule == nil { - m.Rule = &rulespb.RuleDesc{} - } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Health = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LastError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alerts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Alerts = append(m.Alerts, &AlertStateDesc{}) - if err := m.Alerts[len(m.Alerts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EvaluationTimestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationDuration", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.EvaluationDuration, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRuler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlertStateDesc) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlertStateDesc: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlertStateDesc: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Annotations = append(m.Annotations, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Annotations[len(m.Annotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ActiveAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FiredAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.FiredAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResolvedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ResolvedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastSentAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LastSentAt, dAtA[iNdEx:postIndex]); err != nil 
{ - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidUntil", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRuler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRuler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRuler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ValidUntil, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRuler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRuler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRuler(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRuler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRuler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRuler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRuler - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthRuler - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRuler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRuler(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRuler - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRuler = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRuler = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.proto b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.proto deleted file mode 100644 index 2b66db412..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.proto +++ /dev/null @@ -1,68 +0,0 @@ -// Ruler Service Representation -// This service is used to retrieve the current state of rules running across 
-// all Rulers in a cluster. It allows cortex to fully serve the `/api/v1/{rules|alerts}` -// Prometheus API -syntax = "proto3"; -package ruler; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; -import "github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.proto"; - - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service Ruler { - rpc Rules(RulesRequest) returns (RulesResponse) {}; -} - -message RulesRequest {} - -message RulesResponse { - repeated GroupStateDesc groups = 1; -} - -// GroupStateDesc is a proto representation of a cortex rule group -message GroupStateDesc { - rules.RuleGroupDesc group = 1; - repeated RuleStateDesc active_rules = 2; - google.protobuf.Timestamp evaluationTimestamp = 3 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Duration evaluationDuration = 4 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; -} - -// RuleStateDesc is a proto representation of a Prometheus Rule -message RuleStateDesc { - rules.RuleDesc rule = 1; - string state = 2; - string health = 3; - string lastError = 4; - repeated AlertStateDesc alerts = 5; - google.protobuf.Timestamp evaluationTimestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Duration evaluationDuration = 7 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; -} - -message AlertStateDesc { - string state = 1; - repeated cortexpb.LabelPair labels = 2 [ - (gogoproto.nullable) = false, - (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" - ]; - repeated cortexpb.LabelPair annotations = 3 [ - (gogoproto.nullable) = false, - (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" - ]; - double value = 4; - google.protobuf.Timestamp active_at = 5 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp fired_at = 6 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp resolved_at = 7 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp last_sent_at = 8 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - google.protobuf.Timestamp valid_until = 9 - [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} \ No newline at end of file diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go deleted file mode 100644 index 9017a8429..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go +++ /dev/null @@ -1,104 +0,0 @@ -package ruler - -import ( - "flag" - "fmt" - "os" - "time" - - "github.com/go-kit/log" - - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util/flagext" -) - -const ( - // If a ruler is unable to heartbeat the ring, it's better to quickly remove it and resume - // the evaluation of all rules since the worst case scenario is that some rulers will - // receive duplicate/out-of-order sample errors. - ringAutoForgetUnhealthyPeriods = 2 -) - -// RingOp is the operation used for distributing rule groups between rulers. -var RingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, func(s ring.InstanceState) bool { - // Only ACTIVE rulers get any rule groups.
If instance is not ACTIVE, we need to find another ruler. - return s != ring.ACTIVE -}) - -// RingConfig masks the ring lifecycler config which contains -// many options not really required by the rulers ring. This config -// is used to strip down the config to the minimum, and avoid confusion -// to the user. -type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - - // Instance details - InstanceID string `yaml:"instance_id" doc:"hidden"` - InstanceInterfaceNames []string `yaml:"instance_interface_names"` - InstancePort int `yaml:"instance_port" doc:"hidden"` - InstanceAddr string `yaml:"instance_addr" doc:"hidden"` - NumTokens int `yaml:"num_tokens"` - - // Injected internally - ListenPort int `yaml:"-"` - - // Used for testing - SkipUnregister bool `yaml:"-"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { - hostname, err := os.Hostname() - if err != nil { - panic(fmt.Errorf("failed to get hostname, %w", err)) - } - - // Ring flags - cfg.KVStore.RegisterFlagsWithPrefix("ruler.ring.", "rulers/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "ruler.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") - f.DurationVar(&cfg.HeartbeatTimeout, "ruler.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which rulers are considered unhealthy within the ring. 0 = never (timeout disabled).") - - // Instance flags - cfg.InstanceInterfaceNames = []string{"eth0", "en0"} - f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), "ruler.ring.instance-interface-names", "Name of network interface to read address from.") - f.StringVar(&cfg.InstanceAddr, "ruler.ring.instance-addr", "", "IP address to advertise in the ring.") - f.IntVar(&cfg.InstancePort, "ruler.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") - f.StringVar(&cfg.InstanceID, "ruler.ring.instance-id", hostname, "Instance ID to register in the ring.") - f.IntVar(&cfg.NumTokens, "ruler.ring.num-tokens", 128, "Number of tokens for each ruler.") -} - -// ToLifecyclerConfig returns a LifecyclerConfig based on the ruler -// ring config. -func (cfg *RingConfig) ToLifecyclerConfig(logger log.Logger) (ring.BasicLifecyclerConfig, error) { - instanceAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, cfg.InstanceInterfaceNames, logger) - if err != nil { - return ring.BasicLifecyclerConfig{}, err - } - - instancePort := ring.GetInstancePort(cfg.InstancePort, cfg.ListenPort) - - return ring.BasicLifecyclerConfig{ - ID: cfg.InstanceID, - Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), - HeartbeatPeriod: cfg.HeartbeatPeriod, - TokensObservePeriod: 0, - NumTokens: cfg.NumTokens, - }, nil -} - -func (cfg *RingConfig) ToRingConfig() ring.Config { - rc := ring.Config{} - flagext.DefaultValues(&rc) - - rc.KVStore = cfg.KVStore - rc.HeartbeatTimeout = cfg.HeartbeatTimeout - rc.SubringCacheDisabled = true - - // Each rule group is loaded to *exactly* one ruler. 
- rc.ReplicationFactor = 1 - - return rc -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/compat.go deleted file mode 100644 index 9c2524c04..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/compat.go +++ /dev/null @@ -1,75 +0,0 @@ -package rulespb - -import ( - "time" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/rulefmt" - "gopkg.in/yaml.v3" - - "github.com/cortexproject/cortex/pkg/cortexpb" //lint:ignore faillint allowed to import other protobuf -) - -// ToProto transforms a formatted prometheus rulegroup to a rule group protobuf -func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc { - rg := RuleGroupDesc{ - Name: rl.Name, - Namespace: namespace, - Interval: time.Duration(rl.Interval), - Rules: formattedRuleToProto(rl.Rules), - User: user, - } - return &rg -} - -func formattedRuleToProto(rls []rulefmt.RuleNode) []*RuleDesc { - rules := make([]*RuleDesc, len(rls)) - for i := range rls { - rules[i] = &RuleDesc{ - Expr: rls[i].Expr.Value, - Record: rls[i].Record.Value, - Alert: rls[i].Alert.Value, - For: time.Duration(rls[i].For), - Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Labels)), - Annotations: cortexpb.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Annotations)), - } - } - - return rules -} - -// FromProto generates a rulefmt RuleGroup -func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup { - formattedRuleGroup := rulefmt.RuleGroup{ - Name: rg.GetName(), - Interval: model.Duration(rg.Interval), - Rules: make([]rulefmt.RuleNode, len(rg.GetRules())), - } - - for i, rl := range rg.GetRules() { - exprNode := yaml.Node{} - exprNode.SetString(rl.GetExpr()) - - newRule := rulefmt.RuleNode{ - Expr: exprNode, - Labels: cortexpb.FromLabelAdaptersToLabels(rl.Labels).Map(), - Annotations: cortexpb.FromLabelAdaptersToLabels(rl.Annotations).Map(), - For: model.Duration(rl.GetFor()), - } - - if rl.GetRecord() != "" { - recordNode := yaml.Node{} - recordNode.SetString(rl.GetRecord()) - newRule.Record = recordNode - } else { - alertNode := yaml.Node{} - alertNode.SetString(rl.GetAlert()) - newRule.Alert = alertNode - } - - formattedRuleGroup.Rules[i] = newRule - } - - return formattedRuleGroup -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/custom.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/custom.go deleted file mode 100644 index d896afa14..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/custom.go +++ /dev/null @@ -1,21 +0,0 @@ -package rulespb - -import "github.com/prometheus/prometheus/model/rulefmt" - -// RuleGroupList contains a set of rule groups -type RuleGroupList []*RuleGroupDesc - -// Formatted returns the rule group list as a set of formatted rule groups mapped -// by namespace -func (l RuleGroupList) Formatted() map[string][]rulefmt.RuleGroup { - ruleMap := map[string][]rulefmt.RuleGroup{} - for _, g := range l { - if _, exists := ruleMap[g.Namespace]; !exists { - ruleMap[g.Namespace] = []rulefmt.RuleGroup{FromProto(g)} - continue - } - ruleMap[g.Namespace] = append(ruleMap[g.Namespace], FromProto(g)) - - } - return ruleMap -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.pb.go deleted file mode 100644 index 8f76eaa47..000000000 --- 
a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.pb.go +++ /dev/null @@ -1,1278 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: rules.proto - -package rulespb - -import ( - fmt "fmt" - _ "github.com/cortexproject/cortex/pkg/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/duration" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// RuleGroupDesc is a proto representation of a cortex rule group. -type RuleGroupDesc struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Interval time.Duration `protobuf:"bytes,3,opt,name=interval,proto3,stdduration" json:"interval"` - Rules []*RuleDesc `protobuf:"bytes,4,rep,name=rules,proto3" json:"rules,omitempty"` - User string `protobuf:"bytes,6,opt,name=user,proto3" json:"user,omitempty"` - // The options field can be used to extend Cortex Ruler functionality without - // having to repeatedly redefine the proto description. It can also be leveraged - // to create custom `ManagerOpts` based on rule configs which can then be passed - // to the Prometheus Manager.
- Options []*types.Any `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` -} - -func (m *RuleGroupDesc) Reset() { *m = RuleGroupDesc{} } -func (*RuleGroupDesc) ProtoMessage() {} -func (*RuleGroupDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_8e722d3e922f0937, []int{0} -} -func (m *RuleGroupDesc) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuleGroupDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RuleGroupDesc.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RuleGroupDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuleGroupDesc.Merge(m, src) -} -func (m *RuleGroupDesc) XXX_Size() int { - return m.Size() -} -func (m *RuleGroupDesc) XXX_DiscardUnknown() { - xxx_messageInfo_RuleGroupDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_RuleGroupDesc proto.InternalMessageInfo - -func (m *RuleGroupDesc) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *RuleGroupDesc) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *RuleGroupDesc) GetInterval() time.Duration { - if m != nil { - return m.Interval - } - return 0 -} - -func (m *RuleGroupDesc) GetRules() []*RuleDesc { - if m != nil { - return m.Rules - } - return nil -} - -func (m *RuleGroupDesc) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func (m *RuleGroupDesc) GetOptions() []*types.Any { - if m != nil { - return m.Options - } - return nil -} - -// RuleDesc is a proto representation of a Prometheus Rule -type RuleDesc struct { - Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"` - Record string `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` - Alert string `protobuf:"bytes,3,opt,name=alert,proto3" json:"alert,omitempty"` - For time.Duration `protobuf:"bytes,4,opt,name=for,proto3,stdduration" json:"for"` - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,5,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"labels"` - Annotations []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,6,rep,name=annotations,proto3,customtype=github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" json:"annotations"` -} - -func (m *RuleDesc) Reset() { *m = RuleDesc{} } -func (*RuleDesc) ProtoMessage() {} -func (*RuleDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_8e722d3e922f0937, []int{1} -} -func (m *RuleDesc) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuleDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RuleDesc.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RuleDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuleDesc.Merge(m, src) -} -func (m *RuleDesc) XXX_Size() int { - return m.Size() -} -func (m *RuleDesc) XXX_DiscardUnknown() { - xxx_messageInfo_RuleDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_RuleDesc proto.InternalMessageInfo - -func (m *RuleDesc) GetExpr() string { - if m != nil { - return m.Expr - } - return "" -} - -func (m *RuleDesc) GetRecord() string { - if m != nil { - return m.Record - } - return "" -} - -func (m *RuleDesc) 
GetAlert() string { - if m != nil { - return m.Alert - } - return "" -} - -func (m *RuleDesc) GetFor() time.Duration { - if m != nil { - return m.For - } - return 0 -} - -func init() { - proto.RegisterType((*RuleGroupDesc)(nil), "rules.RuleGroupDesc") - proto.RegisterType((*RuleDesc)(nil), "rules.RuleDesc") -} - -func init() { proto.RegisterFile("rules.proto", fileDescriptor_8e722d3e922f0937) } - -var fileDescriptor_8e722d3e922f0937 = []byte{ - // 476 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0x3f, 0x6f, 0xd3, 0x40, - 0x1c, 0xf5, 0x35, 0x8e, 0x63, 0x5f, 0x54, 0x11, 0x1d, 0x15, 0x72, 0x2b, 0x74, 0x89, 0x2a, 0x21, - 0x65, 0xe1, 0x22, 0x15, 0x31, 0x30, 0x20, 0x94, 0xa8, 0x12, 0x52, 0xc4, 0x80, 0x3c, 0xb2, 0x9d, - 0x9d, 0xab, 0x31, 0xb8, 0xbe, 0xd3, 0xf9, 0x8c, 0xda, 0x8d, 0x8f, 0xc0, 0xc8, 0x47, 0xe0, 0xa3, - 0x74, 0xcc, 0x58, 0x31, 0x14, 0xe2, 0x2c, 0x8c, 0x95, 0xf8, 0x00, 0xa0, 0xfb, 0x63, 0x5a, 0xc1, - 0x02, 0x03, 0x53, 0x7e, 0xef, 0xde, 0xbd, 0xbc, 0xf7, 0x7b, 0x67, 0x38, 0x94, 0x4d, 0xc9, 0x6a, - 0x22, 0x24, 0x57, 0x1c, 0xf5, 0x0d, 0x38, 0x78, 0x98, 0x17, 0xea, 0x75, 0x93, 0x92, 0x8c, 0x9f, - 0xce, 0x72, 0x9e, 0xf3, 0x99, 0x61, 0xd3, 0xe6, 0xc4, 0x20, 0x03, 0xcc, 0x64, 0x55, 0x07, 0x38, - 0xe7, 0x3c, 0x2f, 0xd9, 0xcd, 0xad, 0x55, 0x23, 0xa9, 0x2a, 0x78, 0xe5, 0xf8, 0xfd, 0xdf, 0x79, - 0x5a, 0x9d, 0x3b, 0xea, 0xc9, 0x2d, 0xa7, 0x8c, 0x4b, 0xc5, 0xce, 0x84, 0xe4, 0x6f, 0x58, 0xa6, - 0x1c, 0x9a, 0x89, 0xb7, 0x79, 0x47, 0xa4, 0x6e, 0xb0, 0xd2, 0xc3, 0x1f, 0x00, 0xee, 0x26, 0x4d, - 0xc9, 0x9e, 0x4b, 0xde, 0x88, 0x63, 0x56, 0x67, 0x08, 0x41, 0xbf, 0xa2, 0xa7, 0x2c, 0x06, 0x13, - 0x30, 0x8d, 0x12, 0x33, 0xa3, 0xfb, 0x30, 0xd2, 0xbf, 0xb5, 0xa0, 0x19, 0x8b, 0x77, 0x0c, 0x71, - 0x73, 0x80, 0x9e, 0xc1, 0xb0, 0xa8, 0x14, 0x93, 0xef, 0x68, 0x19, 0xf7, 0x26, 0x60, 0x3a, 0x3c, - 0xda, 0x27, 0x36, 0x2c, 0xe9, 0xc2, 0x92, 0x63, 0xb7, 0xcc, 0x22, 0xbc, 0xb8, 0x1a, 0x7b, 0x1f, - 0xbf, 0x8c, 0x41, 0xf2, 0x4b, 0x84, 0x1e, 0x40, 0x5b, 0x59, 0xec, 0x4f, 0x7a, 0xd3, 0xe1, 0xd1, - 0x1d, 0x62, 0xdb, 0xd4, 0xb9, 0x74, 0xa4, 0xc4, 0xb2, 0x3a, 0x59, 0x53, 0x33, 0x19, 0x07, 0x36, - 0x99, 0x9e, 0x11, 0x81, 0x03, 0x2e, 0xf4, 0x1f, 0xd7, 0x71, 0x64, 0xc4, 0x7b, 0x7f, 0x58, 0xcf, - 0xab, 0xf3, 0xa4, 0xbb, 0xb4, 0xf4, 0xc3, 0xfe, 0x28, 0x58, 0xfa, 0xe1, 0x60, 0x14, 0x2e, 0xfd, - 0x30, 0x1c, 0x45, 0x87, 0xdf, 0x77, 0x60, 0xd8, 0x39, 0x69, 0x0b, 0x5d, 0x5e, 0xb7, 0xbc, 0x9e, - 0xd1, 0x3d, 0x18, 0x48, 0x96, 0x71, 0xb9, 0x72, 0x9b, 0x3b, 0x84, 0xf6, 0x60, 0x9f, 0x96, 0x4c, - 0x2a, 0xb3, 0x73, 0x94, 0x58, 0x80, 0x1e, 0xc3, 0xde, 0x09, 0x97, 0xb1, 0xff, 0xf7, 0x3d, 0xe8, - 0xfb, 0xa8, 0x82, 0x41, 0x49, 0x53, 0x56, 0xd6, 0x71, 0xdf, 0xac, 0x71, 0x97, 0x74, 0xef, 0x45, - 0x5e, 0xe8, 0xf3, 0x97, 0xb4, 0x90, 0x8b, 0xb9, 0xd6, 0x7c, 0xbe, 0x1a, 0xff, 0xd3, 0x7b, 0x5b, - 0xfd, 0x7c, 0x45, 0x85, 0x62, 0x32, 0x71, 0x2e, 0xe8, 0x0c, 0x0e, 0x69, 0x55, 0x71, 0x45, 0x6d, - 0x77, 0xc1, 0x7f, 0x35, 0xbd, 0x6d, 0x65, 0xba, 0xdf, 0x5d, 0x3c, 0x5d, 0x6f, 0xb0, 0x77, 0xb9, - 0xc1, 0xde, 0xf5, 0x06, 0x83, 0xf7, 0x2d, 0x06, 0x9f, 0x5a, 0x0c, 0x2e, 0x5a, 0x0c, 0xd6, 0x2d, - 0x06, 0x5f, 0x5b, 0x0c, 0xbe, 0xb5, 0xd8, 0xbb, 0x6e, 0x31, 0xf8, 0xb0, 0xc5, 0xde, 0x7a, 0x8b, - 0xbd, 0xcb, 0x2d, 0xf6, 0x5e, 0x0d, 0xcc, 0x87, 0x20, 0xd2, 0x34, 0x30, 0x85, 0x3e, 0xfa, 0x19, - 0x00, 0x00, 0xff, 0xff, 0xa0, 0xd3, 0x9a, 0x1a, 0x78, 0x03, 0x00, 0x00, -} - -func (this *RuleGroupDesc) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := 
that.(*RuleGroupDesc) - if !ok { - that2, ok := that.(RuleGroupDesc) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.Namespace != that1.Namespace { - return false - } - if this.Interval != that1.Interval { - return false - } - if len(this.Rules) != len(that1.Rules) { - return false - } - for i := range this.Rules { - if !this.Rules[i].Equal(that1.Rules[i]) { - return false - } - } - if this.User != that1.User { - return false - } - if len(this.Options) != len(that1.Options) { - return false - } - for i := range this.Options { - if !this.Options[i].Equal(that1.Options[i]) { - return false - } - } - return true -} -func (this *RuleDesc) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RuleDesc) - if !ok { - that2, ok := that.(RuleDesc) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Expr != that1.Expr { - return false - } - if this.Record != that1.Record { - return false - } - if this.Alert != that1.Alert { - return false - } - if this.For != that1.For { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Annotations) != len(that1.Annotations) { - return false - } - for i := range this.Annotations { - if !this.Annotations[i].Equal(that1.Annotations[i]) { - return false - } - } - return true -} -func (this *RuleGroupDesc) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&rulespb.RuleGroupDesc{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n") - s = append(s, "Interval: "+fmt.Sprintf("%#v", this.Interval)+",\n") - if this.Rules != nil { - s = append(s, "Rules: "+fmt.Sprintf("%#v", this.Rules)+",\n") - } - s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RuleDesc) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&rulespb.RuleDesc{") - s = append(s, "Expr: "+fmt.Sprintf("%#v", this.Expr)+",\n") - s = append(s, "Record: "+fmt.Sprintf("%#v", this.Record)+",\n") - s = append(s, "Alert: "+fmt.Sprintf("%#v", this.Alert)+",\n") - s = append(s, "For: "+fmt.Sprintf("%#v", this.For)+",\n") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringRules(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *RuleGroupDesc) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuleGroupDesc) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Options) > 0 { - for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRules(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintRules(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x32 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRules(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Interval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintRules(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x1a - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintRules(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRules(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RuleDesc) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuleDesc) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuleDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Annotations) > 0 { - for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Annotations[iNdEx].Size() - i -= size - if _, err := m.Annotations[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintRules(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintRules(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.For):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintRules(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x22 - if len(m.Alert) > 0 { - i -= len(m.Alert) - copy(dAtA[i:], m.Alert) - i = encodeVarintRules(dAtA, i, uint64(len(m.Alert))) - i-- - dAtA[i] = 0x1a - } - if len(m.Record) > 0 { - i -= len(m.Record) - copy(dAtA[i:], m.Record) - i = encodeVarintRules(dAtA, i, uint64(len(m.Record))) - i-- - dAtA[i] = 0x12 - } - if len(m.Expr) > 0 { - i -= len(m.Expr) - copy(dAtA[i:], m.Expr) - i = encodeVarintRules(dAtA, i, uint64(len(m.Expr))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintRules(dAtA []byte, offset int, v uint64) int { - offset -= sovRules(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RuleGroupDesc) Size() (n int) { - if m == nil { - return 0 - } 
- var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRules(uint64(l)) - } - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovRules(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval) - n += 1 + l + sovRules(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovRules(uint64(l)) - } - } - l = len(m.User) - if l > 0 { - n += 1 + l + sovRules(uint64(l)) - } - if len(m.Options) > 0 { - for _, e := range m.Options { - l = e.Size() - n += 1 + l + sovRules(uint64(l)) - } - } - return n -} - -func (m *RuleDesc) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Expr) - if l > 0 { - n += 1 + l + sovRules(uint64(l)) - } - l = len(m.Record) - if l > 0 { - n += 1 + l + sovRules(uint64(l)) - } - l = len(m.Alert) - if l > 0 { - n += 1 + l + sovRules(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.For) - n += 1 + l + sovRules(uint64(l)) - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovRules(uint64(l)) - } - } - if len(m.Annotations) > 0 { - for _, e := range m.Annotations { - l = e.Size() - n += 1 + l + sovRules(uint64(l)) - } - } - return n -} - -func sovRules(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRules(x uint64) (n int) { - return sovRules(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RuleGroupDesc) String() string { - if this == nil { - return "nil" - } - repeatedStringForRules := "[]*RuleDesc{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(f.String(), "RuleDesc", "RuleDesc", 1) + "," - } - repeatedStringForRules += "}" - repeatedStringForOptions := "[]*Any{" - for _, f := range this.Options { - repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Any", "types.Any", 1) + "," - } - repeatedStringForOptions += "}" - s := strings.Join([]string{`&RuleGroupDesc{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Interval:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Options:` + repeatedStringForOptions + `,`, - `}`, - }, "") - return s -} -func (this *RuleDesc) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuleDesc{`, - `Expr:` + fmt.Sprintf("%v", this.Expr) + `,`, - `Record:` + fmt.Sprintf("%v", this.Record) + `,`, - `Alert:` + fmt.Sprintf("%v", this.Alert) + `,`, - `For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, - `}`, - }, "") - return s -} -func valueToStringRules(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *RuleGroupDesc) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: RuleGroupDesc: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleGroupDesc: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Interval, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, &RuleDesc{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Options = append(m.Options, &types.Any{}) - if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRules(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRules - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRules - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuleDesc) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleDesc: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleDesc: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Record = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alert", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Alert = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field For", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.For, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRules - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRules - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRules - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Annotations = append(m.Annotations, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Annotations[len(m.Annotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRules(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRules - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRules - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRules(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRules - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRules - } 
- if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRules - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRules - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthRules - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRules - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRules(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRules - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRules = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRules = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.proto b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.proto deleted file mode 100644 index 16274cec0..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulespb/rules.proto +++ /dev/null @@ -1,47 +0,0 @@ - -syntax = "proto3"; - -package rules; - -option go_package = "rulespb"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/any.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// RuleGroupDesc is a proto representation of a cortex rule group. -message RuleGroupDesc { - reserved 5, 7, 8; - string name = 1; - string namespace = 2; - google.protobuf.Duration interval = 3 - [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; - repeated RuleDesc rules = 4; - string user = 6; - // The options field can be used to extend Cortex Ruler functionality without - // having to repeatedly redefine the proto description. It can also be leveraged - // to create custom `ManagerOpts` based on rule configs which can then be passed - // to the Prometheus Manager.
- repeated google.protobuf.Any options = 9; -} - -// RuleDesc is a proto representation of a Prometheus Rule -message RuleDesc { - reserved 7 to 12; - string expr = 1; - string record = 2; - string alert = 3; - google.protobuf.Duration for = 4 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; - repeated cortexpb.LabelPair labels = 5 [ - (gogoproto.nullable) = false, - (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" - ]; - repeated cortexpb.LabelPair annotations = 6 [ - (gogoproto.nullable) = false, - (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter" - ]; -} \ No newline at end of file diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go deleted file mode 100644 index 95a1b6cf8..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient/bucket_client.go +++ /dev/null @@ -1,319 +0,0 @@ -package bucketclient - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "io/ioutil" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" - "golang.org/x/sync/errgroup" - - "github.com/cortexproject/cortex/pkg/ruler/rulespb" - "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/storage/bucket" -) - -const ( - // The bucket prefix under which all tenants' rule groups are stored. - rulesPrefix = "rules" - - loadConcurrency = 10 -) - -var ( - errInvalidRuleGroupKey = errors.New("invalid rule group object key") - errEmptyUser = errors.New("empty user") - errEmptyNamespace = errors.New("empty namespace") - errEmptyGroupName = errors.New("empty group name") -) - -// BucketRuleStore is used to support the RuleStore interface against an object storage backend. It is implemented -// using the Thanos objstore.Bucket interface -type BucketRuleStore struct { - bucket objstore.Bucket - cfgProvider bucket.TenantConfigProvider - logger log.Logger -} - -func NewBucketRuleStore(bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *BucketRuleStore { - return &BucketRuleStore{ - bucket: bucket.NewPrefixedBucketClient(bkt, rulesPrefix), - cfgProvider: cfgProvider, - logger: logger, - } -} - -// getRuleGroup loads and returns a rule group. If an existing rule group is supplied, it is Reset and reused. If nil, new RuleGroupDesc is allocated.
-func (b *BucketRuleStore) getRuleGroup(ctx context.Context, userID, namespace, groupName string, rg *rulespb.RuleGroupDesc) (*rulespb.RuleGroupDesc, error) { - userBucket := bucket.NewUserBucketClient(userID, b.bucket, b.cfgProvider) - objectKey := getRuleGroupObjectKey(namespace, groupName) - - reader, err := userBucket.Get(ctx, objectKey) - if userBucket.IsObjNotFoundErr(err) { - level.Debug(b.logger).Log("msg", "rule group does not exist", "user", userID, "key", objectKey) - return nil, rulestore.ErrGroupNotFound - } - - if err != nil { - return nil, errors.Wrapf(err, "failed to get rule group %s", objectKey) - } - defer func() { _ = reader.Close() }() - - buf, err := ioutil.ReadAll(reader) - if err != nil { - return nil, errors.Wrapf(err, "failed to read rule group %s", objectKey) - } - - if rg == nil { - rg = &rulespb.RuleGroupDesc{} - } else { - rg.Reset() - } - - err = proto.Unmarshal(buf, rg) - if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal rule group %s", objectKey) - } - - return rg, nil -} - -// ListAllUsers implements rules.RuleStore. -func (b *BucketRuleStore) ListAllUsers(ctx context.Context) ([]string, error) { - var users []string - err := b.bucket.Iter(ctx, "", func(user string) error { - users = append(users, strings.TrimSuffix(user, objstore.DirDelim)) - return nil - }) - if err != nil { - return nil, fmt.Errorf("unable to list users in rule store bucket: %w", err) - } - - return users, nil -} - -// ListAllRuleGroups implements rules.RuleStore. -func (b *BucketRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) { - out := map[string]rulespb.RuleGroupList{} - - // List rule groups for all tenants. - err := b.bucket.Iter(ctx, "", func(key string) error { - userID, namespace, group, err := parseRuleGroupObjectKeyWithUser(key) - if err != nil { - level.Warn(b.logger).Log("msg", "invalid rule group object key found while listing rule groups", "key", key, "err", err) - - // Do not fail just because of a spurious item in the bucket. - return nil - } - - out[userID] = append(out[userID], &rulespb.RuleGroupDesc{ - User: userID, - Namespace: namespace, - Name: group, - }) - return nil - }, objstore.WithRecursiveIter) - - if err != nil { - return nil, err - } - - return out, nil -} - -// ListRuleGroupsForUserAndNamespace implements rules.RuleStore. -func (b *BucketRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) { - userBucket := bucket.NewUserBucketClient(userID, b.bucket, b.cfgProvider) - - groupList := rulespb.RuleGroupList{} - - // The prefix to list objects depends on whether the namespace has been - // specified in the request. - prefix := "" - if namespace != "" { - prefix = getNamespacePrefix(namespace) - } - - err := userBucket.Iter(ctx, prefix, func(key string) error { - namespace, group, err := parseRuleGroupObjectKey(key) - if err != nil { - level.Warn(b.logger).Log("msg", "invalid rule group object key found while listing rule groups", "user", userID, "key", key, "err", err) - - // Do not fail just because of a spurious item in the bucket. - return nil - } - - groupList = append(groupList, &rulespb.RuleGroupDesc{ - User: userID, - Namespace: namespace, - Name: group, - }) - return nil - }, objstore.WithRecursiveIter) - if err != nil { - return nil, err - } - - return groupList, nil -} - -// LoadRuleGroups implements rules.RuleStore.
-func (b *BucketRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error { - ch := make(chan *rulespb.RuleGroupDesc) - - // Given we store one file per rule group. With this, we create a pool of workers that will - // download all rule groups in parallel. We limit the number of workers to avoid a - // particular user having too many rule groups rate limiting us with the object storage. - g, gCtx := errgroup.WithContext(ctx) - for i := 0; i < loadConcurrency; i++ { - g.Go(func() error { - for gr := range ch { - user, namespace, group := gr.GetUser(), gr.GetNamespace(), gr.GetName() - if user == "" || namespace == "" || group == "" { - return fmt.Errorf("invalid rule group: user=%q, namespace=%q, group=%q", user, namespace, group) - } - - gr, err := b.getRuleGroup(gCtx, user, namespace, group, gr) // reuse group pointer from the map. - if err != nil { - return errors.Wrapf(err, "get rule group user=%q, namespace=%q, name=%q", user, namespace, group) - } - - if user != gr.User || namespace != gr.Namespace || group != gr.Name { - return fmt.Errorf("mismatch between requested rule group and loaded rule group, requested: user=%q, namespace=%q, group=%q, loaded: user=%q, namespace=%q, group=%q", user, namespace, group, gr.User, gr.Namespace, gr.Name) - } - } - - return nil - }) - } - -outer: - for _, gs := range groupsToLoad { - for _, g := range gs { - if g == nil { - continue - } - select { - case <-gCtx.Done(): - break outer - case ch <- g: - // ok - } - } - } - close(ch) - - return g.Wait() -} - -// GetRuleGroup implements rules.RuleStore. -func (b *BucketRuleStore) GetRuleGroup(ctx context.Context, userID string, namespace string, group string) (*rulespb.RuleGroupDesc, error) { - return b.getRuleGroup(ctx, userID, namespace, group, nil) -} - -// SetRuleGroup implements rules.RuleStore. -func (b *BucketRuleStore) SetRuleGroup(ctx context.Context, userID string, namespace string, group *rulespb.RuleGroupDesc) error { - userBucket := bucket.NewUserBucketClient(userID, b.bucket, b.cfgProvider) - data, err := proto.Marshal(group) - if err != nil { - return err - } - - return userBucket.Upload(ctx, getRuleGroupObjectKey(namespace, group.Name), bytes.NewBuffer(data)) -} - -// DeleteRuleGroup implements rules.RuleStore. -func (b *BucketRuleStore) DeleteRuleGroup(ctx context.Context, userID string, namespace string, group string) error { - userBucket := bucket.NewUserBucketClient(userID, b.bucket, b.cfgProvider) - err := userBucket.Delete(ctx, getRuleGroupObjectKey(namespace, group)) - if b.bucket.IsObjNotFoundErr(err) { - return rulestore.ErrGroupNotFound - } - return err -} - -// DeleteNamespace implements rules.RuleStore. 
-func (b *BucketRuleStore) DeleteNamespace(ctx context.Context, userID string, namespace string) error { - ruleGroupList, err := b.ListRuleGroupsForUserAndNamespace(ctx, userID, namespace) - if err != nil { - return err - } - - if len(ruleGroupList) == 0 { - return rulestore.ErrGroupNamespaceNotFound - } - - userBucket := bucket.NewUserBucketClient(userID, b.bucket, b.cfgProvider) - for _, rg := range ruleGroupList { - if err := ctx.Err(); err != nil { - return err - } - objectKey := getRuleGroupObjectKey(rg.Namespace, rg.Name) - level.Debug(b.logger).Log("msg", "deleting rule group", "user", userID, "namespace", namespace, "key", objectKey) - err = userBucket.Delete(ctx, objectKey) - if err != nil { - level.Error(b.logger).Log("msg", "unable to delete rule group from namespace", "user", userID, "namespace", namespace, "key", objectKey, "err", err) - return err - } - } - - return nil -} - -func getNamespacePrefix(namespace string) string { - return base64.URLEncoding.EncodeToString([]byte(namespace)) + objstore.DirDelim -} - -func getRuleGroupObjectKey(namespace, group string) string { - return getNamespacePrefix(namespace) + base64.URLEncoding.EncodeToString([]byte(group)) -} - -// parseRuleGroupObjectKeyWithUser parses a bucket object key in the format "//". -func parseRuleGroupObjectKeyWithUser(key string) (user, namespace, group string, err error) { - parts := strings.SplitN(key, objstore.DirDelim, 2) - if len(parts) != 2 { - return "", "", "", errInvalidRuleGroupKey - } - - user = parts[0] - if user == "" { - return "", "", "", errEmptyUser - } - namespace, group, err = parseRuleGroupObjectKey(parts[1]) - return -} - -// parseRuleGroupObjectKey parses a bucket object key in the format "/". -func parseRuleGroupObjectKey(key string) (namespace, group string, _ error) { - parts := strings.Split(key, objstore.DirDelim) - if len(parts) != 2 { - return "", "", errInvalidRuleGroupKey - } - - decodedNamespace, err := base64.URLEncoding.DecodeString(parts[0]) - if err != nil { - return "", "", err - } - - if len(decodedNamespace) == 0 { - return "", "", errEmptyNamespace - } - - decodedGroup, err := base64.URLEncoding.DecodeString(parts[1]) - if err != nil { - return "", "", err - } - - if len(decodedGroup) == 0 { - return "", "", errEmptyGroupName - } - - return string(decodedNamespace), string(decodedGroup), nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go deleted file mode 100644 index ef5c855ee..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/config.go +++ /dev/null @@ -1,37 +0,0 @@ -package rulestore - -import ( - "flag" - "reflect" - - "github.com/cortexproject/cortex/pkg/configs/client" - "github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb" - "github.com/cortexproject/cortex/pkg/ruler/rulestore/local" - "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/util/flagext" -) - -// Config configures a rule store. -type Config struct { - bucket.Config `yaml:",inline"` - ConfigDB client.Config `yaml:"configdb"` - Local local.Config `yaml:"local"` -} - -// RegisterFlags registers the backend storage config. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - prefix := "ruler-storage." 
- - cfg.ExtraBackends = []string{configdb.Name, local.Name} - cfg.ConfigDB.RegisterFlagsWithPrefix(prefix, f) - cfg.Local.RegisterFlagsWithPrefix(prefix, f) - cfg.RegisterFlagsWithPrefix(prefix, f) -} - -// IsDefaults returns true if the storage options have not been set. -func (cfg *Config) IsDefaults() bool { - defaults := Config{} - flagext.DefaultValues(&defaults) - - return reflect.DeepEqual(*cfg, defaults) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go deleted file mode 100644 index 5d125a920..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb/store.go +++ /dev/null @@ -1,136 +0,0 @@ -package configdb - -import ( - "context" - "errors" - - "github.com/cortexproject/cortex/pkg/configs/client" - "github.com/cortexproject/cortex/pkg/configs/userconfig" - "github.com/cortexproject/cortex/pkg/ruler/rulespb" -) - -const ( - Name = "configdb" -) - -// ConfigRuleStore is a concrete implementation of RuleStore that sources rules from the config service -type ConfigRuleStore struct { - configClient client.Client - since userconfig.ID - ruleGroupList map[string]rulespb.RuleGroupList -} - -func (c *ConfigRuleStore) SupportsModifications() bool { - return false -} - -// NewConfigRuleStore constructs a ConfigRuleStore -func NewConfigRuleStore(c client.Client) *ConfigRuleStore { - return &ConfigRuleStore{ - configClient: c, - since: 0, - ruleGroupList: make(map[string]rulespb.RuleGroupList), - } -} - -func (c *ConfigRuleStore) ListAllUsers(ctx context.Context) ([]string, error) { - m, err := c.ListAllRuleGroups(ctx) - - // TODO: this should be optimized, if possible. - result := []string(nil) - for u := range m { - result = append(result, u) - } - - return result, err -} - -// ListAllRuleGroups implements RuleStore -func (c *ConfigRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) { - configs, err := c.configClient.GetRules(ctx, c.since) - - if err != nil { - return nil, err - } - - for user, cfg := range configs { - userRules := rulespb.RuleGroupList{} - if cfg.IsDeleted() { - delete(c.ruleGroupList, user) - continue - } - rMap, err := cfg.Config.ParseFormatted() - if err != nil { - return nil, err - } - for file, rgs := range rMap { - for _, rg := range rgs.Groups { - userRules = append(userRules, rulespb.ToProto(user, file, rg)) - } - } - c.ruleGroupList[user] = userRules - } - - c.since = getLatestConfigID(configs, c.since) - - return c.ruleGroupList, nil -} - -// getLatestConfigID gets the latest configs ID. -// max [latest, max (map getID cfgs)] -func getLatestConfigID(cfgs map[string]userconfig.VersionedRulesConfig, latest userconfig.ID) userconfig.ID { - ret := latest - for _, config := range cfgs { - if config.ID > ret { - ret = config.ID - } - } - return ret -} - -func (c *ConfigRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) { - r, err := c.ListAllRuleGroups(ctx) - if err != nil { - return nil, err - } - - if namespace == "" { - return r[userID], nil - } - - list := r[userID] - for ix := 0; ix < len(list); { - if list[ix].GetNamespace() != namespace { - list = append(list[:ix], list[ix+1:]...) 
- } else { - ix++ - } - } - - return list, nil -} - -func (c *ConfigRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error { - // Since ConfigRuleStore already Loads the rules in the List methods, there is nothing left to do here. - return nil -} - -// GetRuleGroup is not implemented -func (c *ConfigRuleStore) GetRuleGroup(ctx context.Context, userID, namespace, group string) (*rulespb.RuleGroupDesc, error) { - return nil, errors.New("not implemented by the config service rule store") -} - -// SetRuleGroup is not implemented -func (c *ConfigRuleStore) SetRuleGroup(ctx context.Context, userID, namespace string, group *rulespb.RuleGroupDesc) error { - return errors.New("not implemented by the config service rule store") -} - -// DeleteRuleGroup is not implemented -func (c *ConfigRuleStore) DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error { - return errors.New("not implemented by the config service rule store") -} - -// DeleteNamespace is not implemented -func (c *ConfigRuleStore) DeleteNamespace(ctx context.Context, userID, namespace string) error { - return errors.New("not implemented by the config service rule store") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go deleted file mode 100644 index 080e29414..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/local/local.go +++ /dev/null @@ -1,182 +0,0 @@ -package local - -import ( - "context" - "flag" - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - promRules "github.com/prometheus/prometheus/rules" - - "github.com/cortexproject/cortex/pkg/ruler/rulespb" -) - -const ( - Name = "local" -) - -type Config struct { - Directory string `yaml:"directory"` -} - -// RegisterFlags registers flags. -func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.Directory, prefix+"local.directory", "", "Directory to scan for rules") -} - -// Client expects to load already existing rules located at: -// cfg.Directory / userID / namespace -type Client struct { - cfg Config - loader promRules.GroupLoader -} - -func NewLocalRulesClient(cfg Config, loader promRules.GroupLoader) (*Client, error) { - if cfg.Directory == "" { - return nil, errors.New("directory required for local rules config") - } - - return &Client{ - cfg: cfg, - loader: loader, - }, nil -} - -func (l *Client) ListAllUsers(ctx context.Context) ([]string, error) { - root := l.cfg.Directory - infos, err := ioutil.ReadDir(root) - if err != nil { - return nil, errors.Wrapf(err, "unable to read dir %s", root) - } - - var result []string - for _, info := range infos { - // After resolving link, info.Name() may be different than user, so keep original name. - user := info.Name() - - if info.Mode()&os.ModeSymlink != 0 { - // ioutil.ReadDir only returns result of LStat. Calling Stat resolves symlink. - info, err = os.Stat(filepath.Join(root, info.Name())) - if err != nil { - return nil, err - } - } - - if info.IsDir() { - result = append(result, user) - } - } - - return result, nil -} - -// ListAllRuleGroups implements rules.RuleStore. This method also loads the rules. 
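[Editor's note: the namespace filter in ListRuleGroupsForUserAndNamespace above edits the slice in place rather than allocating a second one, advancing the index only when an element is kept. The same pattern in isolation, with made-up values:

package main

import "fmt"

func main() {
	// In-place filter: keep elements matching the predicate without a
	// second allocation. Relative order is preserved.
	list := []string{"ns1", "ns2", "ns1", "ns3"}
	keep := "ns1"

	for ix := 0; ix < len(list); {
		if list[ix] != keep {
			list = append(list[:ix], list[ix+1:]...) // drop element ix
		} else {
			ix++
		}
	}
	fmt.Printf("%q\n", list) // ["ns1" "ns1"]
}
]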
-func (l *Client) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) { - users, err := l.ListAllUsers(ctx) - if err != nil { - return nil, err - } - - lists := make(map[string]rulespb.RuleGroupList) - for _, user := range users { - list, err := l.loadAllRulesGroupsForUser(ctx, user) - if err != nil { - return nil, errors.Wrapf(err, "failed to list rule groups for user %s", user) - } - - lists[user] = list - } - - return lists, nil -} - -// ListRuleGroupsForUserAndNamespace implements rules.RuleStore. This method also loads the rules. -func (l *Client) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) { - if namespace != "" { - return l.loadAllRulesGroupsForUserAndNamespace(ctx, userID, namespace) - } - - return l.loadAllRulesGroupsForUser(ctx, userID) -} - -func (l *Client) LoadRuleGroups(_ context.Context, _ map[string]rulespb.RuleGroupList) error { - // This Client already loads the rules in its List methods, there is nothing left to do here. - return nil -} - -// GetRuleGroup implements RuleStore -func (l *Client) GetRuleGroup(ctx context.Context, userID, namespace, group string) (*rulespb.RuleGroupDesc, error) { - return nil, errors.New("GetRuleGroup unsupported in rule local store") -} - -// SetRuleGroup implements RuleStore -func (l *Client) SetRuleGroup(ctx context.Context, userID, namespace string, group *rulespb.RuleGroupDesc) error { - return errors.New("SetRuleGroup unsupported in rule local store") -} - -// DeleteRuleGroup implements RuleStore -func (l *Client) DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error { - return errors.New("DeleteRuleGroup unsupported in rule local store") -} - -// DeleteNamespace implements RulerStore -func (l *Client) DeleteNamespace(ctx context.Context, userID, namespace string) error { - return errors.New("DeleteNamespace unsupported in rule local store") -} - -func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) (rulespb.RuleGroupList, error) { - var allLists rulespb.RuleGroupList - - root := filepath.Join(l.cfg.Directory, userID) - infos, err := ioutil.ReadDir(root) - if err != nil { - return nil, errors.Wrapf(err, "unable to read rule dir %s", root) - } - - for _, info := range infos { - // After resolving link, info.Name() may be different than namespace, so keep original name. - namespace := info.Name() - - if info.Mode()&os.ModeSymlink != 0 { - // ioutil.ReadDir only returns result of LStat. Calling Stat resolves symlink. - path := filepath.Join(root, info.Name()) - info, err = os.Stat(path) - if err != nil { - return nil, errors.Wrapf(err, "unable to stat rule file %s", path) - } - } - - if info.IsDir() { - continue - } - - list, err := l.loadAllRulesGroupsForUserAndNamespace(ctx, userID, namespace) - if err != nil { - return nil, errors.Wrapf(err, "failed to list rule group for user %s and namespace %s", userID, namespace) - } - - allLists = append(allLists, list...) 
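[Editor's note: both listing paths in this local client resolve symlinks before deciding whether an entry is a tenant directory or a namespace file, because ioutil.ReadDir only lstats. A small sketch of that check, with a hypothetical root path standing in for cfg.Directory:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	root := "/etc/cortex/rules" // hypothetical; cfg.Directory in the client above

	infos, err := ioutil.ReadDir(root) // lstat results: symlinks not followed
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, info := range infos {
		name := info.Name() // keep the link's own name even if the target differs
		if info.Mode()&os.ModeSymlink != 0 {
			if info, err = os.Stat(filepath.Join(root, name)); err != nil {
				continue // broken link; skip it
			}
		}
		if info.IsDir() {
			fmt.Println("tenant:", name)
		}
	}
}

The expected layout is <directory>/<userID>/<namespace>, where each namespace file is an ordinary Prometheus rule file.]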
- } - - return allLists, nil -} - -func (l *Client) loadAllRulesGroupsForUserAndNamespace(_ context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) { - filename := filepath.Join(l.cfg.Directory, userID, namespace) - - rulegroups, allErrors := l.loader.Load(filename) - if len(allErrors) > 0 { - return nil, errors.Wrapf(allErrors[0], "error parsing %s", filename) - } - - var list rulespb.RuleGroupList - - for _, group := range rulegroups.Groups { - desc := rulespb.ToProto(userID, namespace, group) - list = append(list, desc) - } - - return list, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go deleted file mode 100644 index bdaba132f..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient/rule_store.go +++ /dev/null @@ -1,289 +0,0 @@ -package objectclient - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "io/ioutil" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/ruler/rulespb" - "github.com/cortexproject/cortex/pkg/ruler/rulestore" -) - -// Object Rule Storage Schema -// ======================= -// Object Name: "rules///" -// Storage Format: Encoded RuleGroupDesc -// -// Prometheus Rule Groups can include a large number of characters that are not valid object names -// in common object storage systems. A URL Base64 encoding allows for generic consistent naming -// across all backends - -const ( - delim = "/" - rulePrefix = "rules" + delim -) - -// RuleStore allows cortex rules to be stored using an object store backend. -type RuleStore struct { - client chunk.ObjectClient - loadConcurrency int - - logger log.Logger -} - -// NewRuleStore returns a new RuleStore -func NewRuleStore(client chunk.ObjectClient, loadConcurrency int, logger log.Logger) *RuleStore { - return &RuleStore{ - client: client, - loadConcurrency: loadConcurrency, - logger: logger, - } -} - -// If existing rule group is supplied, it is Reset and reused. If nil, new RuleGroupDesc is allocated. 
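[Editor's note: following the "Object Rule Storage Schema" comment above, a quick round trip of the object-key encoding. Sample user, namespace, and group values are invented:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Schema: rules/<userID>/<URL-base64 namespace>/<URL-base64 group>.
	// Base64 URL encoding keeps arbitrary namespace and group names safe
	// as object-store keys across backends.
	ns := base64.URLEncoding.EncodeToString([]byte("team-a namespace"))
	grp := base64.URLEncoding.EncodeToString([]byte("alerts.yaml"))
	key := "rules/user-1/" + ns + "/" + grp
	fmt.Println(key)

	// Decomposing mirrors decomposeRuleObjectKey below: split on "/" and
	// expect exactly four components.
	parts := strings.Split(key, "/")
	if len(parts) == 4 {
		decNs, _ := base64.URLEncoding.DecodeString(parts[2])
		decGrp, _ := base64.URLEncoding.DecodeString(parts[3])
		fmt.Println(parts[1], string(decNs), string(decGrp))
	}
}
]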
-func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string, rg *rulespb.RuleGroupDesc) (*rulespb.RuleGroupDesc, error) { - reader, err := o.client.GetObject(ctx, objectKey) - if err == chunk.ErrStorageObjectNotFound { - level.Debug(o.logger).Log("msg", "rule group does not exist", "name", objectKey) - return nil, errors.Wrapf(rulestore.ErrGroupNotFound, "get rule group user=%q, namespace=%q, name=%q", rg.GetUser(), rg.GetNamespace(), rg.GetName()) - } - - if err != nil { - return nil, errors.Wrapf(err, "failed to get rule group %s", objectKey) - } - defer func() { _ = reader.Close() }() - - buf, err := ioutil.ReadAll(reader) - if err != nil { - return nil, errors.Wrapf(err, "failed to read rule group %s", objectKey) - } - - if rg == nil { - rg = &rulespb.RuleGroupDesc{} - } else { - rg.Reset() - } - - err = proto.Unmarshal(buf, rg) - if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal rule group %s", objectKey) - } - - return rg, nil -} - -func (o *RuleStore) ListAllUsers(ctx context.Context) ([]string, error) { - _, prefixes, err := o.client.List(ctx, rulePrefix, delim) - if err != nil { - return nil, err - } - - var result []string - for _, p := range prefixes { - s := string(p) - - s = strings.TrimPrefix(s, rulePrefix) - s = strings.TrimSuffix(s, delim) - - if s != "" { - result = append(result, s) - } - } - - return result, nil -} - -// ListAllRuleGroups implements rules.RuleStore. -func (o *RuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) { - // No delimiter to get *all* rule groups for all users and namespaces. - ruleGroupObjects, _, err := o.client.List(ctx, rulePrefix, "") - if err != nil { - return nil, err - } - - return convertRuleGroupObjectsToMap(ruleGroupObjects), nil -} - -func (o *RuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID, namespace string) (rulespb.RuleGroupList, error) { - ruleGroupObjects, _, err := o.client.List(ctx, generateRuleObjectKey(userID, namespace, ""), "") - if err != nil { - return nil, err - } - - return convertRuleGroupObjectsToMap(ruleGroupObjects)[userID], nil -} - -func (o *RuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error { - ch := make(chan *rulespb.RuleGroupDesc) - - // Given we store one file per rule group. With this, we create a pool of workers that will - // download all rule groups in parallel. We limit the number of workers to avoid a - // particular user having too many rule groups rate limiting us with the object storage. - g, gCtx := errgroup.WithContext(ctx) - for i := 0; i < o.loadConcurrency; i++ { - g.Go(func() error { - for gr := range ch { - if gr == nil { - continue - } - - user, namespace, group := gr.GetUser(), gr.GetNamespace(), gr.GetName() - if user == "" || namespace == "" || group == "" { - return fmt.Errorf("invalid rule group: user=%q, namespace=%q, group=%q", user, namespace, group) - } - - key := generateRuleObjectKey(user, namespace, group) - - level.Debug(o.logger).Log("msg", "loading rule group", "key", key, "user", user) - gr, err := o.getRuleGroup(gCtx, key, gr) // reuse group pointer from the map. 
- if err != nil { - level.Error(o.logger).Log("msg", "failed to get rule group", "key", key, "user", user) - return err - } - - if user != gr.User || namespace != gr.Namespace || group != gr.Name { - return fmt.Errorf("mismatch between requested rule group and loaded rule group, requested: user=%q, namespace=%q, group=%q, loaded: user=%q, namespace=%q, group=%q", user, namespace, group, gr.User, gr.Namespace, gr.Name) - } - } - - return nil - }) - } - -outer: - for _, gs := range groupsToLoad { - for _, g := range gs { - select { - case <-gCtx.Done(): - break outer - case ch <- g: - // ok - } - } - } - close(ch) - - return g.Wait() -} - -func convertRuleGroupObjectsToMap(ruleGroupObjects []chunk.StorageObject) map[string]rulespb.RuleGroupList { - result := map[string]rulespb.RuleGroupList{} - for _, rg := range ruleGroupObjects { - user, namespace, group := decomposeRuleObjectKey(rg.Key) - if user == "" || namespace == "" || group == "" { - continue - } - - result[user] = append(result[user], &rulespb.RuleGroupDesc{ - User: user, - Namespace: namespace, - Name: group, - }) - } - return result -} - -// GetRuleGroup returns the requested rule group -func (o *RuleStore) GetRuleGroup(ctx context.Context, userID string, namespace string, grp string) (*rulespb.RuleGroupDesc, error) { - handle := generateRuleObjectKey(userID, namespace, grp) - return o.getRuleGroup(ctx, handle, nil) -} - -// SetRuleGroup sets provided rule group -func (o *RuleStore) SetRuleGroup(ctx context.Context, userID string, namespace string, group *rulespb.RuleGroupDesc) error { - data, err := proto.Marshal(group) - if err != nil { - return err - } - - objectKey := generateRuleObjectKey(userID, namespace, group.Name) - return o.client.PutObject(ctx, objectKey, bytes.NewReader(data)) -} - -// DeleteRuleGroup deletes the specified rule group -func (o *RuleStore) DeleteRuleGroup(ctx context.Context, userID string, namespace string, groupName string) error { - objectKey := generateRuleObjectKey(userID, namespace, groupName) - err := o.client.DeleteObject(ctx, objectKey) - if err == chunk.ErrStorageObjectNotFound { - return rulestore.ErrGroupNotFound - } - return err -} - -// DeleteNamespace deletes all the rule groups in the specified namespace -func (o *RuleStore) DeleteNamespace(ctx context.Context, userID, namespace string) error { - ruleGroupObjects, _, err := o.client.List(ctx, generateRuleObjectKey(userID, namespace, ""), "") - if err != nil { - return err - } - - if len(ruleGroupObjects) == 0 { - return rulestore.ErrGroupNamespaceNotFound - } - - for _, obj := range ruleGroupObjects { - if err := ctx.Err(); err != nil { - return err - } - - level.Debug(o.logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key) - err = o.client.DeleteObject(ctx, obj.Key) - if err != nil { - level.Error(o.logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key) - return err - } - } - - return nil -} - -func generateRuleObjectKey(userID, namespace, groupName string) string { - if userID == "" { - return rulePrefix - } - - prefix := rulePrefix + userID + delim - if namespace == "" { - return prefix - } - - ns := base64.URLEncoding.EncodeToString([]byte(namespace)) + delim - if groupName == "" { - return prefix + ns - } - - return prefix + ns + base64.URLEncoding.EncodeToString([]byte(groupName)) -} - -func decomposeRuleObjectKey(objectKey string) (userID, namespace, groupName string) { - if !strings.HasPrefix(objectKey, rulePrefix) { - return - } - - 
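[Editor's note: SetRuleGroup and getRuleGroup above are, at heart, a gogo/protobuf round trip of RuleGroupDesc. A minimal sketch, assuming the vendored cortex rulespb package is importable as shown in this patch:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	"github.com/cortexproject/cortex/pkg/ruler/rulespb"
)

func main() {
	// What SetRuleGroup writes to object storage...
	in := &rulespb.RuleGroupDesc{User: "user-1", Namespace: "ns", Name: "group"}
	buf, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	// ...and what getRuleGroup reads back. Passing an existing struct and
	// calling Reset (as the store does) just avoids a fresh allocation.
	out := &rulespb.RuleGroupDesc{}
	if err := proto.Unmarshal(buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.User, out.Namespace, out.Name)
}
]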
components := strings.Split(objectKey, delim) - if len(components) != 4 { - return - } - - ns, err := base64.URLEncoding.DecodeString(components[2]) - if err != nil { - return - } - - gr, err := base64.URLEncoding.DecodeString(components[3]) - if err != nil { - return - } - - return components[1], string(ns), string(gr) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go deleted file mode 100644 index d8b97ed05..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rulestore/store.go +++ /dev/null @@ -1,47 +0,0 @@ -package rulestore - -import ( - "context" - "errors" - - "github.com/cortexproject/cortex/pkg/ruler/rulespb" -) - -var ( - // ErrGroupNotFound is returned if a rule group does not exist - ErrGroupNotFound = errors.New("group does not exist") - // ErrGroupNamespaceNotFound is returned if a namespace does not exist - ErrGroupNamespaceNotFound = errors.New("group namespace does not exist") - // ErrUserNotFound is returned if the user does not currently exist - ErrUserNotFound = errors.New("no rule groups found for user") -) - -// RuleStore is used to store and retrieve rules. -// Methods starting with "List" prefix may return partially loaded groups: with only group Name, Namespace and User fields set. -// To make sure that rules within each group are loaded, client must use LoadRuleGroups method. -type RuleStore interface { - // ListAllUsers returns all users with rule groups configured. - ListAllUsers(ctx context.Context) ([]string, error) - - // ListAllRuleGroups returns all rule groups for all users. - ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) - - // ListRuleGroupsForUserAndNamespace returns all the active rule groups for a user from given namespace. - // If namespace is empty, groups from all namespaces are returned. - ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) - - // LoadRuleGroups loads rules for each rule group in the map. - // Parameter with groups to load *MUST* be coming from one of the List methods. - // Reason is that some implementations don't do anything, since their List method already loads the rules. - LoadRuleGroups(ctx context.Context, groupsToLoad map[string]rulespb.RuleGroupList) error - - GetRuleGroup(ctx context.Context, userID, namespace, group string) (*rulespb.RuleGroupDesc, error) - SetRuleGroup(ctx context.Context, userID, namespace string, group *rulespb.RuleGroupDesc) error - - // DeleteRuleGroup deletes single rule group. - DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error - - // DeleteNamespace lists rule groups for given user and namespace, and deletes all rule groups. - // If namespace is empty, deletes all rule groups for user. 
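[Editor's note: to make the contract above concrete — callers must treat List results as skeletons and call LoadRuleGroups before reading rule contents. A sketch of that sequence; dumpRules is a hypothetical helper and store construction is omitted:

package main

import (
	"context"
	"fmt"

	"github.com/cortexproject/cortex/pkg/ruler/rulestore"
)

// dumpRules is a hypothetical consumer of the RuleStore interface above.
// List methods may return groups with only Name/Namespace/User populated,
// so LoadRuleGroups must run before the rules themselves are read.
func dumpRules(ctx context.Context, store rulestore.RuleStore) error {
	groups, err := store.ListAllRuleGroups(ctx)
	if err != nil {
		return err
	}
	if err := store.LoadRuleGroups(ctx, groups); err != nil {
		return err
	}
	for user, list := range groups {
		for _, g := range list {
			fmt.Printf("user=%s namespace=%s group=%s\n", user, g.Namespace, g.Name)
		}
	}
	return nil
}

func main() {
	var store rulestore.RuleStore // backend-specific construction omitted
	if store != nil {
		_ = dumpRules(context.Background(), store)
	}
}
]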
- DeleteNamespace(ctx context.Context, userID, namespace string) error -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go deleted file mode 100644 index f66d68986..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go +++ /dev/null @@ -1,145 +0,0 @@ -package ruler - -import ( - "context" - "flag" - "fmt" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - promRules "github.com/prometheus/prometheus/rules" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/aws" - "github.com/cortexproject/cortex/pkg/chunk/azure" - "github.com/cortexproject/cortex/pkg/chunk/gcp" - "github.com/cortexproject/cortex/pkg/chunk/openstack" - "github.com/cortexproject/cortex/pkg/configs/client" - configClient "github.com/cortexproject/cortex/pkg/configs/client" - "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient" - "github.com/cortexproject/cortex/pkg/ruler/rulestore/configdb" - "github.com/cortexproject/cortex/pkg/ruler/rulestore/local" - "github.com/cortexproject/cortex/pkg/ruler/rulestore/objectclient" - "github.com/cortexproject/cortex/pkg/storage/bucket" -) - -// RuleStoreConfig configures a rule store. -// TODO remove this legacy config in Cortex 1.11. -type RuleStoreConfig struct { - Type string `yaml:"type"` - ConfigDB client.Config `yaml:"configdb"` - - // Object Storage Configs - Azure azure.BlobStorageConfig `yaml:"azure"` - GCS gcp.GCSConfig `yaml:"gcs"` - S3 aws.S3Config `yaml:"s3"` - Swift openstack.SwiftConfig `yaml:"swift"` - Local local.Config `yaml:"local"` - - mock rulestore.RuleStore `yaml:"-"` -} - -// RegisterFlags registers flags. -func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) { - cfg.ConfigDB.RegisterFlagsWithPrefix("ruler.", f) - cfg.Azure.RegisterFlagsWithPrefix("ruler.storage.", f) - cfg.GCS.RegisterFlagsWithPrefix("ruler.storage.", f) - cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f) - cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f) - cfg.Local.RegisterFlagsWithPrefix("ruler.storage.", f) - - f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local)") -} - -// Validate config and returns error on failure -func (cfg *RuleStoreConfig) Validate() error { - if err := cfg.Swift.Validate(); err != nil { - return errors.Wrap(err, "invalid Swift Storage config") - } - if err := cfg.Azure.Validate(); err != nil { - return errors.Wrap(err, "invalid Azure Storage config") - } - if err := cfg.S3.Validate(); err != nil { - return errors.Wrap(err, "invalid S3 Storage config") - } - return nil -} - -// IsDefaults returns true if the storage options have not been set -func (cfg *RuleStoreConfig) IsDefaults() bool { - return cfg.Type == "configdb" && cfg.ConfigDB.ConfigsAPIURL.URL == nil -} - -// NewLegacyRuleStore returns a rule store backend client based on the provided cfg. -// The client used by the function is based a legacy object store clients that shouldn't -// be used anymore. 
-func NewLegacyRuleStore(cfg RuleStoreConfig, loader promRules.GroupLoader, logger log.Logger) (rulestore.RuleStore, error) { - if cfg.mock != nil { - return cfg.mock, nil - } - - if loader == nil { - loader = promRules.FileLoader{} - } - - var err error - var client chunk.ObjectClient - - switch cfg.Type { - case "configdb": - c, err := configClient.New(cfg.ConfigDB) - if err != nil { - return nil, err - } - return configdb.NewConfigRuleStore(c), nil - case "azure": - client, err = azure.NewBlobStorage(&cfg.Azure) - case "gcs": - client, err = gcp.NewGCSObjectClient(context.Background(), cfg.GCS) - case "s3": - client, err = aws.NewS3ObjectClient(cfg.S3) - case "swift": - client, err = openstack.NewSwiftObjectClient(cfg.Swift) - case "local": - return local.NewLocalRulesClient(cfg.Local, loader) - default: - return nil, fmt.Errorf("unrecognized rule storage mode %v, choose one of: configdb, gcs, s3, swift, azure, local", cfg.Type) - } - - if err != nil { - return nil, err - } - - return objectclient.NewRuleStore(client, loadRulesConcurrency, logger), nil -} - -// NewRuleStore returns a rule store backend client based on the provided cfg. -func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.TenantConfigProvider, loader promRules.GroupLoader, logger log.Logger, reg prometheus.Registerer) (rulestore.RuleStore, error) { - if cfg.Backend == configdb.Name { - c, err := client.New(cfg.ConfigDB) - - if err != nil { - return nil, err - } - - return configdb.NewConfigRuleStore(c), nil - } - - if cfg.Backend == local.Name { - return local.NewLocalRulesClient(cfg.Local, loader) - } - - bucketClient, err := bucket.NewClient(ctx, cfg.Config, "ruler-storage", logger, reg) - if err != nil { - return nil, err - } - - store := bucketclient.NewBucketRuleStore(bucketClient, cfgProvider, logger) - if err != nil { - return nil, err - } - - return store, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go deleted file mode 100644 index 37de02736..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go +++ /dev/null @@ -1,224 +0,0 @@ -package queue - -import ( - "context" - "sync" - "time" - - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util/services" -) - -const ( - // How frequently to check for disconnected queriers that should be forgotten. - forgetCheckPeriod = 5 * time.Second -) - -var ( - ErrTooManyRequests = errors.New("too many outstanding requests") - ErrStopped = errors.New("queue is stopped") -) - -// UserIndex is opaque type that allows to resume iteration over users between successive calls -// of RequestQueue.GetNextRequestForQuerier method. -type UserIndex struct { - last int -} - -// Modify index to start iteration on the same user, for which last queue was returned. -func (ui UserIndex) ReuseLastUser() UserIndex { - if ui.last >= 0 { - return UserIndex{last: ui.last - 1} - } - return ui -} - -// FirstUser returns UserIndex that starts iteration over user queues from the very first user. -func FirstUser() UserIndex { - return UserIndex{last: -1} -} - -// Request stored into the queue. -type Request interface{} - -// RequestQueue holds incoming requests in per-user queues. 
It also assigns each user specified number of queriers, -// and when querier asks for next request to handle (using GetNextRequestForQuerier), it returns requests -// in a fair fashion. -type RequestQueue struct { - services.Service - - connectedQuerierWorkers *atomic.Int32 - - mtx sync.Mutex - cond *sync.Cond // Notified when request is enqueued or dequeued, or querier is disconnected. - queues *queues - stopped bool - - queueLength *prometheus.GaugeVec // Per user and reason. - discardedRequests *prometheus.CounterVec // Per user. -} - -func NewRequestQueue(maxOutstandingPerTenant int, forgetDelay time.Duration, queueLength *prometheus.GaugeVec, discardedRequests *prometheus.CounterVec) *RequestQueue { - q := &RequestQueue{ - queues: newUserQueues(maxOutstandingPerTenant, forgetDelay), - connectedQuerierWorkers: atomic.NewInt32(0), - queueLength: queueLength, - discardedRequests: discardedRequests, - } - - q.cond = sync.NewCond(&q.mtx) - q.Service = services.NewTimerService(forgetCheckPeriod, nil, q.forgetDisconnectedQueriers, q.stopping).WithName("request queue") - - return q -} - -// EnqueueRequest puts the request into the queue. MaxQueries is user-specific value that specifies how many queriers can -// this user use (zero or negative = all queriers). It is passed to each EnqueueRequest, because it can change -// between calls. -// -// If request is successfully enqueued, successFn is called with the lock held, before any querier can receive the request. -func (q *RequestQueue) EnqueueRequest(userID string, req Request, maxQueriers int, successFn func()) error { - q.mtx.Lock() - defer q.mtx.Unlock() - - if q.stopped { - return ErrStopped - } - - queue := q.queues.getOrAddQueue(userID, maxQueriers) - if queue == nil { - // This can only happen if userID is "". - return errors.New("no queue found") - } - - select { - case queue <- req: - q.queueLength.WithLabelValues(userID).Inc() - q.cond.Broadcast() - // Call this function while holding a lock. This guarantees that no querier can fetch the request before function returns. - if successFn != nil { - successFn() - } - return nil - default: - q.discardedRequests.WithLabelValues(userID).Inc() - return ErrTooManyRequests - } -} - -// GetNextRequestForQuerier find next user queue and takes the next request off of it. Will block if there are no requests. -// By passing user index from previous call of this method, querier guarantees that it iterates over all users fairly. -// If querier finds that request from the user is already expired, it can get a request for the same user by using UserIndex.ReuseLastUser. -func (q *RequestQueue) GetNextRequestForQuerier(ctx context.Context, last UserIndex, querierID string) (Request, UserIndex, error) { - q.mtx.Lock() - defer q.mtx.Unlock() - - querierWait := false - -FindQueue: - // We need to wait if there are no users, or no pending requests for given querier. - for (q.queues.len() == 0 || querierWait) && ctx.Err() == nil && !q.stopped { - querierWait = false - q.cond.Wait() - } - - if q.stopped { - return nil, last, ErrStopped - } - - if err := ctx.Err(); err != nil { - return nil, last, err - } - - for { - queue, userID, idx := q.queues.getNextQueueForQuerier(last.last, querierID) - last.last = idx - if queue == nil { - break - } - - // Pick next request from the queue. - for { - request := <-queue - if len(queue) == 0 { - q.queues.deleteQueue(userID) - } - - q.queueLength.WithLabelValues(userID).Dec() - - // Tell close() we've processed a request. 
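[Editor's note: a hypothetical end-to-end use of this queue, based only on the API shown in this file — metric names, tenant and querier IDs are invented. Enqueue for a tenant, then dequeue fairly by threading the returned UserIndex through successive calls:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/scheduler/queue"
)

func main() {
	queueLength := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "demo_queue_length"}, []string{"user"})
	discarded := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_discarded_total"}, []string{"user"})

	// 100 outstanding requests per tenant, no querier forget delay.
	q := queue.NewRequestQueue(100, 0, queueLength, discarded)

	// maxQueriers = 0 means any querier may serve this tenant; successFn may
	// be nil when nothing needs to run under the queue lock.
	_ = q.EnqueueRequest("tenant-1", "request-payload", 0, nil)

	q.RegisterQuerierConnection("querier-1")
	defer q.UnregisterQuerierConnection("querier-1")

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Pass back the UserIndex from the previous call so tenants are visited
	// in turn rather than the same tenant being drained repeatedly.
	last := queue.FirstUser()
	req, idx, err := q.GetNextRequestForQuerier(ctx, last, "querier-1")
	if err == nil {
		last = idx
		fmt.Println("got:", req)
	}
	_ = last
}
]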
- q.cond.Broadcast() - - return request, last, nil - } - } - - // There are no unexpired requests, so we can get back - // and wait for more requests. - querierWait = true - goto FindQueue -} - -func (q *RequestQueue) forgetDisconnectedQueriers(_ context.Context) error { - q.mtx.Lock() - defer q.mtx.Unlock() - - if q.queues.forgetDisconnectedQueriers(time.Now()) > 0 { - // We need to notify goroutines cause having removed some queriers - // may have caused a resharding. - q.cond.Broadcast() - } - - return nil -} - -func (q *RequestQueue) stopping(_ error) error { - q.mtx.Lock() - defer q.mtx.Unlock() - - for q.queues.len() > 0 && q.connectedQuerierWorkers.Load() > 0 { - q.cond.Wait() - } - - // Only stop after dispatching enqueued requests. - q.stopped = true - - // If there are still goroutines in GetNextRequestForQuerier method, they get notified. - q.cond.Broadcast() - - return nil -} - -func (q *RequestQueue) RegisterQuerierConnection(querier string) { - q.connectedQuerierWorkers.Inc() - - q.mtx.Lock() - defer q.mtx.Unlock() - q.queues.addQuerierConnection(querier) -} - -func (q *RequestQueue) UnregisterQuerierConnection(querier string) { - q.connectedQuerierWorkers.Dec() - - q.mtx.Lock() - defer q.mtx.Unlock() - q.queues.removeQuerierConnection(querier, time.Now()) -} - -func (q *RequestQueue) NotifyQuerierShutdown(querierID string) { - q.mtx.Lock() - defer q.mtx.Unlock() - q.queues.notifyQuerierShutdown(querierID) -} - -// When querier is waiting for next request, this unblocks the method. -func (q *RequestQueue) QuerierDisconnecting() { - q.cond.Broadcast() -} - -func (q *RequestQueue) GetConnectedQuerierWorkersMetric() float64 { - return float64(q.connectedQuerierWorkers.Load()) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/user_queues.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/user_queues.go deleted file mode 100644 index cdad43402..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/user_queues.go +++ /dev/null @@ -1,305 +0,0 @@ -package queue - -import ( - "math/rand" - "sort" - "time" - - "github.com/cortexproject/cortex/pkg/util" -) - -// querier holds information about a querier registered in the queue. -type querier struct { - // Number of active connections. - connections int - - // True if the querier notified it's gracefully shutting down. - shuttingDown bool - - // When the last connection has been unregistered. - disconnectedAt time.Time -} - -// This struct holds user queues for pending requests. It also keeps track of connected queriers, -// and mapping between users and queriers. -type queues struct { - userQueues map[string]*userQueue - - // List of all users with queues, used for iteration when searching for next queue to handle. - // Users removed from the middle are replaced with "". To avoid skipping users during iteration, we only shrink - // this list when there are ""'s at the end of it. - users []string - - maxUserQueueSize int - - // How long to wait before removing a querier which has got disconnected - // but hasn't notified about a graceful shutdown. - forgetDelay time.Duration - - // Tracks queriers registered to the queue. - queriers map[string]*querier - - // Sorted list of querier names, used when creating per-user shard. - sortedQueriers []string -} - -type userQueue struct { - ch chan Request - - // If not nil, only these queriers can handle user requests. If nil, all queriers can. - // We set this to nil if number of available queriers <= maxQueriers. 
- queriers map[string]struct{} - maxQueriers int - - // Seed for shuffle sharding of queriers. This seed is based on userID only and is therefore consistent - // between different frontends. - seed int64 - - // Points back to 'users' field in queues. Enables quick cleanup. - index int -} - -func newUserQueues(maxUserQueueSize int, forgetDelay time.Duration) *queues { - return &queues{ - userQueues: map[string]*userQueue{}, - users: nil, - maxUserQueueSize: maxUserQueueSize, - forgetDelay: forgetDelay, - queriers: map[string]*querier{}, - sortedQueriers: nil, - } -} - -func (q *queues) len() int { - return len(q.userQueues) -} - -func (q *queues) deleteQueue(userID string) { - uq := q.userQueues[userID] - if uq == nil { - return - } - - delete(q.userQueues, userID) - q.users[uq.index] = "" - - // Shrink users list size if possible. This is safe, and no users will be skipped during iteration. - for ix := len(q.users) - 1; ix >= 0 && q.users[ix] == ""; ix-- { - q.users = q.users[:ix] - } -} - -// Returns existing or new queue for user. -// MaxQueriers is used to compute which queriers should handle requests for this user. -// If maxQueriers is <= 0, all queriers can handle this user's requests. -// If maxQueriers has changed since the last call, queriers for this are recomputed. -func (q *queues) getOrAddQueue(userID string, maxQueriers int) chan Request { - // Empty user is not allowed, as that would break our users list ("" is used for free spot). - if userID == "" { - return nil - } - - if maxQueriers < 0 { - maxQueriers = 0 - } - - uq := q.userQueues[userID] - - if uq == nil { - uq = &userQueue{ - ch: make(chan Request, q.maxUserQueueSize), - seed: util.ShuffleShardSeed(userID, ""), - index: -1, - } - q.userQueues[userID] = uq - - // Add user to the list of users... find first free spot, and put it there. - for ix, u := range q.users { - if u == "" { - uq.index = ix - q.users[ix] = userID - break - } - } - - // ... or add to the end. - if uq.index < 0 { - uq.index = len(q.users) - q.users = append(q.users, userID) - } - } - - if uq.maxQueriers != maxQueriers { - uq.maxQueriers = maxQueriers - uq.queriers = shuffleQueriersForUser(uq.seed, maxQueriers, q.sortedQueriers, nil) - } - - return uq.ch -} - -// Finds next queue for the querier. To support fair scheduling between users, client is expected -// to pass last user index returned by this function as argument. Is there was no previous -// last user index, use -1. -func (q *queues) getNextQueueForQuerier(lastUserIndex int, querierID string) (chan Request, string, int) { - uid := lastUserIndex - - for iters := 0; iters < len(q.users); iters++ { - uid = uid + 1 - - // Don't use "mod len(q.users)", as that could skip users at the beginning of the list - // for example when q.users has shrunk since last call. - if uid >= len(q.users) { - uid = 0 - } - - u := q.users[uid] - if u == "" { - continue - } - - q := q.userQueues[u] - - if q.queriers != nil { - if _, ok := q.queriers[querierID]; !ok { - // This querier is not handling the user. - continue - } - } - - return q.ch, u, uid - } - return nil, "", uid -} - -func (q *queues) addQuerierConnection(querierID string) { - info := q.queriers[querierID] - if info != nil { - info.connections++ - - // Reset in case the querier re-connected while it was in the forget waiting period. - info.shuttingDown = false - info.disconnectedAt = time.Time{} - - return - } - - // First connection from this querier. 
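[Editor's note: the users slice above is managed as a free-list — a removed tenant leaves an empty-string hole so that every userQueue.index stays valid, and the slice shrinks only while the holes sit at the tail. The mechanism in isolation, with toy values:

package main

import "fmt"

func main() {
	users := []string{"a", "b", "c", "d"}

	remove := func(ix int) {
		users[ix] = "" // leave a hole; indexes of other users are untouched
		for i := len(users) - 1; i >= 0 && users[i] == ""; i-- {
			users = users[:i] // shrink only trailing holes
		}
	}

	remove(1)
	fmt.Printf("%q\n", users) // ["a" "" "c" "d"]: hole kept mid-slice
	remove(3)
	fmt.Printf("%q\n", users) // ["a" "" "c"]: tail hole trimmed
}
]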
- q.queriers[querierID] = &querier{connections: 1} - q.sortedQueriers = append(q.sortedQueriers, querierID) - sort.Strings(q.sortedQueriers) - - q.recomputeUserQueriers() -} - -func (q *queues) removeQuerierConnection(querierID string, now time.Time) { - info := q.queriers[querierID] - if info == nil || info.connections <= 0 { - panic("unexpected number of connections for querier") - } - - // Decrease the number of active connections. - info.connections-- - if info.connections > 0 { - return - } - - // There no more active connections. If the forget delay is configured then - // we can remove it only if querier has announced a graceful shutdown. - if info.shuttingDown || q.forgetDelay == 0 { - q.removeQuerier(querierID) - return - } - - // No graceful shutdown has been notified yet, so we should track the current time - // so that we'll remove the querier as soon as we receive the graceful shutdown - // notification (if any) or once the threshold expires. - info.disconnectedAt = now -} - -func (q *queues) removeQuerier(querierID string) { - delete(q.queriers, querierID) - - ix := sort.SearchStrings(q.sortedQueriers, querierID) - if ix >= len(q.sortedQueriers) || q.sortedQueriers[ix] != querierID { - panic("incorrect state of sorted queriers") - } - - q.sortedQueriers = append(q.sortedQueriers[:ix], q.sortedQueriers[ix+1:]...) - - q.recomputeUserQueriers() -} - -// notifyQuerierShutdown records that a querier has sent notification about a graceful shutdown. -func (q *queues) notifyQuerierShutdown(querierID string) { - info := q.queriers[querierID] - if info == nil { - // The querier may have already been removed, so we just ignore it. - return - } - - // If there are no more connections, we should remove the querier. - if info.connections == 0 { - q.removeQuerier(querierID) - return - } - - // Otherwise we should annotate we received a graceful shutdown notification - // and the querier will be removed once all connections are unregistered. - info.shuttingDown = true -} - -// forgetDisconnectedQueriers removes all disconnected queriers that have gone since at least -// the forget delay. Returns the number of forgotten queriers. -func (q *queues) forgetDisconnectedQueriers(now time.Time) int { - // Nothing to do if the forget delay is disabled. - if q.forgetDelay == 0 { - return 0 - } - - // Remove all queriers with no connections that have gone since at least the forget delay. - threshold := now.Add(-q.forgetDelay) - forgotten := 0 - - for querierID := range q.queriers { - if info := q.queriers[querierID]; info.connections == 0 && info.disconnectedAt.Before(threshold) { - q.removeQuerier(querierID) - forgotten++ - } - } - - return forgotten -} - -func (q *queues) recomputeUserQueriers() { - scratchpad := make([]string, 0, len(q.sortedQueriers)) - - for _, uq := range q.userQueues { - uq.queriers = shuffleQueriersForUser(uq.seed, uq.maxQueriers, q.sortedQueriers, scratchpad) - } -} - -// shuffleQueriersForUser returns nil if queriersToSelect is 0 or there are not enough queriers to select from. -// In that case *all* queriers should be used. -// Scratchpad is used for shuffling, to avoid new allocations. If nil, new slice is allocated. 
-func shuffleQueriersForUser(userSeed int64, queriersToSelect int, allSortedQueriers []string, scratchpad []string) map[string]struct{} { - if queriersToSelect == 0 || len(allSortedQueriers) <= queriersToSelect { - return nil - } - - result := make(map[string]struct{}, queriersToSelect) - rnd := rand.New(rand.NewSource(userSeed)) - - scratchpad = scratchpad[:0] - scratchpad = append(scratchpad, allSortedQueriers...) - - last := len(scratchpad) - 1 - for i := 0; i < queriersToSelect; i++ { - r := rnd.Intn(last + 1) - result[scratchpad[r]] = struct{}{} - // move selected item to the end, it won't be selected anymore. - scratchpad[r], scratchpad[last] = scratchpad[last], scratchpad[r] - last-- - } - - return result -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go deleted file mode 100644 index 772e60c87..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go +++ /dev/null @@ -1,528 +0,0 @@ -package scheduler - -import ( - "context" - "flag" - "io" - "net/http" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - otgrpc "github.com/opentracing-contrib/go-grpc" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/middleware" - "github.com/weaveworks/common/user" - "google.golang.org/grpc" - - "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" - "github.com/cortexproject/cortex/pkg/scheduler/queue" - "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -var ( - errSchedulerIsNotRunning = errors.New("scheduler is not running") -) - -// Scheduler is responsible for queueing and dispatching queries to Queriers. -type Scheduler struct { - services.Service - - cfg Config - log log.Logger - - limits Limits - - connectedFrontendsMu sync.Mutex - connectedFrontends map[string]*connectedFrontend - - requestQueue *queue.RequestQueue - activeUsers *util.ActiveUsersCleanupService - - pendingRequestsMu sync.Mutex - pendingRequests map[requestKey]*schedulerRequest // Request is kept in this map even after being dispatched to querier. It can still be canceled at that time. - - // Subservices manager. - subservices *services.Manager - subservicesWatcher *services.FailureWatcher - - // Metrics. - queueLength *prometheus.GaugeVec - discardedRequests *prometheus.CounterVec - connectedQuerierClients prometheus.GaugeFunc - connectedFrontendClients prometheus.GaugeFunc - queueDuration prometheus.Histogram -} - -type requestKey struct { - frontendAddr string - queryID uint64 -} - -type connectedFrontend struct { - connections int - - // This context is used for running all queries from the same frontend. - // When last frontend connection is closed, context is canceled. 
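[Editor's note: shuffleQueriersForUser above is a partial Fisher–Yates shuffle driven by a per-tenant seed, so every scheduler and frontend derives the same shard independently, with no coordination. A self-contained restatement — pickN is a hypothetical name:

package main

import (
	"fmt"
	"math/rand"
)

// pickN selects n queriers from the sorted list, deterministically for a
// given seed. Returning nil means "use all queriers", matching the
// convention above.
func pickN(seed int64, n int, sorted []string) map[string]struct{} {
	if n == 0 || len(sorted) <= n {
		return nil
	}
	result := make(map[string]struct{}, n)
	rnd := rand.New(rand.NewSource(seed)) // per-tenant seed => stable shard
	pool := append([]string(nil), sorted...)
	last := len(pool) - 1
	for i := 0; i < n; i++ {
		r := rnd.Intn(last + 1)
		result[pool[r]] = struct{}{}
		pool[r], pool[last] = pool[last], pool[r] // retire the selected item
		last--
	}
	return result
}

func main() {
	queriers := []string{"q1", "q2", "q3", "q4", "q5"}
	// Same seed -> same shard, regardless of which process computes it.
	fmt.Println(pickN(42, 2, queriers))
	fmt.Println(pickN(42, 2, queriers))
}
]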
- ctx context.Context - cancel context.CancelFunc -} - -type Config struct { - MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant"` - QuerierForgetDelay time.Duration `yaml:"querier_forget_delay"` - GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=This configures the gRPC client used to report errors back to the query-frontend."` -} - -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per query-scheduler. In-flight requests above this limit will fail with HTTP response status code 429.") - f.DurationVar(&cfg.QuerierForgetDelay, "query-scheduler.querier-forget-delay", 0, "If a querier disconnects without sending notification about graceful shutdown, the query-scheduler will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.") - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("query-scheduler.grpc-client-config", f) -} - -// NewScheduler creates a new Scheduler. -func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer) (*Scheduler, error) { - s := &Scheduler{ - cfg: cfg, - log: log, - limits: limits, - - pendingRequests: map[requestKey]*schedulerRequest{}, - connectedFrontends: map[string]*connectedFrontend{}, - } - - s.queueLength = promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ - Name: "cortex_query_scheduler_queue_length", - Help: "Number of queries in the queue.", - }, []string{"user"}) - - s.discardedRequests = promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_query_scheduler_discarded_requests_total", - Help: "Total number of query requests discarded.", - }, []string{"user"}) - s.requestQueue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay, s.queueLength, s.discardedRequests) - - s.queueDuration = promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_query_scheduler_queue_duration_seconds", - Help: "Time spend by requests in queue before getting picked up by a querier.", - Buckets: prometheus.DefBuckets, - }) - s.connectedQuerierClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_query_scheduler_connected_querier_clients", - Help: "Number of querier worker clients currently connected to the query-scheduler.", - }, s.requestQueue.GetConnectedQuerierWorkersMetric) - s.connectedFrontendClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_query_scheduler_connected_frontend_clients", - Help: "Number of query-frontend worker clients currently connected to the query-scheduler.", - }, s.getConnectedFrontendClientsMetric) - - s.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(s.cleanupMetricsForInactiveUser) - - var err error - s.subservices, err = services.NewManager(s.requestQueue, s.activeUsers) - if err != nil { - return nil, err - } - - s.Service = services.NewBasicService(s.starting, s.running, s.stopping) - return s, nil -} - -// Limits needed for the Query Scheduler - interface used for decoupling. -type Limits interface { - // MaxQueriersPerUser returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. 
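[Editor's note: for orientation, the two scheduler-specific flags registered above can be exercised as below; the FlagSet name and the values are arbitrary, and this is only a sketch of how the vendored Config is meant to be wired:

package main

import (
	"flag"
	"fmt"

	"github.com/cortexproject/cortex/pkg/scheduler"
)

func main() {
	var cfg scheduler.Config
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	cfg.RegisterFlags(fs) // also registers the gRPC client flags with their prefix

	_ = fs.Parse([]string{
		"-query-scheduler.max-outstanding-requests-per-tenant=200",
		"-query-scheduler.querier-forget-delay=30s",
	})
	fmt.Println(cfg.MaxOutstandingPerTenant, cfg.QuerierForgetDelay)
}
]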
- MaxQueriersPerUser(user string) int -} - -type schedulerRequest struct { - frontendAddress string - userID string - queryID uint64 - request *httpgrpc.HTTPRequest - statsEnabled bool - - enqueueTime time.Time - - ctx context.Context - ctxCancel context.CancelFunc - queueSpan opentracing.Span - - // This is only used for testing. - parentSpanContext opentracing.SpanContext -} - -// FrontendLoop handles connection from frontend. -func (s *Scheduler) FrontendLoop(frontend schedulerpb.SchedulerForFrontend_FrontendLoopServer) error { - frontendAddress, frontendCtx, err := s.frontendConnected(frontend) - if err != nil { - return err - } - defer s.frontendDisconnected(frontendAddress) - - // Response to INIT. If scheduler is not running, we skip for-loop, send SHUTTING_DOWN and exit this method. - if s.State() == services.Running { - if err := frontend.Send(&schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK}); err != nil { - return err - } - } - - // We stop accepting new queries in Stopping state. By returning quickly, we disconnect frontends, which in turns - // cancels all their queries. - for s.State() == services.Running { - msg, err := frontend.Recv() - if err != nil { - // No need to report this as error, it is expected when query-frontend performs SendClose() (as frontendSchedulerWorker does). - if err == io.EOF { - return nil - } - return err - } - - if s.State() != services.Running { - break // break out of the loop, and send SHUTTING_DOWN message. - } - - var resp *schedulerpb.SchedulerToFrontend - - switch msg.GetType() { - case schedulerpb.ENQUEUE: - err = s.enqueueRequest(frontendCtx, frontendAddress, msg) - switch { - case err == nil: - resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK} - case err == queue.ErrTooManyRequests: - resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.TOO_MANY_REQUESTS_PER_TENANT} - default: - resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.ERROR, Error: err.Error()} - } - - case schedulerpb.CANCEL: - s.cancelRequestAndRemoveFromPending(frontendAddress, msg.QueryID) - resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK} - - default: - level.Error(s.log).Log("msg", "unknown request type from frontend", "addr", frontendAddress, "type", msg.GetType()) - return errors.New("unknown request type") - } - - err = frontend.Send(resp) - // Failure to send response results in ending this connection. - if err != nil { - return err - } - } - - // Report shutdown back to frontend, so that it can retry with different scheduler. Also stop the frontend loop. 
- return frontend.Send(&schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN}) -} - -func (s *Scheduler) frontendConnected(frontend schedulerpb.SchedulerForFrontend_FrontendLoopServer) (string, context.Context, error) { - msg, err := frontend.Recv() - if err != nil { - return "", nil, err - } - if msg.Type != schedulerpb.INIT || msg.FrontendAddress == "" { - return "", nil, errors.New("no frontend address") - } - - s.connectedFrontendsMu.Lock() - defer s.connectedFrontendsMu.Unlock() - - cf := s.connectedFrontends[msg.FrontendAddress] - if cf == nil { - cf = &connectedFrontend{ - connections: 0, - } - cf.ctx, cf.cancel = context.WithCancel(context.Background()) - s.connectedFrontends[msg.FrontendAddress] = cf - } - - cf.connections++ - return msg.FrontendAddress, cf.ctx, nil -} - -func (s *Scheduler) frontendDisconnected(frontendAddress string) { - s.connectedFrontendsMu.Lock() - defer s.connectedFrontendsMu.Unlock() - - cf := s.connectedFrontends[frontendAddress] - cf.connections-- - if cf.connections == 0 { - delete(s.connectedFrontends, frontendAddress) - cf.cancel() - } -} - -func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr string, msg *schedulerpb.FrontendToScheduler) error { - // Create new context for this request, to support cancellation. - ctx, cancel := context.WithCancel(frontendContext) - shouldCancel := true - defer func() { - if shouldCancel { - cancel() - } - }() - - // Extract tracing information from headers in HTTP request. FrontendContext doesn't have the correct tracing - // information, since that is a long-running request. - tracer := opentracing.GlobalTracer() - parentSpanContext, err := httpgrpcutil.GetParentSpanForRequest(tracer, msg.HttpRequest) - if err != nil { - return err - } - - userID := msg.GetUserID() - - req := &schedulerRequest{ - frontendAddress: frontendAddr, - userID: msg.UserID, - queryID: msg.QueryID, - request: msg.HttpRequest, - statsEnabled: msg.StatsEnabled, - } - - now := time.Now() - - req.parentSpanContext = parentSpanContext - req.queueSpan, req.ctx = opentracing.StartSpanFromContextWithTracer(ctx, tracer, "queued", opentracing.ChildOf(parentSpanContext)) - req.enqueueTime = now - req.ctxCancel = cancel - - // aggregate the max queriers limit in the case of a multi tenant query - tenantIDs, err := tenant.TenantIDsFromOrgID(userID) - if err != nil { - return err - } - maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, s.limits.MaxQueriersPerUser) - - s.activeUsers.UpdateUserTimestamp(userID, now) - return s.requestQueue.EnqueueRequest(userID, req, maxQueriers, func() { - shouldCancel = false - - s.pendingRequestsMu.Lock() - defer s.pendingRequestsMu.Unlock() - s.pendingRequests[requestKey{frontendAddr: frontendAddr, queryID: msg.QueryID}] = req - }) -} - -// This method doesn't do removal from the queue. -func (s *Scheduler) cancelRequestAndRemoveFromPending(frontendAddr string, queryID uint64) { - s.pendingRequestsMu.Lock() - defer s.pendingRequestsMu.Unlock() - - key := requestKey{frontendAddr: frontendAddr, queryID: queryID} - req := s.pendingRequests[key] - if req != nil { - req.ctxCancel() - } - delete(s.pendingRequests, key) -} - -// QuerierLoop is started by querier to receive queries from scheduler. 
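// [Editor's sketch — not part of the patch.] enqueueRequest above collapses the
// per-tenant MaxQueriersPerUser limits of a multi-tenant query into one value
// via validation.SmallestPositiveNonZeroIntPerTenant: the smallest non-zero
// limit wins, and 0 (shuffle sharding disabled) results only if no tenant sets
// a limit. A self-contained rendering of that rule, under those assumptions:
//
//	func smallestPositiveNonZero(tenantIDs []string, limit func(string) int) int {
//		result := 0
//		for _, id := range tenantIDs {
//			if v := limit(id); v > 0 && (result == 0 || v < result) {
//				result = v
//			}
//		}
//		return result // 0 means "use all queriers"
//	}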
-func (s *Scheduler) QuerierLoop(querier schedulerpb.SchedulerForQuerier_QuerierLoopServer) error {
-	resp, err := querier.Recv()
-	if err != nil {
-		return err
-	}
-
-	querierID := resp.GetQuerierID()
-
-	s.requestQueue.RegisterQuerierConnection(querierID)
-	defer s.requestQueue.UnregisterQuerierConnection(querierID)
-
-	// If the downstream connection to the querier is cancelled,
-	// we need to ping the condition variable to unblock getNextRequestForQuerier.
-	// Ideally we'd have ctx aware condition variables...
-	go func() {
-		<-querier.Context().Done()
-		s.requestQueue.QuerierDisconnecting()
-	}()
-
-	lastUserIndex := queue.FirstUser()
-
-	// In the stopping state the scheduler is not accepting new queries, but still dispatching queries already in the queues.
-	for s.isRunningOrStopping() {
-		req, idx, err := s.requestQueue.GetNextRequestForQuerier(querier.Context(), lastUserIndex, querierID)
-		if err != nil {
-			return err
-		}
-		lastUserIndex = idx
-
-		r := req.(*schedulerRequest)
-
-		s.queueDuration.Observe(time.Since(r.enqueueTime).Seconds())
-		r.queueSpan.Finish()
-
-		/*
-			We want to dequeue the next unexpired request from the chosen tenant queue.
-			The chance of choosing a particular tenant for dequeueing is (1/active_tenants).
-			This is problematic under load, especially with other middleware enabled such as
-			querier.split-by-interval, where one request may fan out into many.
-			If expired requests aren't exhausted before checking another tenant, it would take
-			n_active_tenants * n_expired_requests_at_front_of_queue requests being processed
-			before an active request was handled for the tenant in question.
-			If this tenant meanwhile continued to queue requests,
-			it's possible that its own queue would perpetually contain only expired requests.
-		*/
-
-		if r.ctx.Err() != nil {
-			// Remove from pending requests.
-			s.cancelRequestAndRemoveFromPending(r.frontendAddress, r.queryID)
-
-			lastUserIndex = lastUserIndex.ReuseLastUser()
-			continue
-		}
-
-		if err := s.forwardRequestToQuerier(querier, r); err != nil {
-			return err
-		}
-	}
-
-	return errSchedulerIsNotRunning
-}
-
-func (s *Scheduler) NotifyQuerierShutdown(_ context.Context, req *schedulerpb.NotifyQuerierShutdownRequest) (*schedulerpb.NotifyQuerierShutdownResponse, error) {
-	level.Info(s.log).Log("msg", "received shutdown notification from querier", "querier", req.GetQuerierID())
-	s.requestQueue.NotifyQuerierShutdown(req.GetQuerierID())
-
-	return &schedulerpb.NotifyQuerierShutdownResponse{}, nil
-}
-
-func (s *Scheduler) forwardRequestToQuerier(querier schedulerpb.SchedulerForQuerier_QuerierLoopServer, req *schedulerRequest) error {
-	// Make sure to cancel the request at the end to clean up resources.
-	defer s.cancelRequestAndRemoveFromPending(req.frontendAddress, req.queryID)
-
-	// Handle the stream sending & receiving on a goroutine so we can
-	// monitor the contexts in a select and cancel things appropriately.
-	errCh := make(chan error, 1)
-	go func() {
-		err := querier.Send(&schedulerpb.SchedulerToQuerier{
-			UserID:          req.userID,
-			QueryID:         req.queryID,
-			FrontendAddress: req.frontendAddress,
-			HttpRequest:     req.request,
-			StatsEnabled:    req.statsEnabled,
-		})
-		if err != nil {
-			errCh <- err
-			return
-		}
-
-		_, err = querier.Recv()
-		errCh <- err
-	}()
-
-	select {
-	case <-req.ctx.Done():
-		// If the upstream request is cancelled (eg. frontend issued CANCEL or closed connection),
-		// we need to cancel the downstream req. The only way we can do that is to close the stream (by returning an error here).
-		// The querier expects these semantics.
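// [Editor's aside — not part of the patch.] The goroutine-plus-errCh shape used
// in forwardRequestToQuerier above is the usual Go idiom for racing a blocking
// stream operation against a context; a generic sketch of the same pattern:
//
//	errCh := make(chan error, 1) // buffered, so the goroutine never leaks
//	go func() { errCh <- doBlockingIO() }()
//	select {
//	case <-ctx.Done():
//		return ctx.Err() // abandon the in-flight I/O; the caller tears down the stream
//	case err := <-errCh:
//		return err
//	}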
-		return req.ctx.Err()
-
-	case err := <-errCh:
-		// If there was an error handling this request due to network IO,
-		// then error out this upstream request _and_ stream.
-
-		if err != nil {
-			s.forwardErrorToFrontend(req.ctx, req, err)
-		}
-		return err
-	}
-}
-
-func (s *Scheduler) forwardErrorToFrontend(ctx context.Context, req *schedulerRequest, requestErr error) {
-	opts, err := s.cfg.GRPCClientConfig.DialOption([]grpc.UnaryClientInterceptor{
-		otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
-		middleware.ClientUserHeaderInterceptor},
-		nil)
-	if err != nil {
-		level.Warn(s.log).Log("msg", "failed to create gRPC options for the connection to frontend to report error", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr)
-		return
-	}
-
-	conn, err := grpc.DialContext(ctx, req.frontendAddress, opts...)
-	if err != nil {
-		level.Warn(s.log).Log("msg", "failed to create gRPC connection to frontend to report error", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr)
-		return
-	}
-
-	defer func() {
-		_ = conn.Close()
-	}()
-
-	client := frontendv2pb.NewFrontendForQuerierClient(conn)
-
-	userCtx := user.InjectOrgID(ctx, req.userID)
-	_, err = client.QueryResult(userCtx, &frontendv2pb.QueryResultRequest{
-		QueryID: req.queryID,
-		HttpResponse: &httpgrpc.HTTPResponse{
-			Code: http.StatusInternalServerError,
-			Body: []byte(requestErr.Error()),
-		},
-	})
-
-	if err != nil {
-		level.Warn(s.log).Log("msg", "failed to forward error to frontend", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr)
-		return
-	}
-}
-
-func (s *Scheduler) isRunningOrStopping() bool {
-	st := s.State()
-	return st == services.Running || st == services.Stopping
-}
-
-func (s *Scheduler) starting(ctx context.Context) error {
-	s.subservicesWatcher.WatchManager(s.subservices)
-
-	if err := services.StartManagerAndAwaitHealthy(ctx, s.subservices); err != nil {
-		return errors.Wrap(err, "unable to start scheduler subservices")
-	}
-
-	return nil
-}
-
-func (s *Scheduler) running(ctx context.Context) error {
-	for {
-		select {
-		case <-ctx.Done():
-			return nil
-		case err := <-s.subservicesWatcher.Chan():
-			return errors.Wrap(err, "scheduler subservice failed")
-		}
-	}
-}
-
-// Close the Scheduler.
-func (s *Scheduler) stopping(_ error) error {
-	// This will also stop the request queue, which stops accepting new requests and errors out any pending requests.
-	return services.StopManagerAndAwaitStopped(context.Background(), s.subservices)
-}
-
-func (s *Scheduler) cleanupMetricsForInactiveUser(user string) {
-	s.queueLength.DeleteLabelValues(user)
-	s.discardedRequests.DeleteLabelValues(user)
-}
-
-func (s *Scheduler) getConnectedFrontendClientsMetric() float64 {
-	s.connectedFrontendsMu.Lock()
-	defer s.connectedFrontendsMu.Unlock()
-
-	count := 0
-	for _, workers := range s.connectedFrontends {
-		count += workers.connections
-	}
-
-	return float64(count)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go
deleted file mode 100644
index 1b6fda6e6..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go
+++ /dev/null
@@ -1,2323 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: scheduler.proto - -package schedulerpb - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - httpgrpc "github.com/weaveworks/common/httpgrpc" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type FrontendToSchedulerType int32 - -const ( - INIT FrontendToSchedulerType = 0 - ENQUEUE FrontendToSchedulerType = 1 - CANCEL FrontendToSchedulerType = 2 -) - -var FrontendToSchedulerType_name = map[int32]string{ - 0: "INIT", - 1: "ENQUEUE", - 2: "CANCEL", -} - -var FrontendToSchedulerType_value = map[string]int32{ - "INIT": 0, - "ENQUEUE": 1, - "CANCEL": 2, -} - -func (FrontendToSchedulerType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{0} -} - -type SchedulerToFrontendStatus int32 - -const ( - OK SchedulerToFrontendStatus = 0 - TOO_MANY_REQUESTS_PER_TENANT SchedulerToFrontendStatus = 1 - ERROR SchedulerToFrontendStatus = 2 - SHUTTING_DOWN SchedulerToFrontendStatus = 3 -) - -var SchedulerToFrontendStatus_name = map[int32]string{ - 0: "OK", - 1: "TOO_MANY_REQUESTS_PER_TENANT", - 2: "ERROR", - 3: "SHUTTING_DOWN", -} - -var SchedulerToFrontendStatus_value = map[string]int32{ - "OK": 0, - "TOO_MANY_REQUESTS_PER_TENANT": 1, - "ERROR": 2, - "SHUTTING_DOWN": 3, -} - -func (SchedulerToFrontendStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{1} -} - -// Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. -// To signal that querier is ready to accept another request, querier sends empty message. 
-type QuerierToScheduler struct { - QuerierID string `protobuf:"bytes,1,opt,name=querierID,proto3" json:"querierID,omitempty"` -} - -func (m *QuerierToScheduler) Reset() { *m = QuerierToScheduler{} } -func (*QuerierToScheduler) ProtoMessage() {} -func (*QuerierToScheduler) Descriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{0} -} -func (m *QuerierToScheduler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QuerierToScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QuerierToScheduler.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QuerierToScheduler) XXX_Merge(src proto.Message) { - xxx_messageInfo_QuerierToScheduler.Merge(m, src) -} -func (m *QuerierToScheduler) XXX_Size() int { - return m.Size() -} -func (m *QuerierToScheduler) XXX_DiscardUnknown() { - xxx_messageInfo_QuerierToScheduler.DiscardUnknown(m) -} - -var xxx_messageInfo_QuerierToScheduler proto.InternalMessageInfo - -func (m *QuerierToScheduler) GetQuerierID() string { - if m != nil { - return m.QuerierID - } - return "" -} - -type SchedulerToQuerier struct { - // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), - // it identifies the query by using this ID. - QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` - HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` - // Where should querier send HTTP Response to (using FrontendForQuerier interface). - FrontendAddress string `protobuf:"bytes,3,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` - // User who initiated the request. Needed to send reply back to frontend. - UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` - // Whether query statistics tracking should be enabled. The response will include - // statistics only when this option is enabled. 
- StatsEnabled bool `protobuf:"varint,5,opt,name=statsEnabled,proto3" json:"statsEnabled,omitempty"` -} - -func (m *SchedulerToQuerier) Reset() { *m = SchedulerToQuerier{} } -func (*SchedulerToQuerier) ProtoMessage() {} -func (*SchedulerToQuerier) Descriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{1} -} -func (m *SchedulerToQuerier) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SchedulerToQuerier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SchedulerToQuerier.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SchedulerToQuerier) XXX_Merge(src proto.Message) { - xxx_messageInfo_SchedulerToQuerier.Merge(m, src) -} -func (m *SchedulerToQuerier) XXX_Size() int { - return m.Size() -} -func (m *SchedulerToQuerier) XXX_DiscardUnknown() { - xxx_messageInfo_SchedulerToQuerier.DiscardUnknown(m) -} - -var xxx_messageInfo_SchedulerToQuerier proto.InternalMessageInfo - -func (m *SchedulerToQuerier) GetQueryID() uint64 { - if m != nil { - return m.QueryID - } - return 0 -} - -func (m *SchedulerToQuerier) GetHttpRequest() *httpgrpc.HTTPRequest { - if m != nil { - return m.HttpRequest - } - return nil -} - -func (m *SchedulerToQuerier) GetFrontendAddress() string { - if m != nil { - return m.FrontendAddress - } - return "" -} - -func (m *SchedulerToQuerier) GetUserID() string { - if m != nil { - return m.UserID - } - return "" -} - -func (m *SchedulerToQuerier) GetStatsEnabled() bool { - if m != nil { - return m.StatsEnabled - } - return false -} - -type FrontendToScheduler struct { - Type FrontendToSchedulerType `protobuf:"varint,1,opt,name=type,proto3,enum=schedulerpb.FrontendToSchedulerType" json:"type,omitempty"` - // Used by INIT message. Will be put into all requests passed to querier. - FrontendAddress string `protobuf:"bytes,2,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` - // Used by ENQUEUE and CANCEL. - // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. - QueryID uint64 `protobuf:"varint,3,opt,name=queryID,proto3" json:"queryID,omitempty"` - // Following are used by ENQUEUE only. 
- UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` - HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,5,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` - StatsEnabled bool `protobuf:"varint,6,opt,name=statsEnabled,proto3" json:"statsEnabled,omitempty"` -} - -func (m *FrontendToScheduler) Reset() { *m = FrontendToScheduler{} } -func (*FrontendToScheduler) ProtoMessage() {} -func (*FrontendToScheduler) Descriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{2} -} -func (m *FrontendToScheduler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FrontendToScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FrontendToScheduler.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FrontendToScheduler) XXX_Merge(src proto.Message) { - xxx_messageInfo_FrontendToScheduler.Merge(m, src) -} -func (m *FrontendToScheduler) XXX_Size() int { - return m.Size() -} -func (m *FrontendToScheduler) XXX_DiscardUnknown() { - xxx_messageInfo_FrontendToScheduler.DiscardUnknown(m) -} - -var xxx_messageInfo_FrontendToScheduler proto.InternalMessageInfo - -func (m *FrontendToScheduler) GetType() FrontendToSchedulerType { - if m != nil { - return m.Type - } - return INIT -} - -func (m *FrontendToScheduler) GetFrontendAddress() string { - if m != nil { - return m.FrontendAddress - } - return "" -} - -func (m *FrontendToScheduler) GetQueryID() uint64 { - if m != nil { - return m.QueryID - } - return 0 -} - -func (m *FrontendToScheduler) GetUserID() string { - if m != nil { - return m.UserID - } - return "" -} - -func (m *FrontendToScheduler) GetHttpRequest() *httpgrpc.HTTPRequest { - if m != nil { - return m.HttpRequest - } - return nil -} - -func (m *FrontendToScheduler) GetStatsEnabled() bool { - if m != nil { - return m.StatsEnabled - } - return false -} - -type SchedulerToFrontend struct { - Status SchedulerToFrontendStatus `protobuf:"varint,1,opt,name=status,proto3,enum=schedulerpb.SchedulerToFrontendStatus" json:"status,omitempty"` - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` -} - -func (m *SchedulerToFrontend) Reset() { *m = SchedulerToFrontend{} } -func (*SchedulerToFrontend) ProtoMessage() {} -func (*SchedulerToFrontend) Descriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{3} -} -func (m *SchedulerToFrontend) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SchedulerToFrontend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SchedulerToFrontend.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SchedulerToFrontend) XXX_Merge(src proto.Message) { - xxx_messageInfo_SchedulerToFrontend.Merge(m, src) -} -func (m *SchedulerToFrontend) XXX_Size() int { - return m.Size() -} -func (m *SchedulerToFrontend) XXX_DiscardUnknown() { - xxx_messageInfo_SchedulerToFrontend.DiscardUnknown(m) -} - -var xxx_messageInfo_SchedulerToFrontend proto.InternalMessageInfo - -func (m *SchedulerToFrontend) GetStatus() SchedulerToFrontendStatus { - if m != nil { - return m.Status - } - return OK -} - -func (m *SchedulerToFrontend) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type 
NotifyQuerierShutdownRequest struct { - QuerierID string `protobuf:"bytes,1,opt,name=querierID,proto3" json:"querierID,omitempty"` -} - -func (m *NotifyQuerierShutdownRequest) Reset() { *m = NotifyQuerierShutdownRequest{} } -func (*NotifyQuerierShutdownRequest) ProtoMessage() {} -func (*NotifyQuerierShutdownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{4} -} -func (m *NotifyQuerierShutdownRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NotifyQuerierShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NotifyQuerierShutdownRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NotifyQuerierShutdownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NotifyQuerierShutdownRequest.Merge(m, src) -} -func (m *NotifyQuerierShutdownRequest) XXX_Size() int { - return m.Size() -} -func (m *NotifyQuerierShutdownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NotifyQuerierShutdownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NotifyQuerierShutdownRequest proto.InternalMessageInfo - -func (m *NotifyQuerierShutdownRequest) GetQuerierID() string { - if m != nil { - return m.QuerierID - } - return "" -} - -type NotifyQuerierShutdownResponse struct { -} - -func (m *NotifyQuerierShutdownResponse) Reset() { *m = NotifyQuerierShutdownResponse{} } -func (*NotifyQuerierShutdownResponse) ProtoMessage() {} -func (*NotifyQuerierShutdownResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2b3fc28395a6d9c5, []int{5} -} -func (m *NotifyQuerierShutdownResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NotifyQuerierShutdownResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NotifyQuerierShutdownResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NotifyQuerierShutdownResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NotifyQuerierShutdownResponse.Merge(m, src) -} -func (m *NotifyQuerierShutdownResponse) XXX_Size() int { - return m.Size() -} -func (m *NotifyQuerierShutdownResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NotifyQuerierShutdownResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NotifyQuerierShutdownResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("schedulerpb.FrontendToSchedulerType", FrontendToSchedulerType_name, FrontendToSchedulerType_value) - proto.RegisterEnum("schedulerpb.SchedulerToFrontendStatus", SchedulerToFrontendStatus_name, SchedulerToFrontendStatus_value) - proto.RegisterType((*QuerierToScheduler)(nil), "schedulerpb.QuerierToScheduler") - proto.RegisterType((*SchedulerToQuerier)(nil), "schedulerpb.SchedulerToQuerier") - proto.RegisterType((*FrontendToScheduler)(nil), "schedulerpb.FrontendToScheduler") - proto.RegisterType((*SchedulerToFrontend)(nil), "schedulerpb.SchedulerToFrontend") - proto.RegisterType((*NotifyQuerierShutdownRequest)(nil), "schedulerpb.NotifyQuerierShutdownRequest") - proto.RegisterType((*NotifyQuerierShutdownResponse)(nil), "schedulerpb.NotifyQuerierShutdownResponse") -} - -func init() { proto.RegisterFile("scheduler.proto", fileDescriptor_2b3fc28395a6d9c5) } - -var fileDescriptor_2b3fc28395a6d9c5 = []byte{ - // 650 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x4f, 0x13, 0x41, - 0x14, 0xde, 0x29, 0x6d, 0x81, 0x57, 0x94, 0x75, 0x00, 0xad, 0x0d, 0x0e, 0x4d, 0x63, 0x4c, 0x25, - 0xb1, 0x35, 0xd5, 0x44, 0x0f, 0xc4, 0xa4, 0xc2, 0x22, 0x8d, 0xb8, 0x85, 0xe9, 0x34, 0xfe, 0xb8, - 0x34, 0xb4, 0x1d, 0x5a, 0x02, 0xec, 0x2c, 0xb3, 0xbb, 0x92, 0xde, 0x3c, 0x7a, 0xf4, 0xcf, 0xf0, - 0x4f, 0xf1, 0x62, 0xc2, 0x91, 0x83, 0x07, 0x59, 0x2e, 0x1e, 0xf9, 0x13, 0x0c, 0xd3, 0x6d, 0xdd, - 0xd6, 0x16, 0xb8, 0xbd, 0xf7, 0xf6, 0xfb, 0x76, 0xde, 0xf7, 0xbd, 0x37, 0x03, 0xb3, 0x4e, 0xa3, - 0xcd, 0x9b, 0xde, 0x01, 0x97, 0x39, 0x5b, 0x0a, 0x57, 0xe0, 0x44, 0xbf, 0x60, 0xd7, 0x53, 0x4f, - 0x5a, 0x7b, 0x6e, 0xdb, 0xab, 0xe7, 0x1a, 0xe2, 0x30, 0xdf, 0x12, 0x2d, 0x91, 0x57, 0x98, 0xba, - 0xb7, 0xab, 0x32, 0x95, 0xa8, 0xa8, 0xcb, 0x4d, 0x3d, 0x0f, 0xc1, 0x8f, 0xf9, 0xce, 0x67, 0x7e, - 0x2c, 0xe4, 0xbe, 0x93, 0x6f, 0x88, 0xc3, 0x43, 0x61, 0xe5, 0xdb, 0xae, 0x6b, 0xb7, 0xa4, 0xdd, - 0xe8, 0x07, 0x5d, 0x56, 0xa6, 0x00, 0x78, 0xdb, 0xe3, 0x72, 0x8f, 0x4b, 0x26, 0x2a, 0xbd, 0xc3, - 0xf1, 0x22, 0x4c, 0x1f, 0x75, 0xab, 0xa5, 0xb5, 0x24, 0x4a, 0xa3, 0xec, 0x34, 0xfd, 0x57, 0xc8, - 0xfc, 0x44, 0x80, 0xfb, 0x58, 0x26, 0x02, 0x3e, 0x4e, 0xc2, 0xe4, 0x25, 0xa6, 0x13, 0x50, 0xa2, - 0xb4, 0x97, 0xe2, 0x17, 0x90, 0xb8, 0x3c, 0x96, 0xf2, 0x23, 0x8f, 0x3b, 0x6e, 0x32, 0x92, 0x46, - 0xd9, 0x44, 0x61, 0x21, 0xd7, 0x6f, 0x65, 0x83, 0xb1, 0xad, 0xe0, 0x23, 0x0d, 0x23, 0x71, 0x16, - 0x66, 0x77, 0xa5, 0xb0, 0x5c, 0x6e, 0x35, 0x8b, 0xcd, 0xa6, 0xe4, 0x8e, 0x93, 0x9c, 0x50, 0xdd, - 0x0c, 0x97, 0xf1, 0x5d, 0x88, 0x7b, 0x8e, 0x6a, 0x37, 0xaa, 0x00, 0x41, 0x86, 0x33, 0x30, 0xe3, - 0xb8, 0x3b, 0xae, 0x63, 0x58, 0x3b, 0xf5, 0x03, 0xde, 0x4c, 0xc6, 0xd2, 0x28, 0x3b, 0x45, 0x07, - 0x6a, 0x99, 0xaf, 0x11, 0x98, 0x5b, 0x0f, 0xfe, 0x17, 0x76, 0xe1, 0x25, 0x44, 0xdd, 0x8e, 0xcd, - 0x95, 0x9a, 0xdb, 0x85, 0x87, 0xb9, 0xd0, 0x70, 0x72, 0x23, 0xf0, 0xac, 0x63, 0x73, 0xaa, 0x18, - 0xa3, 0xfa, 0x8e, 0x8c, 0xee, 0x3b, 0x64, 0xda, 0xc4, 0xa0, 0x69, 0xe3, 0x14, 0x0d, 0x99, 0x19, - 0xbb, 0xb1, 0x99, 0xc3, 0x56, 0xc4, 0x47, 0x58, 0xb1, 0x0f, 0x73, 0xa1, 0xc9, 0xf6, 0x44, 0xe2, - 0x57, 0x10, 0xbf, 0x84, 0x79, 0x4e, 0xe0, 0xc5, 0xa3, 0x01, 0x2f, 0x46, 0x30, 0x2a, 0x0a, 0x4d, - 0x03, 0x16, 0x9e, 0x87, 0x18, 0x97, 0x52, 0xc8, 0xc0, 0x85, 0x6e, 0x92, 0x59, 0x81, 0x45, 0x53, - 0xb8, 0x7b, 0xbb, 0x9d, 0x60, 0x83, 0x2a, 0x6d, 0xcf, 0x6d, 0x8a, 0x63, 0xab, 0xd7, 0xf0, 0xd5, - 0x5b, 0xb8, 0x04, 0x0f, 0xc6, 0xb0, 0x1d, 0x5b, 0x58, 0x0e, 0x5f, 0x5e, 0x81, 0x7b, 0x63, 0xa6, - 0x84, 0xa7, 0x20, 0x5a, 0x32, 0x4b, 0x4c, 0xd7, 0x70, 0x02, 0x26, 0x0d, 0x73, 0xbb, 0x6a, 0x54, - 0x0d, 0x1d, 0x61, 0x80, 0xf8, 0x6a, 0xd1, 0x5c, 0x35, 0x36, 0xf5, 0xc8, 0x72, 0x03, 0xee, 0x8f, - 0xd5, 0x85, 0xe3, 0x10, 0x29, 0xbf, 0xd5, 0x35, 0x9c, 0x86, 0x45, 0x56, 0x2e, 0xd7, 0xde, 0x15, - 0xcd, 0x8f, 0x35, 0x6a, 0x6c, 0x57, 0x8d, 0x0a, 0xab, 0xd4, 0xb6, 0x0c, 0x5a, 0x63, 0x86, 0x59, - 0x34, 0x99, 0x8e, 0xf0, 0x34, 0xc4, 0x0c, 0x4a, 0xcb, 0x54, 0x8f, 0xe0, 0x3b, 0x70, 0xab, 0xb2, - 0x51, 0x65, 0xac, 0x64, 0xbe, 0xa9, 0xad, 0x95, 0xdf, 0x9b, 0xfa, 0x44, 0xe1, 0x17, 0x0a, 0xf9, - 0xbd, 0x2e, 0x64, 0xef, 0x2a, 0x55, 0x21, 0x11, 0x84, 0x9b, 0x42, 0xd8, 0x78, 0x69, 0xc0, 0xee, - 0xff, 0xef, 0x6b, 0x6a, 0x69, 0xdc, 0x3c, 0x02, 0x6c, 0x46, 0xcb, 0xa2, 0xa7, 0x08, 0x5b, 0xb0, - 0x30, 0xd2, 0x32, 0xfc, 0x78, 0x80, 0x7f, 0xd5, 0x50, 0x52, 0xcb, 0x37, 0x81, 0x76, 0x27, 0x50, - 0xb0, 0x61, 0x3e, 0xac, 0xae, 0xbf, 0x4e, 0x1f, 0x60, 0xa6, 0x17, 0x2b, 0x7d, 0xe9, 0xeb, 0xae, - 
0x56, 0x2a, 0x7d, 0xdd, 0xc2, 0x75, 0x15, 0xbe, 0x2e, 0x9e, 0x9c, 0x11, 0xed, 0xf4, 0x8c, 0x68, - 0x17, 0x67, 0x04, 0x7d, 0xf1, 0x09, 0xfa, 0xee, 0x13, 0xf4, 0xc3, 0x27, 0xe8, 0xc4, 0x27, 0xe8, - 0xb7, 0x4f, 0xd0, 0x1f, 0x9f, 0x68, 0x17, 0x3e, 0x41, 0xdf, 0xce, 0x89, 0x76, 0x72, 0x4e, 0xb4, - 0xd3, 0x73, 0xa2, 0x7d, 0x0a, 0x3f, 0xbb, 0xf5, 0xb8, 0x7a, 0x18, 0x9f, 0xfd, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x49, 0x11, 0xf9, 0x34, 0x9d, 0x05, 0x00, 0x00, -} - -func (x FrontendToSchedulerType) String() string { - s, ok := FrontendToSchedulerType_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (x SchedulerToFrontendStatus) String() string { - s, ok := SchedulerToFrontendStatus_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *QuerierToScheduler) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QuerierToScheduler) - if !ok { - that2, ok := that.(QuerierToScheduler) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.QuerierID != that1.QuerierID { - return false - } - return true -} -func (this *SchedulerToQuerier) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*SchedulerToQuerier) - if !ok { - that2, ok := that.(SchedulerToQuerier) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.QueryID != that1.QueryID { - return false - } - if !this.HttpRequest.Equal(that1.HttpRequest) { - return false - } - if this.FrontendAddress != that1.FrontendAddress { - return false - } - if this.UserID != that1.UserID { - return false - } - if this.StatsEnabled != that1.StatsEnabled { - return false - } - return true -} -func (this *FrontendToScheduler) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*FrontendToScheduler) - if !ok { - that2, ok := that.(FrontendToScheduler) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if this.FrontendAddress != that1.FrontendAddress { - return false - } - if this.QueryID != that1.QueryID { - return false - } - if this.UserID != that1.UserID { - return false - } - if !this.HttpRequest.Equal(that1.HttpRequest) { - return false - } - if this.StatsEnabled != that1.StatsEnabled { - return false - } - return true -} -func (this *SchedulerToFrontend) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*SchedulerToFrontend) - if !ok { - that2, ok := that.(SchedulerToFrontend) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Status != that1.Status { - return false - } - if this.Error != that1.Error { - return false - } - return true -} -func (this *NotifyQuerierShutdownRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*NotifyQuerierShutdownRequest) - if !ok { - that2, ok := that.(NotifyQuerierShutdownRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.QuerierID != that1.QuerierID { - return false - } - 
return true -} -func (this *NotifyQuerierShutdownResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*NotifyQuerierShutdownResponse) - if !ok { - that2, ok := that.(NotifyQuerierShutdownResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *QuerierToScheduler) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&schedulerpb.QuerierToScheduler{") - s = append(s, "QuerierID: "+fmt.Sprintf("%#v", this.QuerierID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SchedulerToQuerier) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&schedulerpb.SchedulerToQuerier{") - s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") - if this.HttpRequest != nil { - s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") - } - s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") - s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") - s = append(s, "StatsEnabled: "+fmt.Sprintf("%#v", this.StatsEnabled)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FrontendToScheduler) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&schedulerpb.FrontendToScheduler{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") - s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") - s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") - if this.HttpRequest != nil { - s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") - } - s = append(s, "StatsEnabled: "+fmt.Sprintf("%#v", this.StatsEnabled)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SchedulerToFrontend) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&schedulerpb.SchedulerToFrontend{") - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *NotifyQuerierShutdownRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&schedulerpb.NotifyQuerierShutdownRequest{") - s = append(s, "QuerierID: "+fmt.Sprintf("%#v", this.QuerierID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *NotifyQuerierShutdownResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&schedulerpb.NotifyQuerierShutdownResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringScheduler(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// SchedulerForQuerierClient is the client API for SchedulerForQuerier service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SchedulerForQuerierClient interface { - // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for - // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, - // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. - // - // Long-running loop is used to detect broken connection between scheduler and querier. This is important - // for scheduler to keep a list of connected queriers up-to-date. - QuerierLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForQuerier_QuerierLoopClient, error) - // The querier notifies the query-scheduler that it started a graceful shutdown. - NotifyQuerierShutdown(ctx context.Context, in *NotifyQuerierShutdownRequest, opts ...grpc.CallOption) (*NotifyQuerierShutdownResponse, error) -} - -type schedulerForQuerierClient struct { - cc *grpc.ClientConn -} - -func NewSchedulerForQuerierClient(cc *grpc.ClientConn) SchedulerForQuerierClient { - return &schedulerForQuerierClient{cc} -} - -func (c *schedulerForQuerierClient) QuerierLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForQuerier_QuerierLoopClient, error) { - stream, err := c.cc.NewStream(ctx, &_SchedulerForQuerier_serviceDesc.Streams[0], "/schedulerpb.SchedulerForQuerier/QuerierLoop", opts...) - if err != nil { - return nil, err - } - x := &schedulerForQuerierQuerierLoopClient{stream} - return x, nil -} - -type SchedulerForQuerier_QuerierLoopClient interface { - Send(*QuerierToScheduler) error - Recv() (*SchedulerToQuerier, error) - grpc.ClientStream -} - -type schedulerForQuerierQuerierLoopClient struct { - grpc.ClientStream -} - -func (x *schedulerForQuerierQuerierLoopClient) Send(m *QuerierToScheduler) error { - return x.ClientStream.SendMsg(m) -} - -func (x *schedulerForQuerierQuerierLoopClient) Recv() (*SchedulerToQuerier, error) { - m := new(SchedulerToQuerier) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *schedulerForQuerierClient) NotifyQuerierShutdown(ctx context.Context, in *NotifyQuerierShutdownRequest, opts ...grpc.CallOption) (*NotifyQuerierShutdownResponse, error) { - out := new(NotifyQuerierShutdownResponse) - err := c.cc.Invoke(ctx, "/schedulerpb.SchedulerForQuerier/NotifyQuerierShutdown", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SchedulerForQuerierServer is the server API for SchedulerForQuerier service. -type SchedulerForQuerierServer interface { - // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for - // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, - // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. - // - // Long-running loop is used to detect broken connection between scheduler and querier. This is important - // for scheduler to keep a list of connected queriers up-to-date. - QuerierLoop(SchedulerForQuerier_QuerierLoopServer) error - // The querier notifies the query-scheduler that it started a graceful shutdown. 
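// [Editor's sketch — not part of the patch.] Driving the generated client above
// follows the protocol documented on QuerierLoop: announce a querierID once,
// then send an empty message after each response to request more work. The
// target address and handler below are hypothetical, and error handling is
// elided:
//
//	conn, _ := grpc.Dial("query-scheduler:9095", grpc.WithInsecure())
//	loop, _ := NewSchedulerForQuerierClient(conn).QuerierLoop(ctx)
//	_ = loop.Send(&QuerierToScheduler{QuerierID: "querier-1"}) // identify this querier once
//	for {
//		msg, err := loop.Recv() // blocks until the scheduler dispatches a query
//		if err != nil {
//			break
//		}
//		handle(msg.HttpRequest)              // hypothetical request handler
//		_ = loop.Send(&QuerierToScheduler{}) // empty message: ready for the next request
//	}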
- NotifyQuerierShutdown(context.Context, *NotifyQuerierShutdownRequest) (*NotifyQuerierShutdownResponse, error) -} - -// UnimplementedSchedulerForQuerierServer can be embedded to have forward compatible implementations. -type UnimplementedSchedulerForQuerierServer struct { -} - -func (*UnimplementedSchedulerForQuerierServer) QuerierLoop(srv SchedulerForQuerier_QuerierLoopServer) error { - return status.Errorf(codes.Unimplemented, "method QuerierLoop not implemented") -} -func (*UnimplementedSchedulerForQuerierServer) NotifyQuerierShutdown(ctx context.Context, req *NotifyQuerierShutdownRequest) (*NotifyQuerierShutdownResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NotifyQuerierShutdown not implemented") -} - -func RegisterSchedulerForQuerierServer(s *grpc.Server, srv SchedulerForQuerierServer) { - s.RegisterService(&_SchedulerForQuerier_serviceDesc, srv) -} - -func _SchedulerForQuerier_QuerierLoop_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SchedulerForQuerierServer).QuerierLoop(&schedulerForQuerierQuerierLoopServer{stream}) -} - -type SchedulerForQuerier_QuerierLoopServer interface { - Send(*SchedulerToQuerier) error - Recv() (*QuerierToScheduler, error) - grpc.ServerStream -} - -type schedulerForQuerierQuerierLoopServer struct { - grpc.ServerStream -} - -func (x *schedulerForQuerierQuerierLoopServer) Send(m *SchedulerToQuerier) error { - return x.ServerStream.SendMsg(m) -} - -func (x *schedulerForQuerierQuerierLoopServer) Recv() (*QuerierToScheduler, error) { - m := new(QuerierToScheduler) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _SchedulerForQuerier_NotifyQuerierShutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NotifyQuerierShutdownRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SchedulerForQuerierServer).NotifyQuerierShutdown(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/schedulerpb.SchedulerForQuerier/NotifyQuerierShutdown", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SchedulerForQuerierServer).NotifyQuerierShutdown(ctx, req.(*NotifyQuerierShutdownRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _SchedulerForQuerier_serviceDesc = grpc.ServiceDesc{ - ServiceName: "schedulerpb.SchedulerForQuerier", - HandlerType: (*SchedulerForQuerierServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NotifyQuerierShutdown", - Handler: _SchedulerForQuerier_NotifyQuerierShutdown_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "QuerierLoop", - Handler: _SchedulerForQuerier_QuerierLoop_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "scheduler.proto", -} - -// SchedulerForFrontendClient is the client API for SchedulerForFrontend service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SchedulerForFrontendClient interface { - // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and - // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. - // - // Long-running loop is used to detect broken connection between frontend and scheduler. 
This is important for both - // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending - // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. - FrontendLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForFrontend_FrontendLoopClient, error) -} - -type schedulerForFrontendClient struct { - cc *grpc.ClientConn -} - -func NewSchedulerForFrontendClient(cc *grpc.ClientConn) SchedulerForFrontendClient { - return &schedulerForFrontendClient{cc} -} - -func (c *schedulerForFrontendClient) FrontendLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForFrontend_FrontendLoopClient, error) { - stream, err := c.cc.NewStream(ctx, &_SchedulerForFrontend_serviceDesc.Streams[0], "/schedulerpb.SchedulerForFrontend/FrontendLoop", opts...) - if err != nil { - return nil, err - } - x := &schedulerForFrontendFrontendLoopClient{stream} - return x, nil -} - -type SchedulerForFrontend_FrontendLoopClient interface { - Send(*FrontendToScheduler) error - Recv() (*SchedulerToFrontend, error) - grpc.ClientStream -} - -type schedulerForFrontendFrontendLoopClient struct { - grpc.ClientStream -} - -func (x *schedulerForFrontendFrontendLoopClient) Send(m *FrontendToScheduler) error { - return x.ClientStream.SendMsg(m) -} - -func (x *schedulerForFrontendFrontendLoopClient) Recv() (*SchedulerToFrontend, error) { - m := new(SchedulerToFrontend) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// SchedulerForFrontendServer is the server API for SchedulerForFrontend service. -type SchedulerForFrontendServer interface { - // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and - // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. - // - // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both - // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending - // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. - FrontendLoop(SchedulerForFrontend_FrontendLoopServer) error -} - -// UnimplementedSchedulerForFrontendServer can be embedded to have forward compatible implementations. 
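// [Editor's aside — not part of the patch.] Embedding the Unimplemented server
// below keeps a custom implementation compiling when new RPCs are added to the
// service, since missing methods fall back to returning codes.Unimplemented. A
// minimal, hypothetical use:
//
//	type mySchedulerServer struct {
//		UnimplementedSchedulerForFrontendServer // future methods return codes.Unimplemented
//	}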
-type UnimplementedSchedulerForFrontendServer struct { -} - -func (*UnimplementedSchedulerForFrontendServer) FrontendLoop(srv SchedulerForFrontend_FrontendLoopServer) error { - return status.Errorf(codes.Unimplemented, "method FrontendLoop not implemented") -} - -func RegisterSchedulerForFrontendServer(s *grpc.Server, srv SchedulerForFrontendServer) { - s.RegisterService(&_SchedulerForFrontend_serviceDesc, srv) -} - -func _SchedulerForFrontend_FrontendLoop_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SchedulerForFrontendServer).FrontendLoop(&schedulerForFrontendFrontendLoopServer{stream}) -} - -type SchedulerForFrontend_FrontendLoopServer interface { - Send(*SchedulerToFrontend) error - Recv() (*FrontendToScheduler, error) - grpc.ServerStream -} - -type schedulerForFrontendFrontendLoopServer struct { - grpc.ServerStream -} - -func (x *schedulerForFrontendFrontendLoopServer) Send(m *SchedulerToFrontend) error { - return x.ServerStream.SendMsg(m) -} - -func (x *schedulerForFrontendFrontendLoopServer) Recv() (*FrontendToScheduler, error) { - m := new(FrontendToScheduler) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _SchedulerForFrontend_serviceDesc = grpc.ServiceDesc{ - ServiceName: "schedulerpb.SchedulerForFrontend", - HandlerType: (*SchedulerForFrontendServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "FrontendLoop", - Handler: _SchedulerForFrontend_FrontendLoop_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "scheduler.proto", -} - -func (m *QuerierToScheduler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QuerierToScheduler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QuerierToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.QuerierID) > 0 { - i -= len(m.QuerierID) - copy(dAtA[i:], m.QuerierID) - i = encodeVarintScheduler(dAtA, i, uint64(len(m.QuerierID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SchedulerToQuerier) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SchedulerToQuerier) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SchedulerToQuerier) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StatsEnabled { - i-- - if m.StatsEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.UserID) > 0 { - i -= len(m.UserID) - copy(dAtA[i:], m.UserID) - i = encodeVarintScheduler(dAtA, i, uint64(len(m.UserID))) - i-- - dAtA[i] = 0x22 - } - if len(m.FrontendAddress) > 0 { - i -= len(m.FrontendAddress) - copy(dAtA[i:], m.FrontendAddress) - i = encodeVarintScheduler(dAtA, i, uint64(len(m.FrontendAddress))) - i-- - dAtA[i] = 0x1a - } - if m.HttpRequest != nil { - { - size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintScheduler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.QueryID != 0 { - i = encodeVarintScheduler(dAtA, 
i, uint64(m.QueryID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *FrontendToScheduler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FrontendToScheduler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FrontendToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StatsEnabled { - i-- - if m.StatsEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.HttpRequest != nil { - { - size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintScheduler(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.UserID) > 0 { - i -= len(m.UserID) - copy(dAtA[i:], m.UserID) - i = encodeVarintScheduler(dAtA, i, uint64(len(m.UserID))) - i-- - dAtA[i] = 0x22 - } - if m.QueryID != 0 { - i = encodeVarintScheduler(dAtA, i, uint64(m.QueryID)) - i-- - dAtA[i] = 0x18 - } - if len(m.FrontendAddress) > 0 { - i -= len(m.FrontendAddress) - copy(dAtA[i:], m.FrontendAddress) - i = encodeVarintScheduler(dAtA, i, uint64(len(m.FrontendAddress))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintScheduler(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *SchedulerToFrontend) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SchedulerToFrontend) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SchedulerToFrontend) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintScheduler(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x12 - } - if m.Status != 0 { - i = encodeVarintScheduler(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *NotifyQuerierShutdownRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NotifyQuerierShutdownRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NotifyQuerierShutdownRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.QuerierID) > 0 { - i -= len(m.QuerierID) - copy(dAtA[i:], m.QuerierID) - i = encodeVarintScheduler(dAtA, i, uint64(len(m.QuerierID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NotifyQuerierShutdownResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NotifyQuerierShutdownResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NotifyQuerierShutdownResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return 
len(dAtA) - i, nil -} - -func encodeVarintScheduler(dAtA []byte, offset int, v uint64) int { - offset -= sovScheduler(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QuerierToScheduler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.QuerierID) - if l > 0 { - n += 1 + l + sovScheduler(uint64(l)) - } - return n -} - -func (m *SchedulerToQuerier) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.QueryID != 0 { - n += 1 + sovScheduler(uint64(m.QueryID)) - } - if m.HttpRequest != nil { - l = m.HttpRequest.Size() - n += 1 + l + sovScheduler(uint64(l)) - } - l = len(m.FrontendAddress) - if l > 0 { - n += 1 + l + sovScheduler(uint64(l)) - } - l = len(m.UserID) - if l > 0 { - n += 1 + l + sovScheduler(uint64(l)) - } - if m.StatsEnabled { - n += 2 - } - return n -} - -func (m *FrontendToScheduler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovScheduler(uint64(m.Type)) - } - l = len(m.FrontendAddress) - if l > 0 { - n += 1 + l + sovScheduler(uint64(l)) - } - if m.QueryID != 0 { - n += 1 + sovScheduler(uint64(m.QueryID)) - } - l = len(m.UserID) - if l > 0 { - n += 1 + l + sovScheduler(uint64(l)) - } - if m.HttpRequest != nil { - l = m.HttpRequest.Size() - n += 1 + l + sovScheduler(uint64(l)) - } - if m.StatsEnabled { - n += 2 - } - return n -} - -func (m *SchedulerToFrontend) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != 0 { - n += 1 + sovScheduler(uint64(m.Status)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovScheduler(uint64(l)) - } - return n -} - -func (m *NotifyQuerierShutdownRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.QuerierID) - if l > 0 { - n += 1 + l + sovScheduler(uint64(l)) - } - return n -} - -func (m *NotifyQuerierShutdownResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovScheduler(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozScheduler(x uint64) (n int) { - return sovScheduler(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *QuerierToScheduler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QuerierToScheduler{`, - `QuerierID:` + fmt.Sprintf("%v", this.QuerierID) + `,`, - `}`, - }, "") - return s -} -func (this *SchedulerToQuerier) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SchedulerToQuerier{`, - `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, - `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, - `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + `,`, - `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, - `StatsEnabled:` + fmt.Sprintf("%v", this.StatsEnabled) + `,`, - `}`, - }, "") - return s -} -func (this *FrontendToScheduler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FrontendToScheduler{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + `,`, - `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, - `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, - `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, - `StatsEnabled:` + fmt.Sprintf("%v", 
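// [Editor's aside — not part of the patch.] encodeVarintScheduler and
// sovScheduler above implement standard protobuf base-128 varints: each byte
// carries 7 payload bits, and the high bit flags a continuation.
// sovScheduler(x) = (bits.Len64(x|1)+6)/7 is exactly the encoded byte count:
//
//	sovScheduler(1)   == 1 // fits in 7 bits
//	sovScheduler(300) == 2 // encoded as 0xAC 0x02
//
// The literal bytes written before each field in the Marshal methods (0x0a,
// 0x12, 0x18, ...) are the field keys, (field_number << 3) | wire_type: 0x12
// is field 2 with wire type 2 (length-delimited), 0x18 is field 3 with wire
// type 0 (varint).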
this.StatsEnabled) + `,`, - `}`, - }, "") - return s -} -func (this *SchedulerToFrontend) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SchedulerToFrontend{`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *NotifyQuerierShutdownRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NotifyQuerierShutdownRequest{`, - `QuerierID:` + fmt.Sprintf("%v", this.QuerierID) + `,`, - `}`, - }, "") - return s -} -func (this *NotifyQuerierShutdownResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NotifyQuerierShutdownResponse{`, - `}`, - }, "") - return s -} -func valueToStringScheduler(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *QuerierToScheduler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QuerierToScheduler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QuerierToScheduler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QuerierID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.QuerierID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipScheduler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SchedulerToQuerier) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SchedulerToQuerier: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SchedulerToQuerier: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueryID", 
wireType) - } - m.QueryID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QueryID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HttpRequest == nil { - m.HttpRequest = &httpgrpc.HTTPRequest{} - } - if err := m.HttpRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FrontendAddress = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StatsEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StatsEnabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipScheduler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FrontendToScheduler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FrontendToScheduler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FrontendToScheduler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= FrontendToSchedulerType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FrontendAddress = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) - } - m.QueryID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QueryID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HttpRequest == nil { - m.HttpRequest = &httpgrpc.HTTPRequest{} - } - if err := m.HttpRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StatsEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StatsEnabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipScheduler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SchedulerToFrontend) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SchedulerToFrontend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SchedulerToFrontend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= SchedulerToFrontendStatus(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipScheduler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NotifyQuerierShutdownRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NotifyQuerierShutdownRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NotifyQuerierShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QuerierID", wireType) - } - var stringLen uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthScheduler - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthScheduler - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.QuerierID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipScheduler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NotifyQuerierShutdownResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowScheduler - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NotifyQuerierShutdownResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NotifyQuerierShutdownResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipScheduler(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthScheduler - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipScheduler(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowScheduler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowScheduler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowScheduler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthScheduler - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthScheduler - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowScheduler - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 
0x7) - if innerWireType == 4 { - break - } - next, err := skipScheduler(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthScheduler - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthScheduler = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowScheduler = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto deleted file mode 100644 index 3ae643756..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package schedulerpb; - -option go_package = "schedulerpb"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// Scheduler interface exposed to Queriers. -service SchedulerForQuerier { - // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for - // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, - // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. - // - // Long-running loop is used to detect broken connection between scheduler and querier. This is important - // for scheduler to keep a list of connected queriers up-to-date. - rpc QuerierLoop(stream QuerierToScheduler) returns (stream SchedulerToQuerier) { }; - - // The querier notifies the query-scheduler that it started a graceful shutdown. - rpc NotifyQuerierShutdown(NotifyQuerierShutdownRequest) returns (NotifyQuerierShutdownResponse); -} - -// Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. -// To signal that querier is ready to accept another request, querier sends empty message. -message QuerierToScheduler { - string querierID = 1; -} - -message SchedulerToQuerier { - // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), - // it identifies the query by using this ID. - uint64 queryID = 1; - httpgrpc.HTTPRequest httpRequest = 2; - - // Where should querier send HTTP Response to (using FrontendForQuerier interface). - string frontendAddress = 3; - - // User who initiated the request. Needed to send reply back to frontend. - string userID = 4; - - // Whether query statistics tracking should be enabled. The response will include - // statistics only when this option is enabled. - bool statsEnabled = 5; -} - -// Scheduler interface exposed to Frontend. Frontend can enqueue and cancel requests. -service SchedulerForFrontend { - // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and - // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. - // - // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both - // parties... 
if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending - // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. - rpc FrontendLoop(stream FrontendToScheduler) returns (stream SchedulerToFrontend) { }; -} - -enum FrontendToSchedulerType { - INIT = 0; - ENQUEUE = 1; - CANCEL = 2; -} - -message FrontendToScheduler { - FrontendToSchedulerType type = 1; - - // Used by INIT message. Will be put into all requests passed to querier. - string frontendAddress = 2; - - // Used by ENQUEUE and CANCEL. - // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. - uint64 queryID = 3; - - // Following are used by ENQUEUE only. - string userID = 4; - httpgrpc.HTTPRequest httpRequest = 5; - bool statsEnabled = 6; -} - -enum SchedulerToFrontendStatus { - OK = 0; - TOO_MANY_REQUESTS_PER_TENANT = 1; - ERROR = 2; - SHUTTING_DOWN = 3; -} - -message SchedulerToFrontend { - SchedulerToFrontendStatus status = 1; - string error = 2; -} - -message NotifyQuerierShutdownRequest { - string querierID = 1; -} - -message NotifyQuerierShutdownResponse {} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go deleted file mode 100644 index 8d916e1a8..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go +++ /dev/null @@ -1,143 +0,0 @@ -package storegateway - -import ( - "context" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - - "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" -) - -const ( - corruptedBucketIndex = "corrupted-bucket-index" - noBucketIndex = "no-bucket-index" -) - -// BucketIndexMetadataFetcher is a Thanos MetadataFetcher implementation leveraging on the Cortex bucket index. -type BucketIndexMetadataFetcher struct { - userID string - bkt objstore.Bucket - strategy ShardingStrategy - cfgProvider bucket.TenantConfigProvider - logger log.Logger - filters []block.MetadataFilter - modifiers []block.MetadataModifier - metrics *block.FetcherMetrics -} - -func NewBucketIndexMetadataFetcher( - userID string, - bkt objstore.Bucket, - strategy ShardingStrategy, - cfgProvider bucket.TenantConfigProvider, - logger log.Logger, - reg prometheus.Registerer, - filters []block.MetadataFilter, - modifiers []block.MetadataModifier, -) *BucketIndexMetadataFetcher { - return &BucketIndexMetadataFetcher{ - userID: userID, - bkt: bkt, - strategy: strategy, - cfgProvider: cfgProvider, - logger: logger, - filters: filters, - modifiers: modifiers, - metrics: block.NewFetcherMetrics(reg, [][]string{{corruptedBucketIndex}, {noBucketIndex}}, nil), - } -} - -// Fetch implements block.MetadataFetcher. Not goroutine-safe. -func (f *BucketIndexMetadataFetcher) Fetch(ctx context.Context) (metas map[ulid.ULID]*metadata.Meta, partial map[ulid.ULID]error, err error) { - f.metrics.ResetTx() - - // Check whether the user belongs to the shard. 
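The generated scheduler.pb.go code deleted at the top of this hunk sizes every field with sovScheduler and zigzag-encodes signed values with sozScheduler before marshaling. A minimal standalone restatement of what those two helpers compute, cross-checked against the standard library (the short names here are ours, not from the deleted code):

    package main

    import (
        "encoding/binary"
        "fmt"
        "math/bits"
    )

    // sov is the wire size of a protobuf base-128 varint: one byte per 7
    // payload bits. The x|1 forces at least one significant bit, so sov(0) == 1.
    func sov(x uint64) int {
        return (bits.Len64(x|1) + 6) / 7
    }

    // soz is the size of a zigzag-encoded signed value: the sign bit is
    // folded into bit 0 so small negative numbers stay small on the wire.
    func soz(x int64) int {
        return sov(uint64((x << 1) ^ (x >> 63)))
    }

    func main() {
        buf := make([]byte, binary.MaxVarintLen64)
        fmt.Println(sov(300), binary.PutUvarint(buf, 300)) // 2 2: 300 needs two varint bytes
        fmt.Println(soz(-1))                               // 1: zigzag maps -1 to 1
    }

The deleted encodeVarintScheduler writes those bytes starting at offset minus sovScheduler(v), which is why every Size method above mirrors the same formula.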
- if len(f.strategy.FilterUsers(ctx, []string{f.userID})) != 1 { - f.metrics.Submit() - return nil, nil, nil - } - - // Track duration and sync counters only if wasn't filtered out by the sharding strategy. - start := time.Now() - defer func() { - f.metrics.SyncDuration.Observe(time.Since(start).Seconds()) - if err != nil { - f.metrics.SyncFailures.Inc() - } - }() - f.metrics.Syncs.Inc() - - // Fetch the bucket index. - idx, err := bucketindex.ReadIndex(ctx, f.bkt, f.userID, f.cfgProvider, f.logger) - if errors.Is(err, bucketindex.ErrIndexNotFound) { - // This is a legit case happening when the first blocks of a tenant have recently been uploaded by ingesters - // and their bucket index has not been created yet. - f.metrics.Synced.WithLabelValues(noBucketIndex).Set(1) - f.metrics.Submit() - - return nil, nil, nil - } - if errors.Is(err, bucketindex.ErrIndexCorrupted) { - // In case a single tenant bucket index is corrupted, we don't want the store-gateway to fail at startup - // because unable to fetch blocks metadata. We'll act as if the tenant has no bucket index, but the query - // will fail anyway in the querier (the querier fails in the querier if bucket index is corrupted). - level.Error(f.logger).Log("msg", "corrupted bucket index found", "user", f.userID, "err", err) - f.metrics.Synced.WithLabelValues(corruptedBucketIndex).Set(1) - f.metrics.Submit() - - return nil, nil, nil - } - if err != nil { - f.metrics.Synced.WithLabelValues(block.FailedMeta).Set(1) - f.metrics.Submit() - - return nil, nil, errors.Wrapf(err, "read bucket index") - } - - // Build block metas out of the index. - metas = make(map[ulid.ULID]*metadata.Meta, len(idx.Blocks)) - for _, b := range idx.Blocks { - metas[b.ID] = b.ThanosMeta(f.userID) - } - - for _, filter := range f.filters { - var err error - - // NOTE: filter can update synced metric accordingly to the reason of the exclude. - if customFilter, ok := filter.(MetadataFilterWithBucketIndex); ok { - err = customFilter.FilterWithBucketIndex(ctx, metas, idx, f.metrics.Synced) - } else { - err = filter.Filter(ctx, metas, f.metrics.Synced) - } - - if err != nil { - return nil, nil, errors.Wrap(err, "filter metas") - } - } - - for _, m := range f.modifiers { - // NOTE: modifier can update modified metric accordingly to the reason of the modification. - if err := m.Modify(ctx, metas, f.metrics.Modified); err != nil { - return nil, nil, errors.Wrap(err, "modify metas") - } - } - - f.metrics.Synced.WithLabelValues(block.LoadedMeta).Set(float64(len(metas))) - f.metrics.Submit() - - return metas, nil, nil -} - -func (f *BucketIndexMetadataFetcher) UpdateOnChange(callback func([]metadata.Meta, error)) { - // Unused by the store-gateway. 
- callback(nil, errors.New("UpdateOnChange is unsupported")) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go deleted file mode 100644 index ff02afb44..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go +++ /dev/null @@ -1,66 +0,0 @@ -package storegateway - -import ( - "context" - - "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/storage" - "github.com/thanos-io/thanos/pkg/store/hintspb" - "github.com/thanos-io/thanos/pkg/store/storepb" -) - -// bucketStoreSeriesServer is a fake in-memory gRPC server used to -// call Thanos BucketStore.Series() without having to go through the -// gRPC networking stack. -type bucketStoreSeriesServer struct { - // This field just exists to pseudo-implement the unused methods of the interface. - storepb.Store_SeriesServer - - ctx context.Context - - SeriesSet []*storepb.Series - Warnings storage.Warnings - Hints hintspb.SeriesResponseHints -} - -func newBucketStoreSeriesServer(ctx context.Context) *bucketStoreSeriesServer { - return &bucketStoreSeriesServer{ctx: ctx} -} - -func (s *bucketStoreSeriesServer) Send(r *storepb.SeriesResponse) error { - if r.GetWarning() != "" { - s.Warnings = append(s.Warnings, errors.New(r.GetWarning())) - } - - if rawHints := r.GetHints(); rawHints != nil { - // We expect only 1 hints entry so we just keep 1. - if err := types.UnmarshalAny(rawHints, &s.Hints); err != nil { - return errors.Wrap(err, "failed to unmarshal series hints") - } - } - - if recvSeries := r.GetSeries(); recvSeries != nil { - // Thanos uses a pool for the chunks and may use other pools in the future. - // Given we need to retain the reference after the pooled slices are recycled, - // we need to do a copy here. We prefer to stay on the safest side at this stage - // so we do a marshal+unmarshal to copy the whole series. - recvSeriesData, err := recvSeries.Marshal() - if err != nil { - return errors.Wrap(err, "marshal received series") - } - - copiedSeries := &storepb.Series{} - if err = copiedSeries.Unmarshal(recvSeriesData); err != nil { - return errors.Wrap(err, "unmarshal received series") - } - - s.SeriesSet = append(s.SeriesSet, copiedSeries) - } - - return nil -} - -func (s *bucketStoreSeriesServer) Context() context.Context { - return s.ctx -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go deleted file mode 100644 index bfed90a09..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go +++ /dev/null @@ -1,243 +0,0 @@ -package storegateway - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/util" -) - -// BucketStoreMetrics aggregates metrics exported by Thanos Bucket Store -// and re-exports those aggregates as Cortex metrics. 
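The bucketStoreSeriesServer removed just above copies each received series by marshaling it and unmarshaling into a fresh message, because Thanos recycles the pooled chunk buffers once Send returns. That copy-by-wire-round-trip trick in isolation, as a sketch (the generic helper and its name are ours; the constraint matches any gogo/protobuf message):

    // deepCopy clones a protobuf message by round-tripping it through its
    // wire form, so the clone shares no memory with pooled or recycled buffers.
    func deepCopy[M interface {
        Marshal() ([]byte, error)
        Unmarshal([]byte) error
    }](src, dst M) error {
        raw, err := src.Marshal()
        if err != nil {
            return err
        }
        return dst.Unmarshal(raw)
    }

A field-by-field clone would be faster, but as the deleted comment notes, the round trip is the safe choice while the message layout is still evolving.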
-type BucketStoreMetrics struct { - regs *util.UserRegistries - - // exported metrics, gathered from Thanos BucketStore - blockLoads *prometheus.Desc - blockLoadFailures *prometheus.Desc - blockDrops *prometheus.Desc - blockDropFailures *prometheus.Desc - blocksLoaded *prometheus.Desc - seriesDataTouched *prometheus.Desc - seriesDataFetched *prometheus.Desc - seriesDataSizeTouched *prometheus.Desc - seriesDataSizeFetched *prometheus.Desc - seriesBlocksQueried *prometheus.Desc - seriesGetAllDuration *prometheus.Desc - seriesMergeDuration *prometheus.Desc - seriesRefetches *prometheus.Desc - resultSeriesCount *prometheus.Desc - queriesDropped *prometheus.Desc - - cachedPostingsCompressions *prometheus.Desc - cachedPostingsCompressionErrors *prometheus.Desc - cachedPostingsCompressionTimeSeconds *prometheus.Desc - cachedPostingsOriginalSizeBytes *prometheus.Desc - cachedPostingsCompressedSizeBytes *prometheus.Desc - - seriesFetchDuration *prometheus.Desc - postingsFetchDuration *prometheus.Desc - - indexHeaderLazyLoadCount *prometheus.Desc - indexHeaderLazyLoadFailedCount *prometheus.Desc - indexHeaderLazyUnloadCount *prometheus.Desc - indexHeaderLazyUnloadFailedCount *prometheus.Desc - indexHeaderLazyLoadDuration *prometheus.Desc -} - -func NewBucketStoreMetrics() *BucketStoreMetrics { - return &BucketStoreMetrics{ - regs: util.NewUserRegistries(), - - blockLoads: prometheus.NewDesc( - "cortex_bucket_store_block_loads_total", - "Total number of remote block loading attempts.", - nil, nil), - blockLoadFailures: prometheus.NewDesc( - "cortex_bucket_store_block_load_failures_total", - "Total number of failed remote block loading attempts.", - nil, nil), - blockDrops: prometheus.NewDesc( - "cortex_bucket_store_block_drops_total", - "Total number of local blocks that were dropped.", - nil, nil), - blockDropFailures: prometheus.NewDesc( - "cortex_bucket_store_block_drop_failures_total", - "Total number of local blocks that failed to be dropped.", - nil, nil), - blocksLoaded: prometheus.NewDesc( - "cortex_bucket_store_blocks_loaded", - "Number of currently loaded blocks.", - nil, nil), - seriesDataTouched: prometheus.NewDesc( - "cortex_bucket_store_series_data_touched", - "How many items of a data type in a block were touched for a single series request.", - []string{"data_type"}, nil), - seriesDataFetched: prometheus.NewDesc( - "cortex_bucket_store_series_data_fetched", - "How many items of a data type in a block were fetched for a single series request.", - []string{"data_type"}, nil), - seriesDataSizeTouched: prometheus.NewDesc( - "cortex_bucket_store_series_data_size_touched_bytes", - "Size of all items of a data type in a block were touched for a single series request.", - []string{"data_type"}, nil), - seriesDataSizeFetched: prometheus.NewDesc( - "cortex_bucket_store_series_data_size_fetched_bytes", - "Size of all items of a data type in a block were fetched for a single series request.", - []string{"data_type"}, nil), - seriesBlocksQueried: prometheus.NewDesc( - "cortex_bucket_store_series_blocks_queried", - "Number of blocks in a bucket store that were touched to satisfy a query.", - nil, nil), - - seriesGetAllDuration: prometheus.NewDesc( - "cortex_bucket_store_series_get_all_duration_seconds", - "Time it takes until all per-block prepares and preloads for a query are finished.", - nil, nil), - seriesMergeDuration: prometheus.NewDesc( - "cortex_bucket_store_series_merge_duration_seconds", - "Time it takes to merge sub-results from all queried blocks into a single result.", - nil, nil), - 
seriesRefetches: prometheus.NewDesc( - "cortex_bucket_store_series_refetches_total", - "Total number of cases where the built-in max series size was not enough to fetch series from index, resulting in refetch.", - nil, nil), - resultSeriesCount: prometheus.NewDesc( - "cortex_bucket_store_series_result_series", - "Number of series observed in the final result of a query.", - nil, nil), - queriesDropped: prometheus.NewDesc( - "cortex_bucket_store_queries_dropped_total", - "Number of queries that were dropped due to the max chunks per query limit.", - nil, nil), - - cachedPostingsCompressions: prometheus.NewDesc( - "cortex_bucket_store_cached_postings_compressions_total", - "Number of postings compressions and decompressions when storing to index cache.", - []string{"op"}, nil), - cachedPostingsCompressionErrors: prometheus.NewDesc( - "cortex_bucket_store_cached_postings_compression_errors_total", - "Number of postings compression and decompression errors.", - []string{"op"}, nil), - cachedPostingsCompressionTimeSeconds: prometheus.NewDesc( - "cortex_bucket_store_cached_postings_compression_time_seconds", - "Time spent compressing and decompressing postings when storing to / reading from postings cache.", - []string{"op"}, nil), - cachedPostingsOriginalSizeBytes: prometheus.NewDesc( - "cortex_bucket_store_cached_postings_original_size_bytes_total", - "Original size of postings stored into cache.", - nil, nil), - cachedPostingsCompressedSizeBytes: prometheus.NewDesc( - "cortex_bucket_store_cached_postings_compressed_size_bytes_total", - "Compressed size of postings stored into cache.", - nil, nil), - - seriesFetchDuration: prometheus.NewDesc( - "cortex_bucket_store_cached_series_fetch_duration_seconds", - "Time it takes to fetch series to respond a request sent to store-gateway. It includes both the time to fetch it from cache and from storage in case of cache misses.", - nil, nil), - postingsFetchDuration: prometheus.NewDesc( - "cortex_bucket_store_cached_postings_fetch_duration_seconds", - "Time it takes to fetch postings to respond a request sent to store-gateway. 
It includes both the time to fetch it from cache and from storage in case of cache misses.", - nil, nil), - - indexHeaderLazyLoadCount: prometheus.NewDesc( - "cortex_bucket_store_indexheader_lazy_load_total", - "Total number of index-header lazy load operations.", - nil, nil), - indexHeaderLazyLoadFailedCount: prometheus.NewDesc( - "cortex_bucket_store_indexheader_lazy_load_failed_total", - "Total number of failed index-header lazy load operations.", - nil, nil), - indexHeaderLazyUnloadCount: prometheus.NewDesc( - "cortex_bucket_store_indexheader_lazy_unload_total", - "Total number of index-header lazy unload operations.", - nil, nil), - indexHeaderLazyUnloadFailedCount: prometheus.NewDesc( - "cortex_bucket_store_indexheader_lazy_unload_failed_total", - "Total number of failed index-header lazy unload operations.", - nil, nil), - indexHeaderLazyLoadDuration: prometheus.NewDesc( - "cortex_bucket_store_indexheader_lazy_load_duration_seconds", - "Duration of the index-header lazy loading in seconds.", - nil, nil), - } -} - -func (m *BucketStoreMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { - m.regs.AddUserRegistry(user, reg) -} - -func (m *BucketStoreMetrics) RemoveUserRegistry(user string) { - m.regs.RemoveUserRegistry(user, false) -} - -func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { - out <- m.blockLoads - out <- m.blockLoadFailures - out <- m.blockDrops - out <- m.blockDropFailures - out <- m.blocksLoaded - out <- m.seriesDataTouched - out <- m.seriesDataFetched - out <- m.seriesDataSizeTouched - out <- m.seriesDataSizeFetched - out <- m.seriesBlocksQueried - out <- m.seriesGetAllDuration - out <- m.seriesMergeDuration - out <- m.seriesRefetches - out <- m.resultSeriesCount - out <- m.queriesDropped - - out <- m.cachedPostingsCompressions - out <- m.cachedPostingsCompressionErrors - out <- m.cachedPostingsCompressionTimeSeconds - out <- m.cachedPostingsOriginalSizeBytes - out <- m.cachedPostingsCompressedSizeBytes - - out <- m.seriesFetchDuration - out <- m.postingsFetchDuration - - out <- m.indexHeaderLazyLoadCount - out <- m.indexHeaderLazyLoadFailedCount - out <- m.indexHeaderLazyUnloadCount - out <- m.indexHeaderLazyUnloadFailedCount - out <- m.indexHeaderLazyLoadDuration -} - -func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { - data := m.regs.BuildMetricFamiliesPerUser() - - data.SendSumOfCounters(out, m.blockLoads, "thanos_bucket_store_block_loads_total") - data.SendSumOfCounters(out, m.blockLoadFailures, "thanos_bucket_store_block_load_failures_total") - data.SendSumOfCounters(out, m.blockDrops, "thanos_bucket_store_block_drops_total") - data.SendSumOfCounters(out, m.blockDropFailures, "thanos_bucket_store_block_drop_failures_total") - - data.SendSumOfGauges(out, m.blocksLoaded, "thanos_bucket_store_blocks_loaded") - - data.SendSumOfSummariesWithLabels(out, m.seriesDataTouched, "thanos_bucket_store_series_data_touched", "data_type") - data.SendSumOfSummariesWithLabels(out, m.seriesDataFetched, "thanos_bucket_store_series_data_fetched", "data_type") - data.SendSumOfSummariesWithLabels(out, m.seriesDataSizeTouched, "thanos_bucket_store_series_data_size_touched_bytes", "data_type") - data.SendSumOfSummariesWithLabels(out, m.seriesDataSizeFetched, "thanos_bucket_store_series_data_size_fetched_bytes", "data_type") - data.SendSumOfSummariesWithLabels(out, m.seriesBlocksQueried, "thanos_bucket_store_series_blocks_queried") - - data.SendSumOfHistograms(out, m.seriesGetAllDuration, 
"thanos_bucket_store_series_get_all_duration_seconds") - data.SendSumOfHistograms(out, m.seriesMergeDuration, "thanos_bucket_store_series_merge_duration_seconds") - data.SendSumOfCounters(out, m.seriesRefetches, "thanos_bucket_store_series_refetches_total") - data.SendSumOfSummaries(out, m.resultSeriesCount, "thanos_bucket_store_series_result_series") - data.SendSumOfCounters(out, m.queriesDropped, "thanos_bucket_store_queries_dropped_total") - - data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressions, "thanos_bucket_store_cached_postings_compressions_total", "op") - data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionErrors, "thanos_bucket_store_cached_postings_compression_errors_total", "op") - data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionTimeSeconds, "thanos_bucket_store_cached_postings_compression_time_seconds_total", "op") - data.SendSumOfCountersWithLabels(out, m.cachedPostingsOriginalSizeBytes, "thanos_bucket_store_cached_postings_original_size_bytes_total") - data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressedSizeBytes, "thanos_bucket_store_cached_postings_compressed_size_bytes_total") - - data.SendSumOfHistograms(out, m.seriesFetchDuration, "thanos_bucket_store_cached_series_fetch_duration_seconds") - data.SendSumOfHistograms(out, m.postingsFetchDuration, "thanos_bucket_store_cached_postings_fetch_duration_seconds") - - data.SendSumOfCounters(out, m.indexHeaderLazyLoadCount, "thanos_bucket_store_indexheader_lazy_load_total") - data.SendSumOfCounters(out, m.indexHeaderLazyLoadFailedCount, "thanos_bucket_store_indexheader_lazy_load_failed_total") - data.SendSumOfCounters(out, m.indexHeaderLazyUnloadCount, "thanos_bucket_store_indexheader_lazy_unload_total") - data.SendSumOfCounters(out, m.indexHeaderLazyUnloadFailedCount, "thanos_bucket_store_indexheader_lazy_unload_failed_total") - data.SendSumOfHistograms(out, m.indexHeaderLazyLoadDuration, "thanos_bucket_store_indexheader_lazy_load_duration_seconds") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go deleted file mode 100644 index c6233aa4b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go +++ /dev/null @@ -1,639 +0,0 @@ -package storegateway - -import ( - "context" - "fmt" - "io/ioutil" - "math" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/thanos-io/thanos/pkg/block" - thanos_metadata "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/gate" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/pool" - "github.com/thanos-io/thanos/pkg/store" - storecache "github.com/thanos-io/thanos/pkg/store/cache" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/logging" - "google.golang.org/grpc/metadata" - - "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/util/backoff" - util_log "github.com/cortexproject/cortex/pkg/util/log" - 
"github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -// BucketStores is a multi-tenant wrapper of Thanos BucketStore. -type BucketStores struct { - logger log.Logger - cfg tsdb.BlocksStorageConfig - limits *validation.Overrides - bucket objstore.Bucket - logLevel logging.Level - bucketStoreMetrics *BucketStoreMetrics - metaFetcherMetrics *MetadataFetcherMetrics - shardingStrategy ShardingStrategy - - // Index cache shared across all tenants. - indexCache storecache.IndexCache - - // Chunks bytes pool shared across all tenants. - chunksPool pool.Bytes - - // Partitioner shared across all tenants. - partitioner store.Partitioner - - // Gate used to limit query concurrency across all tenants. - queryGate gate.Gate - - // Keeps a bucket store for each tenant. - storesMu sync.RWMutex - stores map[string]*store.BucketStore - - // Metrics. - syncTimes prometheus.Histogram - syncLastSuccess prometheus.Gauge - tenantsDiscovered prometheus.Gauge - tenantsSynced prometheus.Gauge -} - -// NewBucketStores makes a new BucketStores. -func NewBucketStores(cfg tsdb.BlocksStorageConfig, shardingStrategy ShardingStrategy, bucketClient objstore.Bucket, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*BucketStores, error) { - cachingBucket, err := tsdb.CreateCachingBucket(cfg.BucketStore.ChunksCache, cfg.BucketStore.MetadataCache, bucketClient, logger, reg) - if err != nil { - return nil, errors.Wrapf(err, "create caching bucket") - } - - // The number of concurrent queries against the tenants BucketStores are limited. - queryGateReg := extprom.WrapRegistererWithPrefix("cortex_bucket_stores_", reg) - queryGate := gate.New(queryGateReg, cfg.BucketStore.MaxConcurrent) - promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_bucket_stores_gate_queries_concurrent_max", - Help: "Number of maximum concurrent queries allowed.", - }).Set(float64(cfg.BucketStore.MaxConcurrent)) - - u := &BucketStores{ - logger: logger, - cfg: cfg, - limits: limits, - bucket: cachingBucket, - shardingStrategy: shardingStrategy, - stores: map[string]*store.BucketStore{}, - logLevel: logLevel, - bucketStoreMetrics: NewBucketStoreMetrics(), - metaFetcherMetrics: NewMetadataFetcherMetrics(), - queryGate: queryGate, - partitioner: newGapBasedPartitioner(cfg.BucketStore.PartitionerMaxGapBytes, reg), - syncTimes: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_bucket_stores_blocks_sync_seconds", - Help: "The total time it takes to perform a sync stores", - Buckets: []float64{0.1, 1, 10, 30, 60, 120, 300, 600, 900}, - }), - syncLastSuccess: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_bucket_stores_blocks_last_successful_sync_timestamp_seconds", - Help: "Unix timestamp of the last successful blocks sync.", - }), - tenantsDiscovered: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_bucket_stores_tenants_discovered", - Help: "Number of tenants discovered in the bucket.", - }), - tenantsSynced: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_bucket_stores_tenants_synced", - Help: "Number of tenants synced.", - }), - } - - // Init the index cache. - if u.indexCache, err = tsdb.NewIndexCache(cfg.BucketStore.IndexCache, logger, reg); err != nil { - return nil, errors.Wrap(err, "create index cache") - } - - // Init the chunks bytes pool. 
- if u.chunksPool, err = newChunkBytesPool(cfg.BucketStore.ChunkPoolMinBucketSizeBytes, cfg.BucketStore.ChunkPoolMaxBucketSizeBytes, cfg.BucketStore.MaxChunkPoolBytes, reg); err != nil { - return nil, errors.Wrap(err, "create chunks bytes pool") - } - - if reg != nil { - reg.MustRegister(u.bucketStoreMetrics, u.metaFetcherMetrics) - } - - return u, nil -} - -// InitialSync does an initial synchronization of blocks for all users. -func (u *BucketStores) InitialSync(ctx context.Context) error { - level.Info(u.logger).Log("msg", "synchronizing TSDB blocks for all users") - - if err := u.syncUsersBlocksWithRetries(ctx, func(ctx context.Context, s *store.BucketStore) error { - return s.InitialSync(ctx) - }); err != nil { - level.Warn(u.logger).Log("msg", "failed to synchronize TSDB blocks", "err", err) - return err - } - - level.Info(u.logger).Log("msg", "successfully synchronized TSDB blocks for all users") - return nil -} - -// SyncBlocks synchronizes the stores state with the Bucket store for every user. -func (u *BucketStores) SyncBlocks(ctx context.Context) error { - return u.syncUsersBlocksWithRetries(ctx, func(ctx context.Context, s *store.BucketStore) error { - return s.SyncBlocks(ctx) - }) -} - -func (u *BucketStores) syncUsersBlocksWithRetries(ctx context.Context, f func(context.Context, *store.BucketStore) error) error { - retries := backoff.New(ctx, backoff.Config{ - MinBackoff: 1 * time.Second, - MaxBackoff: 10 * time.Second, - MaxRetries: 3, - }) - - var lastErr error - for retries.Ongoing() { - lastErr = u.syncUsersBlocks(ctx, f) - if lastErr == nil { - return nil - } - - retries.Wait() - } - - if lastErr == nil { - return retries.Err() - } - - return lastErr -} - -func (u *BucketStores) syncUsersBlocks(ctx context.Context, f func(context.Context, *store.BucketStore) error) (returnErr error) { - defer func(start time.Time) { - u.syncTimes.Observe(time.Since(start).Seconds()) - if returnErr == nil { - u.syncLastSuccess.SetToCurrentTime() - } - }(time.Now()) - - type job struct { - userID string - store *store.BucketStore - } - - wg := &sync.WaitGroup{} - jobs := make(chan job) - errs := tsdb_errors.NewMulti() - errsMx := sync.Mutex{} - - // Scan users in the bucket. In case of error, it may return a subset of users. If we sync a subset of users - // during a periodic sync, we may end up unloading blocks for users that still belong to this store-gateway - // so we do prefer to not run the sync at all. - userIDs, err := u.scanUsers(ctx) - if err != nil { - return err - } - - includeUserIDs := make(map[string]struct{}) - for _, userID := range u.shardingStrategy.FilterUsers(ctx, userIDs) { - includeUserIDs[userID] = struct{}{} - } - - u.tenantsDiscovered.Set(float64(len(userIDs))) - u.tenantsSynced.Set(float64(len(includeUserIDs))) - - // Create a pool of workers which will synchronize blocks. The pool size - // is limited in order to avoid to concurrently sync a lot of tenants in - // a large cluster. - for i := 0; i < u.cfg.BucketStore.TenantSyncConcurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - for job := range jobs { - if err := f(ctx, job.store); err != nil { - errsMx.Lock() - errs.Add(errors.Wrapf(err, "failed to synchronize TSDB blocks for user %s", job.userID)) - errsMx.Unlock() - } - } - }() - } - - // Lazily create a bucket store for each new user found - // and submit a sync job for each user. - for _, userID := range userIDs { - // If we don't have a store for the tenant yet, then we should skip it if it's not - // included in the store-gateway shard. 
If we already have it, we need to sync it - // anyway to make sure all its blocks are unloaded and metrics updated correctly - // (but bucket API calls are skipped thanks to the objstore client adapter). - if _, included := includeUserIDs[userID]; !included && u.getStore(userID) == nil { - continue - } - - bs, err := u.getOrCreateStore(userID) - if err != nil { - errsMx.Lock() - errs.Add(err) - errsMx.Unlock() - - continue - } - - select { - case jobs <- job{userID: userID, store: bs}: - // Nothing to do. Will loop to push more jobs. - case <-ctx.Done(): - return ctx.Err() - } - } - - // Wait until all workers completed. - close(jobs) - wg.Wait() - - u.deleteLocalFilesForExcludedTenants(includeUserIDs) - - return errs.Err() -} - -// Series makes a series request to the underlying user bucket store. -func (u *BucketStores) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - spanLog, spanCtx := spanlogger.New(srv.Context(), "BucketStores.Series") - defer spanLog.Span.Finish() - - userID := getUserIDFromGRPCContext(spanCtx) - if userID == "" { - return fmt.Errorf("no userID") - } - - store := u.getStore(userID) - if store == nil { - return nil - } - - return store.Series(req, spanSeriesServer{ - Store_SeriesServer: srv, - ctx: spanCtx, - }) -} - -// LabelNames implements the Storegateway proto service. -func (u *BucketStores) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { - spanLog, spanCtx := spanlogger.New(ctx, "BucketStores.LabelNames") - defer spanLog.Span.Finish() - - userID := getUserIDFromGRPCContext(spanCtx) - if userID == "" { - return nil, fmt.Errorf("no userID") - } - - store := u.getStore(userID) - if store == nil { - return &storepb.LabelNamesResponse{}, nil - } - - return store.LabelNames(ctx, req) -} - -// LabelValues implements the Storegateway proto service. -func (u *BucketStores) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { - spanLog, spanCtx := spanlogger.New(ctx, "BucketStores.LabelValues") - defer spanLog.Span.Finish() - - userID := getUserIDFromGRPCContext(spanCtx) - if userID == "" { - return nil, fmt.Errorf("no userID") - } - - store := u.getStore(userID) - if store == nil { - return &storepb.LabelValuesResponse{}, nil - } - - return store.LabelValues(ctx, req) -} - -// scanUsers in the bucket and return the list of found users. If an error occurs while -// iterating the bucket, it may return both an error and a subset of the users in the bucket. -func (u *BucketStores) scanUsers(ctx context.Context) ([]string, error) { - var users []string - - // Iterate the bucket to find all users in the bucket. Due to how the bucket listing - // caching works, it's more likely to have a cache hit if there's no delay while - // iterating the bucket, so we do load all users in memory and later process them. - err := u.bucket.Iter(ctx, "", func(s string) error { - users = append(users, strings.TrimSuffix(s, "/")) - return nil - }) - - return users, err -} - -func (u *BucketStores) getStore(userID string) *store.BucketStore { - u.storesMu.RLock() - defer u.storesMu.RUnlock() - return u.stores[userID] -} - -var ( - errBucketStoreNotEmpty = errors.New("bucket store not empty") - errBucketStoreNotFound = errors.New("bucket store not found") -) - -// closeEmptyBucketStore closes bucket store for given user, if it is empty, -// and removes it from bucket stores map and metrics. -// If bucket store doesn't exist, returns errBucketStoreNotFound. 
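syncUsersBlocks above fans the per-tenant sync out to a fixed-size pool of workers reading from a jobs channel and collects every tenant's error instead of failing fast. The shape of that pattern, reduced to its essentials (names and the errors.Join aggregation are ours, not from the deleted code):

    // syncAll runs f for each id with at most `concurrency` goroutines and
    // reports all failures rather than stopping at the first one.
    func syncAll(ctx context.Context, ids []string, concurrency int, f func(context.Context, string) error) error {
        var (
            wg   sync.WaitGroup
            mu   sync.Mutex
            errs []error
        )
        jobs := make(chan string)
        for i := 0; i < concurrency; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for id := range jobs {
                    if err := f(ctx, id); err != nil {
                        mu.Lock()
                        errs = append(errs, fmt.Errorf("sync tenant %s: %w", id, err))
                        mu.Unlock()
                    }
                }
            }()
        }
    submit:
        for _, id := range ids {
            select {
            case jobs <- id:
            case <-ctx.Done():
                break submit // stop submitting; workers drain what is queued
            }
        }
        close(jobs)
        wg.Wait()
        if err := ctx.Err(); err != nil {
            return err
        }
        return errors.Join(errs...)
    }

Unlike the deleted code, this sketch closes the jobs channel on cancellation before returning, so no worker is left blocked on the channel.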
-// If bucket store is not empty, returns errBucketStoreNotEmpty. -// Otherwise returns error from closing the bucket store. -func (u *BucketStores) closeEmptyBucketStore(userID string) error { - u.storesMu.Lock() - unlockInDefer := true - defer func() { - if unlockInDefer { - u.storesMu.Unlock() - } - }() - - bs := u.stores[userID] - if bs == nil { - return errBucketStoreNotFound - } - - if !isEmptyBucketStore(bs) { - return errBucketStoreNotEmpty - } - - delete(u.stores, userID) - unlockInDefer = false - u.storesMu.Unlock() - - u.metaFetcherMetrics.RemoveUserRegistry(userID) - u.bucketStoreMetrics.RemoveUserRegistry(userID) - return bs.Close() -} - -func isEmptyBucketStore(bs *store.BucketStore) bool { - min, max := bs.TimeRange() - return min == math.MaxInt64 && max == math.MinInt64 -} - -func (u *BucketStores) syncDirForUser(userID string) string { - return filepath.Join(u.cfg.BucketStore.SyncDir, userID) -} - -func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, error) { - // Check if the store already exists. - bs := u.getStore(userID) - if bs != nil { - return bs, nil - } - - u.storesMu.Lock() - defer u.storesMu.Unlock() - - // Check again for the store in the event it was created in-between locks. - bs = u.stores[userID] - if bs != nil { - return bs, nil - } - - userLogger := util_log.WithUserID(userID, u.logger) - - level.Info(userLogger).Log("msg", "creating user bucket store") - - userBkt := bucket.NewUserBucketClient(userID, u.bucket, u.limits) - fetcherReg := prometheus.NewRegistry() - - // The sharding strategy filter MUST be before the ones we create here (order matters). - filters := append([]block.MetadataFilter{NewShardingMetadataFilterAdapter(userID, u.shardingStrategy)}, []block.MetadataFilter{ - block.NewConsistencyDelayMetaFilter(userLogger, u.cfg.BucketStore.ConsistencyDelay, fetcherReg), - // Use our own custom implementation. - NewIgnoreDeletionMarkFilter(userLogger, userBkt, u.cfg.BucketStore.IgnoreDeletionMarksDelay, u.cfg.BucketStore.MetaSyncConcurrency), - // The duplicate filter has been intentionally omitted because it could cause troubles with - // the consistency check done on the querier. The duplicate filter removes redundant blocks - // but if the store-gateway removes redundant blocks before the querier discovers them, the - // consistency check on the querier will fail. - }...) - - modifiers := []block.MetadataModifier{ - // Remove Cortex external labels so that they're not injected when querying blocks. - NewReplicaLabelRemover(userLogger, []string{ - tsdb.TenantIDExternalLabel, - tsdb.IngesterIDExternalLabel, - tsdb.ShardIDExternalLabel, - }), - } - - // Instantiate a different blocks metadata fetcher based on whether bucket index is enabled or not. - var fetcher block.MetadataFetcher - if u.cfg.BucketStore.BucketIndex.Enabled { - fetcher = NewBucketIndexMetadataFetcher( - userID, - u.bucket, - u.shardingStrategy, - u.limits, - u.logger, - fetcherReg, - filters, - modifiers) - } else { - // Wrap the bucket reader to skip iterating the bucket at all if the user doesn't - // belong to the store-gateway shard. We need to run the BucketStore synching anyway - // in order to unload previous tenants in case of a resharding leading to tenants - // moving out from the store-gateway shard and also make sure both MetaFetcher and - // BucketStore metrics are correctly updated. 
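getOrCreateStore, which begins just above, is the classic double-checked pattern around an RWMutex: a cheap shared-lock lookup on the hot path, then a re-check under the exclusive lock before constructing. The pattern in isolation (the bucketStore type and the create callback are illustrative stand-ins):

    type bucketStore struct{ /* elided */ }

    type tenantStores struct {
        mu     sync.RWMutex
        stores map[string]*bucketStore
    }

    func (t *tenantStores) getOrCreate(userID string, create func() (*bucketStore, error)) (*bucketStore, error) {
        // Fast path: shared lock only.
        t.mu.RLock()
        bs := t.stores[userID]
        t.mu.RUnlock()
        if bs != nil {
            return bs, nil
        }

        t.mu.Lock()
        defer t.mu.Unlock()
        // Re-check: another goroutine may have created the store between
        // the RUnlock above and the Lock here.
        if bs := t.stores[userID]; bs != nil {
            return bs, nil
        }
        bs, err := create()
        if err != nil {
            return nil, err
        }
        t.stores[userID] = bs
        return bs, nil
    }

Holding the write lock across create serializes construction, which the deleted code accepts because stores are created rarely and read constantly.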
- fetcherBkt := NewShardingBucketReaderAdapter(userID, u.shardingStrategy, userBkt) - - var err error - fetcher, err = block.NewMetaFetcher( - userLogger, - u.cfg.BucketStore.MetaSyncConcurrency, - fetcherBkt, - u.syncDirForUser(userID), // The fetcher stores cached metas in the "meta-syncer/" sub directory - fetcherReg, - filters, - modifiers, - ) - if err != nil { - return nil, err - } - } - - bucketStoreReg := prometheus.NewRegistry() - bucketStoreOpts := []store.BucketStoreOption{ - store.WithLogger(userLogger), - store.WithRegistry(bucketStoreReg), - store.WithIndexCache(u.indexCache), - store.WithQueryGate(u.queryGate), - store.WithChunkPool(u.chunksPool), - } - if u.logLevel.String() == "debug" { - bucketStoreOpts = append(bucketStoreOpts, store.WithDebugLogging()) - } - - bs, err := store.NewBucketStore( - userBkt, - fetcher, - u.syncDirForUser(userID), - newChunksLimiterFactory(u.limits, userID), - newSeriesLimiterFactory(u.limits, userID), - u.partitioner, - u.cfg.BucketStore.BlockSyncConcurrency, - false, // No need to enable backward compatibility with Thanos pre 0.8.0 queriers - u.cfg.BucketStore.PostingOffsetsInMemSampling, - true, // Enable series hints. - u.cfg.BucketStore.IndexHeaderLazyLoadingEnabled, - u.cfg.BucketStore.IndexHeaderLazyLoadingIdleTimeout, - bucketStoreOpts..., - ) - if err != nil { - return nil, err - } - - u.stores[userID] = bs - u.metaFetcherMetrics.AddUserRegistry(userID, fetcherReg) - u.bucketStoreMetrics.AddUserRegistry(userID, bucketStoreReg) - - return bs, nil -} - -// deleteLocalFilesForExcludedTenants removes local "sync" directories for tenants that are not included in the current -// shard. -func (u *BucketStores) deleteLocalFilesForExcludedTenants(includeUserIDs map[string]struct{}) { - files, err := ioutil.ReadDir(u.cfg.BucketStore.SyncDir) - if err != nil { - return - } - - for _, f := range files { - if !f.IsDir() { - continue - } - - userID := f.Name() - if _, included := includeUserIDs[userID]; included { - // Preserve directory for users owned by this shard. - continue - } - - err := u.closeEmptyBucketStore(userID) - switch { - case errors.Is(err, errBucketStoreNotEmpty): - continue - case errors.Is(err, errBucketStoreNotFound): - // This is OK, nothing was closed. - case err == nil: - level.Info(u.logger).Log("msg", "closed bucket store for user", "user", userID) - default: - level.Warn(u.logger).Log("msg", "failed to close bucket store for user", "user", userID, "err", err) - } - - userSyncDir := u.syncDirForUser(userID) - err = os.RemoveAll(userSyncDir) - if err == nil { - level.Info(u.logger).Log("msg", "deleted user sync directory", "dir", userSyncDir) - } else { - level.Warn(u.logger).Log("msg", "failed to delete user sync directory", "dir", userSyncDir, "err", err) - } - } -} - -func getUserIDFromGRPCContext(ctx context.Context) string { - meta, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "" - } - - values := meta.Get(tsdb.TenantIDExternalLabel) - if len(values) != 1 { - return "" - } - - return values[0] -} - -// ReplicaLabelRemover is a BaseFetcher modifier modifies external labels of existing blocks, it removes given replica labels from the metadata of blocks that have it. -type ReplicaLabelRemover struct { - logger log.Logger - - replicaLabels []string -} - -// NewReplicaLabelRemover creates a ReplicaLabelRemover. 
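getUserIDFromGRPCContext above treats an absent key and a multi-valued key the same way: both mean "no tenant". The same guard as a standalone helper (the function name is ours; google.golang.org/grpc/metadata is the package the deleted code really uses):

    import (
        "context"

        "google.golang.org/grpc/metadata"
    )

    // metadataValue returns the single value of key from the incoming gRPC
    // metadata, or "" when the key is absent or ambiguously multi-valued.
    func metadataValue(ctx context.Context, key string) string {
        md, ok := metadata.FromIncomingContext(ctx)
        if !ok {
            return ""
        }
        values := md.Get(key)
        if len(values) != 1 {
            return ""
        }
        return values[0]
    }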
-func NewReplicaLabelRemover(logger log.Logger, replicaLabels []string) *ReplicaLabelRemover { - return &ReplicaLabelRemover{logger: logger, replicaLabels: replicaLabels} -} - -// Modify modifies external labels of existing blocks, it removes given replica labels from the metadata of blocks that have it. -func (r *ReplicaLabelRemover) Modify(_ context.Context, metas map[ulid.ULID]*thanos_metadata.Meta, modified *extprom.TxGaugeVec) error { - for u, meta := range metas { - l := meta.Thanos.Labels - for _, replicaLabel := range r.replicaLabels { - if _, exists := l[replicaLabel]; exists { - level.Debug(r.logger).Log("msg", "replica label removed", "label", replicaLabel) - delete(l, replicaLabel) - } - } - metas[u].Thanos.Labels = l - } - return nil -} - -type spanSeriesServer struct { - storepb.Store_SeriesServer - - ctx context.Context -} - -func (s spanSeriesServer) Context() context.Context { - return s.ctx -} - -type limiter struct { - limiter *store.Limiter -} - -func (c *limiter) Reserve(num uint64) error { - err := c.limiter.Reserve(num) - if err != nil { - return httpgrpc.Errorf(http.StatusUnprocessableEntity, err.Error()) - } - - return nil -} - -func newChunksLimiterFactory(limits *validation.Overrides, userID string) store.ChunksLimiterFactory { - return func(failedCounter prometheus.Counter) store.ChunksLimiter { - // Since limit overrides could be live reloaded, we have to get the current user's limit - // each time a new limiter is instantiated. - return &limiter{ - limiter: store.NewLimiter(uint64(limits.MaxChunksPerQueryFromStore(userID)), failedCounter), - } - } -} - -func newSeriesLimiterFactory(limits *validation.Overrides, userID string) store.SeriesLimiterFactory { - return func(failedCounter prometheus.Counter) store.SeriesLimiter { - // Since limit overrides could be live reloaded, we have to get the current user's limit - // each time a new limiter is instantiated. - return &limiter{ - limiter: store.NewLimiter(uint64(limits.MaxFetchedSeriesPerQuery(userID)), failedCounter), - } - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/chunk_bytes_pool.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/chunk_bytes_pool.go deleted file mode 100644 index fee8ac402..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/chunk_bytes_pool.go +++ /dev/null @@ -1,50 +0,0 @@ -package storegateway - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/thanos-io/thanos/pkg/pool" -) - -type chunkBytesPool struct { - pool *pool.BucketedBytes - - // Metrics. 
- requestedBytes prometheus.Counter - returnedBytes prometheus.Counter -} - -func newChunkBytesPool(minBucketSize, maxBucketSize int, maxChunkPoolBytes uint64, reg prometheus.Registerer) (*chunkBytesPool, error) { - upstream, err := pool.NewBucketedBytes(minBucketSize, maxBucketSize, 2, maxChunkPoolBytes) - if err != nil { - return nil, err - } - - return &chunkBytesPool{ - pool: upstream, - requestedBytes: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_bucket_store_chunk_pool_requested_bytes_total", - Help: "Total bytes requested to chunk bytes pool.", - }), - returnedBytes: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_bucket_store_chunk_pool_returned_bytes_total", - Help: "Total bytes returned by the chunk bytes pool.", - }), - }, nil -} - -func (p *chunkBytesPool) Get(sz int) (*[]byte, error) { - buffer, err := p.pool.Get(sz) - if err != nil { - return buffer, err - } - - p.requestedBytes.Add(float64(sz)) - p.returnedBytes.Add(float64(cap(*buffer))) - - return buffer, err -} - -func (p *chunkBytesPool) Put(b *[]byte) { - p.pool.Put(b) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go deleted file mode 100644 index 741f73f3b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go +++ /dev/null @@ -1,376 +0,0 @@ -package storegateway - -import ( - "context" - "flag" - "fmt" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/logging" - - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/validation" -) - -const ( - syncReasonInitial = "initial" - syncReasonPeriodic = "periodic" - syncReasonRingChange = "ring-change" - - // sharedOptionWithQuerier is a message appended to all config options that should be also - // set on the querier in order to work correct. - sharedOptionWithQuerier = " This option needs be set both on the store-gateway and querier when running in microservices mode." - - // ringAutoForgetUnhealthyPeriods is how many consecutive timeout periods an unhealthy instance - // in the ring will be automatically removed. - ringAutoForgetUnhealthyPeriods = 10 -) - -var ( - supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle} - - // Validation errors. - errInvalidShardingStrategy = errors.New("invalid sharding strategy") - errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than 0") -) - -// Config holds the store gateway config. -type Config struct { - ShardingEnabled bool `yaml:"sharding_enabled"` - ShardingRing RingConfig `yaml:"sharding_ring" doc:"description=The hash ring configuration. 
This option is required only if blocks sharding is enabled."` - ShardingStrategy string `yaml:"sharding_strategy"` -} - -// RegisterFlags registers the Config flags. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.ShardingRing.RegisterFlags(f) - - f.BoolVar(&cfg.ShardingEnabled, "store-gateway.sharding-enabled", false, "Shard blocks across multiple store gateway instances."+sharedOptionWithQuerier) - f.StringVar(&cfg.ShardingStrategy, "store-gateway.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) -} - -// Validate the Config. -func (cfg *Config) Validate(limits validation.Limits) error { - if cfg.ShardingEnabled { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { - return errInvalidShardingStrategy - } - - if cfg.ShardingStrategy == util.ShardingStrategyShuffle && limits.StoreGatewayTenantShardSize <= 0 { - return errInvalidTenantShardSize - } - } - - return nil -} - -// StoreGateway is the Cortex service responsible to expose an API over the bucket -// where blocks are stored, supporting blocks sharding and replication across a pool -// of store gateway instances (optional). -type StoreGateway struct { - services.Service - - gatewayCfg Config - storageCfg cortex_tsdb.BlocksStorageConfig - logger log.Logger - stores *BucketStores - - // Ring used for sharding blocks. - ringLifecycler *ring.BasicLifecycler - ring *ring.Ring - - // Subservices manager (ring, lifecycler) - subservices *services.Manager - subservicesWatcher *services.FailureWatcher - - bucketSync *prometheus.CounterVec -} - -func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { - var ringStore kv.Client - - bucketClient, err := createBucketClient(storageCfg, logger, reg) - if err != nil { - return nil, err - } - - if gatewayCfg.ShardingEnabled { - ringStore, err = kv.NewClient( - gatewayCfg.ShardingRing.KVStore, - ring.GetCodec(), - kv.RegistererWithKVName(prometheus.WrapRegistererWithPrefix("cortex_", reg), "store-gateway"), - logger, - ) - if err != nil { - return nil, errors.Wrap(err, "create KV store client") - } - } - - return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, limits, logLevel, logger, reg) -} - -func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, bucketClient objstore.Bucket, ringStore kv.Client, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { - var err error - - g := &StoreGateway{ - gatewayCfg: gatewayCfg, - storageCfg: storageCfg, - logger: logger, - bucketSync: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_storegateway_bucket_sync_total", - Help: "Total number of times the bucket sync operation triggered.", - }, []string{"reason"}), - } - - // Init metrics. - g.bucketSync.WithLabelValues(syncReasonInitial) - g.bucketSync.WithLabelValues(syncReasonPeriodic) - g.bucketSync.WithLabelValues(syncReasonRingChange) - - // Init sharding strategy. 
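- // Illustrative summary of the branches below: with sharding disabled a no-op
- // strategy is used (every store-gateway loads every tenant's blocks); the
- // "default" strategy spreads blocks across the whole ring; "shuffle" confines
- // each tenant to a per-tenant subring sized by the StoreGatewayTenantShardSize limit.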
- var shardingStrategy ShardingStrategy - - if gatewayCfg.ShardingEnabled { - lifecyclerCfg, err := gatewayCfg.ShardingRing.ToLifecyclerConfig(logger) - if err != nil { - return nil, errors.Wrap(err, "invalid ring lifecycler config") - } - - // Define lifecycler delegates in reverse order (last to be called defined first because they're - // chained via "next delegate"). - delegate := ring.BasicLifecyclerDelegate(g) - delegate = ring.NewLeaveOnStoppingDelegate(delegate, logger) - delegate = ring.NewTokensPersistencyDelegate(gatewayCfg.ShardingRing.TokensFilePath, ring.JOINING, delegate, logger) - delegate = ring.NewAutoForgetDelegate(ringAutoForgetUnhealthyPeriods*gatewayCfg.ShardingRing.HeartbeatTimeout, delegate, logger) - - g.ringLifecycler, err = ring.NewBasicLifecycler(lifecyclerCfg, RingNameForServer, RingKey, ringStore, delegate, logger, prometheus.WrapRegistererWithPrefix("cortex_", reg)) - if err != nil { - return nil, errors.Wrap(err, "create ring lifecycler") - } - - ringCfg := gatewayCfg.ShardingRing.ToRingConfig() - g.ring, err = ring.NewWithStoreClientAndStrategy(ringCfg, RingNameForServer, RingKey, ringStore, ring.NewIgnoreUnhealthyInstancesReplicationStrategy(), prometheus.WrapRegistererWithPrefix("cortex_", reg), logger) - if err != nil { - return nil, errors.Wrap(err, "create ring client") - } - - // Instance the right strategy. - switch gatewayCfg.ShardingStrategy { - case util.ShardingStrategyDefault: - shardingStrategy = NewDefaultShardingStrategy(g.ring, lifecyclerCfg.Addr, logger) - case util.ShardingStrategyShuffle: - shardingStrategy = NewShuffleShardingStrategy(g.ring, lifecyclerCfg.ID, lifecyclerCfg.Addr, limits, logger) - default: - return nil, errInvalidShardingStrategy - } - } else { - shardingStrategy = NewNoShardingStrategy() - } - - g.stores, err = NewBucketStores(storageCfg, shardingStrategy, bucketClient, limits, logLevel, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "store-gateway"}, reg)) - if err != nil { - return nil, errors.Wrap(err, "create bucket stores") - } - - g.Service = services.NewBasicService(g.starting, g.running, g.stopping) - - return g, nil -} - -func (g *StoreGateway) starting(ctx context.Context) (err error) { - // In case this function will return error we want to unregister the instance - // from the ring. We do it ensuring dependencies are gracefully stopped if they - // were already started. - defer func() { - if err == nil || g.subservices == nil { - return - } - - if stopErr := services.StopManagerAndAwaitStopped(context.Background(), g.subservices); stopErr != nil { - level.Error(g.logger).Log("msg", "failed to gracefully stop store-gateway dependencies", "err", stopErr) - } - }() - - if g.gatewayCfg.ShardingEnabled { - // First of all we register the instance in the ring and wait - // until the lifecycler successfully started. - if g.subservices, err = services.NewManager(g.ringLifecycler, g.ring); err != nil { - return errors.Wrap(err, "unable to start store-gateway dependencies") - } - - g.subservicesWatcher = services.NewFailureWatcher() - g.subservicesWatcher.WatchManager(g.subservices) - - if err = services.StartManagerAndAwaitHealthy(ctx, g.subservices); err != nil { - return errors.Wrap(err, "unable to start store-gateway dependencies") - } - - // Wait until the ring client detected this instance in the JOINING state to - // make sure that when we'll run the initial sync we already know the tokens - // assigned to this instance. 
- level.Info(g.logger).Log("msg", "waiting until store-gateway is JOINING in the ring") - if err := ring.WaitInstanceState(ctx, g.ring, g.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil { - return err - } - level.Info(g.logger).Log("msg", "store-gateway is JOINING in the ring") - - // In the event of a cluster cold start or scale up of 2+ store-gateway instances at the same - // time, we may end up in a situation where each new store-gateway instance starts at a slightly - // different time and thus each one starts with a different state of the ring. It's better - // to just wait the ring stability for a short time. - if g.gatewayCfg.ShardingRing.WaitStabilityMinDuration > 0 { - minWaiting := g.gatewayCfg.ShardingRing.WaitStabilityMinDuration - maxWaiting := g.gatewayCfg.ShardingRing.WaitStabilityMaxDuration - - level.Info(g.logger).Log("msg", "waiting until store-gateway ring topology is stable", "min_waiting", minWaiting.String(), "max_waiting", maxWaiting.String()) - if err := ring.WaitRingStability(ctx, g.ring, BlocksOwnerSync, minWaiting, maxWaiting); err != nil { - level.Warn(g.logger).Log("msg", "store-gateway ring topology is not stable after the max waiting time, proceeding anyway") - } else { - level.Info(g.logger).Log("msg", "store-gateway ring topology is stable") - } - } - } - - // At this point, if sharding is enabled, the instance is registered with some tokens - // and we can run the initial synchronization. - g.bucketSync.WithLabelValues(syncReasonInitial).Inc() - if err = g.stores.InitialSync(ctx); err != nil { - return errors.Wrap(err, "initial blocks synchronization") - } - - if g.gatewayCfg.ShardingEnabled { - // Now that the initial sync is done, we should have loaded all blocks - // assigned to our shard, so we can switch to ACTIVE and start serving - // requests. - if err = g.ringLifecycler.ChangeState(ctx, ring.ACTIVE); err != nil { - return errors.Wrapf(err, "switch instance to %s in the ring", ring.ACTIVE) - } - - // Wait until the ring client detected this instance in the ACTIVE state to - // make sure that when we'll run the loop it won't be detected as a ring - // topology change. - level.Info(g.logger).Log("msg", "waiting until store-gateway is ACTIVE in the ring") - if err := ring.WaitInstanceState(ctx, g.ring, g.ringLifecycler.GetInstanceID(), ring.ACTIVE); err != nil { - return err - } - level.Info(g.logger).Log("msg", "store-gateway is ACTIVE in the ring") - } - - return nil -} - -func (g *StoreGateway) running(ctx context.Context) error { - var ringTickerChan <-chan time.Time - var ringLastState ring.ReplicationSet - - // Apply a jitter to the sync frequency in order to increase the probability - // of hitting the shared cache (if any). - syncTicker := time.NewTicker(util.DurationWithJitter(g.storageCfg.BucketStore.SyncInterval, 0.2)) - defer syncTicker.Stop() - - if g.gatewayCfg.ShardingEnabled { - ringLastState, _ = g.ring.GetAllHealthy(BlocksOwnerSync) // nolint:errcheck - ringTicker := time.NewTicker(util.DurationWithJitter(g.gatewayCfg.ShardingRing.RingCheckPeriod, 0.2)) - defer ringTicker.Stop() - ringTickerChan = ringTicker.C - } - - for { - select { - case <-syncTicker.C: - g.syncStores(ctx, syncReasonPeriodic) - case <-ringTickerChan: - // We ignore the error because in case of error it will return an empty - // replication set which we use to compare with the previous state. 
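- // Illustrative note: HasReplicationSetChanged compares the sets of instances
- // (addresses, states, tokens) while ignoring heartbeat timestamps, so a
- // resync is only triggered by real topology changes.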
- currRingState, _ := g.ring.GetAllHealthy(BlocksOwnerSync) // nolint:errcheck - - if ring.HasReplicationSetChanged(ringLastState, currRingState) { - ringLastState = currRingState - g.syncStores(ctx, syncReasonRingChange) - } - case <-ctx.Done(): - return nil - case err := <-g.subservicesWatcher.Chan(): - return errors.Wrap(err, "store gateway subservice failed") - } - } -} - -func (g *StoreGateway) stopping(_ error) error { - if g.subservices != nil { - return services.StopManagerAndAwaitStopped(context.Background(), g.subservices) - } - return nil -} - -func (g *StoreGateway) syncStores(ctx context.Context, reason string) { - level.Info(g.logger).Log("msg", "synchronizing TSDB blocks for all users", "reason", reason) - g.bucketSync.WithLabelValues(reason).Inc() - - if err := g.stores.SyncBlocks(ctx); err != nil { - level.Warn(g.logger).Log("msg", "failed to synchronize TSDB blocks", "reason", reason, "err", err) - } else { - level.Info(g.logger).Log("msg", "successfully synchronized TSDB blocks for all users", "reason", reason) - } -} - -func (g *StoreGateway) Series(req *storepb.SeriesRequest, srv storegatewaypb.StoreGateway_SeriesServer) error { - return g.stores.Series(req, srv) -} - -// LabelNames implements the Storegateway proto service. -func (g *StoreGateway) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { - return g.stores.LabelNames(ctx, req) -} - -// LabelValues implements the Storegateway proto service. -func (g *StoreGateway) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { - return g.stores.LabelValues(ctx, req) -} - -func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { - // When we initialize the store-gateway instance in the ring we want to start from - // a clean situation, so whatever is the state we set it JOINING, while we keep existing - // tokens (if any) or the ones loaded from file. - var tokens []uint32 - if instanceExists { - tokens = instanceDesc.GetTokens() - } - - takenTokens := ringDesc.GetTokens() - newTokens := ring.GenerateTokens(RingNumTokens-len(tokens), takenTokens) - - // Tokens sorting will be enforced by the parent caller. - tokens = append(tokens, newTokens...) 
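- // Illustrative note: the call above only tops the instance up to RingNumTokens
- // (512), so an instance that restored a full token set from its tokens file
- // generates nothing new, while a fresh instance generates the whole set while
- // avoiding tokens already taken in the ring.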
- - return ring.JOINING, tokens -} - -func (g *StoreGateway) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} -func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { -} - -func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { - bucketClient, err := bucket.NewClient(context.Background(), cfg.Bucket, "store-gateway", logger, reg) - if err != nil { - return nil, errors.Wrap(err, "create bucket client") - } - - return bucketClient, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go deleted file mode 100644 index a823eb6f4..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go +++ /dev/null @@ -1,53 +0,0 @@ -package storegateway - -import ( - "net/http" - "text/template" - - "github.com/go-kit/log/level" - - util_log "github.com/cortexproject/cortex/pkg/util/log" - "github.com/cortexproject/cortex/pkg/util/services" -) - -var ( - statusPageTemplate = template.Must(template.New("main").Parse(`
- <!DOCTYPE html>
- <html>
- <head>
- <meta charset="UTF-8">
- <title>Cortex Store Gateway Ring</title>
- </head>
- <body>
- <h1>Cortex Store Gateway Ring</h1>
- <p>{{ .Message }}</p>
- </body>
- </html>
- - `)) -) - -func writeMessage(w http.ResponseWriter, message string) { - w.WriteHeader(http.StatusOK) - err := statusPageTemplate.Execute(w, struct { - Message string - }{Message: message}) - - if err != nil { - level.Error(util_log.Logger).Log("msg", "unable to serve store gateway ring page", "err", err) - } -} - -func (c *StoreGateway) RingHandler(w http.ResponseWriter, req *http.Request) { - if !c.gatewayCfg.ShardingEnabled { - writeMessage(w, "Store gateway has no ring because sharding is disabled.") - return - } - - if c.State() != services.Running { - // we cannot read the ring before the store gateway is in Running state, - // because that would lead to race condition. - writeMessage(w, "Store gateway is not running yet.") - return - } - - c.ring.ServeHTTP(w, req) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go deleted file mode 100644 index f73065b77..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go +++ /dev/null @@ -1,149 +0,0 @@ -package storegateway - -import ( - "flag" - "fmt" - "os" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - - "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util/flagext" - util_log "github.com/cortexproject/cortex/pkg/util/log" -) - -const ( - // RingKey is the key under which we store the store gateways ring in the KVStore. - RingKey = "store-gateway" - - // RingNameForServer is the name of the ring used by the store gateway server. - RingNameForServer = "store-gateway" - - // RingNameForClient is the name of the ring used by the store gateway client (we need - // a different name to avoid clashing Prometheus metrics when running in single-binary). - RingNameForClient = "store-gateway-client" - - // We use a safe default instead of exposing to config option to the user - // in order to simplify the config. - RingNumTokens = 512 -) - -var ( - // BlocksOwnerSync is the operation used to check the authoritative owners of a block - // (replicas included). - BlocksOwnerSync = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE, ring.LEAVING}, func(s ring.InstanceState) bool { - // Extend the replication set only when an instance is LEAVING so that - // their blocks will be loaded sooner on the next authoritative owner(s). - return s == ring.LEAVING - }) - - // BlocksOwnerRead is the operation used to check the authoritative owners of a block - // (replicas included) that are available for queries (a store-gateway is available for - // queries only when ACTIVE). - BlocksOwnerRead = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, nil) - - // BlocksRead is the operation run by the querier to query blocks via the store-gateway. - BlocksRead = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, func(s ring.InstanceState) bool { - // Blocks can only be queried from ACTIVE instances. However, if the block belongs to - // a non-active instance, then we should extend the replication set and try to query it - // from the next ACTIVE instance in the ring (which is expected to have it because a - // store-gateway keeps their previously owned blocks until new owners are ACTIVE). - return s != ring.ACTIVE - }) -) - -// RingConfig masks the ring lifecycler config which contains -// many options not really required by the store gateways ring. 
This config -// is used to strip down the config to the minimum, and avoid confusion -// to the user. -type RingConfig struct { - KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances. This option needs be set both on the store-gateway and querier when running in microservices mode."` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - ReplicationFactor int `yaml:"replication_factor"` - TokensFilePath string `yaml:"tokens_file_path"` - ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - - // Wait ring stability. - WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` - WaitStabilityMaxDuration time.Duration `yaml:"wait_stability_max_duration"` - - // Instance details - InstanceID string `yaml:"instance_id" doc:"hidden"` - InstanceInterfaceNames []string `yaml:"instance_interface_names"` - InstancePort int `yaml:"instance_port" doc:"hidden"` - InstanceAddr string `yaml:"instance_addr" doc:"hidden"` - InstanceZone string `yaml:"instance_availability_zone"` - - // Injected internally - ListenPort int `yaml:"-"` - RingCheckPeriod time.Duration `yaml:"-"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { - hostname, err := os.Hostname() - if err != nil { - level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) - os.Exit(1) - } - - ringFlagsPrefix := "store-gateway.sharding-ring." - - // Ring flags - cfg.KVStore.RegisterFlagsWithPrefix(ringFlagsPrefix, "collectors/", f) - f.DurationVar(&cfg.HeartbeatPeriod, ringFlagsPrefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") - f.DurationVar(&cfg.HeartbeatTimeout, ringFlagsPrefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which store gateways are considered unhealthy within the ring. 0 = never (timeout disabled)."+sharedOptionWithQuerier) - f.IntVar(&cfg.ReplicationFactor, ringFlagsPrefix+"replication-factor", 3, "The replication factor to use when sharding blocks."+sharedOptionWithQuerier) - f.StringVar(&cfg.TokensFilePath, ringFlagsPrefix+"tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") - f.BoolVar(&cfg.ZoneAwarenessEnabled, ringFlagsPrefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate blocks across different availability zones.") - - // Wait stability flags. - f.DurationVar(&cfg.WaitStabilityMinDuration, ringFlagsPrefix+"wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 0 to disable.") - f.DurationVar(&cfg.WaitStabilityMaxDuration, ringFlagsPrefix+"wait-stability-max-duration", 5*time.Minute, "Maximum time to wait for ring stability at startup. 
If the store-gateway ring keeps changing after this period of time, the store-gateway will start anyway.") - - // Instance flags - cfg.InstanceInterfaceNames = []string{"eth0", "en0"} - f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), ringFlagsPrefix+"instance-interface-names", "Name of network interface to read address from.") - f.StringVar(&cfg.InstanceAddr, ringFlagsPrefix+"instance-addr", "", "IP address to advertise in the ring.") - f.IntVar(&cfg.InstancePort, ringFlagsPrefix+"instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") - f.StringVar(&cfg.InstanceID, ringFlagsPrefix+"instance-id", hostname, "Instance ID to register in the ring.") - f.StringVar(&cfg.InstanceZone, ringFlagsPrefix+"instance-availability-zone", "", "The availability zone where this instance is running. Required if zone-awareness is enabled.") - - // Defaults for internal settings. - cfg.RingCheckPeriod = 5 * time.Second -} - -func (cfg *RingConfig) ToRingConfig() ring.Config { - rc := ring.Config{} - flagext.DefaultValues(&rc) - - rc.KVStore = cfg.KVStore - rc.HeartbeatTimeout = cfg.HeartbeatTimeout - rc.ReplicationFactor = cfg.ReplicationFactor - rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled - rc.SubringCacheDisabled = true - - return rc -} - -func (cfg *RingConfig) ToLifecyclerConfig(logger log.Logger) (ring.BasicLifecyclerConfig, error) { - instanceAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, cfg.InstanceInterfaceNames, logger) - if err != nil { - return ring.BasicLifecyclerConfig{}, err - } - - instancePort := ring.GetInstancePort(cfg.InstancePort, cfg.ListenPort) - - return ring.BasicLifecyclerConfig{ - ID: cfg.InstanceID, - Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), - Zone: cfg.InstanceZone, - HeartbeatPeriod: cfg.HeartbeatPeriod, - TokensObservePeriod: 0, - NumTokens: RingNumTokens, - }, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go deleted file mode 100644 index a4d6176d5..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go +++ /dev/null @@ -1,78 +0,0 @@ -package storegateway - -import ( - "context" - "time" - - "github.com/go-kit/log" - "github.com/oklog/ulid" - "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" - - "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" -) - -type MetadataFilterWithBucketIndex interface { - // FilterWithBucketIndex is like Thanos MetadataFilter.Filter() but it provides in input the bucket index too. - FilterWithBucketIndex(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, idx *bucketindex.Index, synced *extprom.TxGaugeVec) error -} - -// IgnoreDeletionMarkFilter is like the Thanos IgnoreDeletionMarkFilter, but it also implements -// the MetadataFilterWithBucketIndex interface. -type IgnoreDeletionMarkFilter struct { - upstream *block.IgnoreDeletionMarkFilter - - delay time.Duration - deletionMarkMap map[ulid.ULID]*metadata.DeletionMark -} - -// NewIgnoreDeletionMarkFilter creates IgnoreDeletionMarkFilter. 
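-// For illustration: with delay set to e.g. 5m, blocks whose deletion mark is
-// older than 5 minutes are dropped from the metas map (counted under
-// MarkedForDeletionMeta), while blocks marked more recently remain visible so
-// queriers still have time to notice the deletion before the block disappears.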
-func NewIgnoreDeletionMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader, delay time.Duration, concurrency int) *IgnoreDeletionMarkFilter { - return &IgnoreDeletionMarkFilter{ - upstream: block.NewIgnoreDeletionMarkFilter(logger, bkt, delay, concurrency), - delay: delay, - } -} - -// DeletionMarkBlocks returns blocks that were marked for deletion. -func (f *IgnoreDeletionMarkFilter) DeletionMarkBlocks() map[ulid.ULID]*metadata.DeletionMark { - // If the cached deletion marks exist it means the filter function was called with the bucket - // index, so it's safe to return it. - if f.deletionMarkMap != nil { - return f.deletionMarkMap - } - - return f.upstream.DeletionMarkBlocks() -} - -// Filter implements block.MetadataFilter. -func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec) error { - return f.upstream.Filter(ctx, metas, synced) -} - -// FilterWithBucketIndex implements MetadataFilterWithBucketIndex. -func (f *IgnoreDeletionMarkFilter) FilterWithBucketIndex(_ context.Context, metas map[ulid.ULID]*metadata.Meta, idx *bucketindex.Index, synced *extprom.TxGaugeVec) error { - // Build a map of block deletion marks - marks := make(map[ulid.ULID]*metadata.DeletionMark, len(idx.BlockDeletionMarks)) - for _, mark := range idx.BlockDeletionMarks { - marks[mark.ID] = mark.ThanosDeletionMark() - } - - // Keep it cached. - f.deletionMarkMap = marks - - for _, mark := range marks { - if _, ok := metas[mark.ID]; !ok { - continue - } - - if time.Since(time.Unix(mark.DeletionTime, 0)).Seconds() > f.delay.Seconds() { - synced.WithLabelValues(block.MarkedForDeletionMeta).Inc() - delete(metas, mark.ID) - } - } - - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go deleted file mode 100644 index 238be94de..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go +++ /dev/null @@ -1,79 +0,0 @@ -package storegateway - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/cortexproject/cortex/pkg/util" -) - -// This struct aggregates metrics exported by Thanos MetaFetcher -// and re-exports those aggregates as Cortex metrics. -type MetadataFetcherMetrics struct { - regs *util.UserRegistries - - // Exported metrics, gathered from Thanos MetaFetcher - syncs *prometheus.Desc - syncFailures *prometheus.Desc - syncDuration *prometheus.Desc - syncConsistencyDelay *prometheus.Desc - synced *prometheus.Desc - - // Ignored: - // blocks_meta_modified - // blocks_meta_base_syncs_total -} - -func NewMetadataFetcherMetrics() *MetadataFetcherMetrics { - return &MetadataFetcherMetrics{ - regs: util.NewUserRegistries(), - - // When mapping new metadata fetcher metrics from Thanos, please remember to add these metrics - // to our internal fetcherMetrics implementation too. 
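- // Illustrative note: each tenant's fetcher registry is added via
- // AddUserRegistry, and Collect() below re-exports sums (or maxes) of the
- // per-tenant Thanos fetcher metrics under the cortex_blocks_meta_* names
- // declared here.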
- syncs: prometheus.NewDesc( - "cortex_blocks_meta_syncs_total", - "Total blocks metadata synchronization attempts", - nil, nil), - syncFailures: prometheus.NewDesc( - "cortex_blocks_meta_sync_failures_total", - "Total blocks metadata synchronization failures", - nil, nil), - syncDuration: prometheus.NewDesc( - "cortex_blocks_meta_sync_duration_seconds", - "Duration of the blocks metadata synchronization in seconds", - nil, nil), - syncConsistencyDelay: prometheus.NewDesc( - "cortex_blocks_meta_sync_consistency_delay_seconds", - "Configured consistency delay in seconds.", - nil, nil), - synced: prometheus.NewDesc( - "cortex_blocks_meta_synced", - "Reflects current state of synced blocks (over all tenants).", - []string{"state"}, nil), - } -} - -func (m *MetadataFetcherMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { - m.regs.AddUserRegistry(user, reg) -} - -func (m *MetadataFetcherMetrics) RemoveUserRegistry(user string) { - m.regs.RemoveUserRegistry(user, false) -} - -func (m *MetadataFetcherMetrics) Describe(out chan<- *prometheus.Desc) { - out <- m.syncs - out <- m.syncFailures - out <- m.syncDuration - out <- m.syncConsistencyDelay - out <- m.synced -} - -func (m *MetadataFetcherMetrics) Collect(out chan<- prometheus.Metric) { - data := m.regs.BuildMetricFamiliesPerUser() - - data.SendSumOfCounters(out, m.syncs, "blocks_meta_syncs_total") - data.SendSumOfCounters(out, m.syncFailures, "blocks_meta_sync_failures_total") - data.SendSumOfHistograms(out, m.syncDuration, "blocks_meta_sync_duration_seconds") - data.SendMaxOfGauges(out, m.syncConsistencyDelay, "consistency_delay_seconds") - data.SendSumOfGaugesWithLabels(out, m.synced, "blocks_meta_synced", "state") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/partitioner.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/partitioner.go deleted file mode 100644 index 816a45d8a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/partitioner.go +++ /dev/null @@ -1,64 +0,0 @@ -package storegateway - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/thanos-io/thanos/pkg/store" -) - -type gapBasedPartitioner struct { - upstream store.Partitioner - - // Metrics. 
- requestedBytes prometheus.Counter - requestedRanges prometheus.Counter - expandedBytes prometheus.Counter - expandedRanges prometheus.Counter -} - -func newGapBasedPartitioner(maxGapBytes uint64, reg prometheus.Registerer) *gapBasedPartitioner { - return &gapBasedPartitioner{ - upstream: store.NewGapBasedPartitioner(maxGapBytes), - requestedBytes: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_bucket_store_partitioner_requested_bytes_total", - Help: "Total size of byte ranges required to fetch from the storage before they are passed to the partitioner.", - }), - expandedBytes: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_bucket_store_partitioner_expanded_bytes_total", - Help: "Total size of byte ranges returned by the partitioner after they've been combined together to reduce the number of bucket API calls.", - }), - requestedRanges: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_bucket_store_partitioner_requested_ranges_total", - Help: "Total number of byte ranges required to fetch from the storage before they are passed to the partitioner.", - }), - expandedRanges: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Name: "cortex_bucket_store_partitioner_expanded_ranges_total", - Help: "Total number of byte ranges returned by the partitioner after they've been combined together to reduce the number of bucket API calls.", - }), - } -} - -func (p *gapBasedPartitioner) Partition(length int, rng func(int) (uint64, uint64)) []store.Part { - // Calculate the size of requested ranges. - requestedBytes := uint64(0) - for i := 0; i < length; i++ { - start, end := rng(i) - requestedBytes += end - start - } - - // Run the upstream partitioner to compute the actual ranges that will be fetched. - parts := p.upstream.Partition(length, rng) - - // Calculate the size of ranges that will be fetched. - expandedBytes := uint64(0) - for _, p := range parts { - expandedBytes += p.End - p.Start - } - - p.requestedBytes.Add(float64(requestedBytes)) - p.expandedBytes.Add(float64(expandedBytes)) - p.requestedRanges.Add(float64(length)) - p.expandedRanges.Add(float64(len(parts))) - - return parts -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go deleted file mode 100644 index 64f0f19de..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go +++ /dev/null @@ -1,244 +0,0 @@ -package storegateway - -import ( - "context" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" - - "github.com/cortexproject/cortex/pkg/ring" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" -) - -const ( - shardExcludedMeta = "shard-excluded" -) - -type ShardingStrategy interface { - // FilterUsers whose blocks should be loaded by the store-gateway. Returns the list of user IDs - // that should be synced by the store-gateway. - FilterUsers(ctx context.Context, userIDs []string) []string - - // FilterBlocks filters metas in-place keeping only blocks that should be loaded by the store-gateway. - // The provided loaded map contains blocks which have been previously returned by this function and - // are now loaded or loading in the store-gateway. 
- FilterBlocks(ctx context.Context, userID string, metas map[ulid.ULID]*metadata.Meta, loaded map[ulid.ULID]struct{}, synced *extprom.TxGaugeVec) error -} - -// ShardingLimits is the interface that should be implemented by the limits provider, -// limiting the scope of the limits to the ones required by sharding strategies. -type ShardingLimits interface { - StoreGatewayTenantShardSize(userID string) int -} - -// NoShardingStrategy is a no-op strategy. When this strategy is used, no tenant/block is filtered out. -type NoShardingStrategy struct{} - -func NewNoShardingStrategy() *NoShardingStrategy { - return &NoShardingStrategy{} -} - -func (s *NoShardingStrategy) FilterUsers(_ context.Context, userIDs []string) []string { - return userIDs -} - -func (s *NoShardingStrategy) FilterBlocks(_ context.Context, _ string, _ map[ulid.ULID]*metadata.Meta, _ map[ulid.ULID]struct{}, _ *extprom.TxGaugeVec) error { - return nil -} - -// DefaultShardingStrategy is a sharding strategy based on the hash ring formed by store-gateways. -// Not go-routine safe. -type DefaultShardingStrategy struct { - r *ring.Ring - instanceAddr string - logger log.Logger -} - -// NewDefaultShardingStrategy creates DefaultShardingStrategy. -func NewDefaultShardingStrategy(r *ring.Ring, instanceAddr string, logger log.Logger) *DefaultShardingStrategy { - return &DefaultShardingStrategy{ - r: r, - instanceAddr: instanceAddr, - logger: logger, - } -} - -// FilterUsers implements ShardingStrategy. -func (s *DefaultShardingStrategy) FilterUsers(_ context.Context, userIDs []string) []string { - return userIDs -} - -// FilterBlocks implements ShardingStrategy. -func (s *DefaultShardingStrategy) FilterBlocks(_ context.Context, _ string, metas map[ulid.ULID]*metadata.Meta, loaded map[ulid.ULID]struct{}, synced *extprom.TxGaugeVec) error { - filterBlocksByRingSharding(s.r, s.instanceAddr, metas, loaded, synced, s.logger) - return nil -} - -// ShuffleShardingStrategy is a shuffle sharding strategy, based on the hash ring formed by store-gateways, -// where each tenant blocks are sharded across a subset of store-gateway instances. -type ShuffleShardingStrategy struct { - r *ring.Ring - instanceID string - instanceAddr string - limits ShardingLimits - logger log.Logger -} - -// NewShuffleShardingStrategy makes a new ShuffleShardingStrategy. -func NewShuffleShardingStrategy(r *ring.Ring, instanceID, instanceAddr string, limits ShardingLimits, logger log.Logger) *ShuffleShardingStrategy { - return &ShuffleShardingStrategy{ - r: r, - instanceID: instanceID, - instanceAddr: instanceAddr, - limits: limits, - logger: logger, - } -} - -// FilterUsers implements ShardingStrategy. -func (s *ShuffleShardingStrategy) FilterUsers(_ context.Context, userIDs []string) []string { - var filteredIDs []string - - for _, userID := range userIDs { - subRing := GetShuffleShardingSubring(s.r, userID, s.limits) - - // Include the user only if it belongs to this store-gateway shard. - if subRing.HasInstance(s.instanceID) { - filteredIDs = append(filteredIDs, userID) - } - } - - return filteredIDs -} - -// FilterBlocks implements ShardingStrategy. 
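-// Illustrative sketch of the lookup performed below (all names from this file):
-//
-//	subRing := GetShuffleShardingSubring(s.r, userID, s.limits)
-//	// with a tenant shard size of 3 the subring holds just the 3 owning
-//	// instances; a shard size <= 0 falls back to the full ring.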
-func (s *ShuffleShardingStrategy) FilterBlocks(_ context.Context, userID string, metas map[ulid.ULID]*metadata.Meta, loaded map[ulid.ULID]struct{}, synced *extprom.TxGaugeVec) error { - subRing := GetShuffleShardingSubring(s.r, userID, s.limits) - filterBlocksByRingSharding(subRing, s.instanceAddr, metas, loaded, synced, s.logger) - return nil -} - -func filterBlocksByRingSharding(r ring.ReadRing, instanceAddr string, metas map[ulid.ULID]*metadata.Meta, loaded map[ulid.ULID]struct{}, synced *extprom.TxGaugeVec, logger log.Logger) { - bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet() - - for blockID := range metas { - key := cortex_tsdb.HashBlockID(blockID) - - // Check if the block is owned by the store-gateway - set, err := r.Get(key, BlocksOwnerSync, bufDescs, bufHosts, bufZones) - - // If an error occurs while checking the ring, we keep the previously loaded blocks. - if err != nil { - if _, ok := loaded[blockID]; ok { - level.Warn(logger).Log("msg", "failed to check block owner but block is kept because was previously loaded", "block", blockID.String(), "err", err) - } else { - level.Warn(logger).Log("msg", "failed to check block owner and block has been excluded because was not previously loaded", "block", blockID.String(), "err", err) - - // Skip the block. - synced.WithLabelValues(shardExcludedMeta).Inc() - delete(metas, blockID) - } - - continue - } - - // Keep the block if it is owned by the store-gateway. - if set.Includes(instanceAddr) { - continue - } - - // The block is not owned by the store-gateway. However, if it's currently loaded - // we can safely unload it only once at least 1 authoritative owner is available - // for queries. - if _, ok := loaded[blockID]; ok { - // The ring Get() returns an error if there's no available instance. - if _, err := r.Get(key, BlocksOwnerRead, bufDescs, bufHosts, bufZones); err != nil { - // Keep the block. - continue - } - } - - // The block is not owned by the store-gateway and there's at least 1 available - // authoritative owner available for queries, so we can filter it out (and unload - // it if it was loaded). - synced.WithLabelValues(shardExcludedMeta).Inc() - delete(metas, blockID) - } -} - -// GetShuffleShardingSubring returns the subring to be used for a given user. This function -// should be used both by store-gateway and querier in order to guarantee the same logic is used. -func GetShuffleShardingSubring(ring *ring.Ring, userID string, limits ShardingLimits) ring.ReadRing { - shardSize := limits.StoreGatewayTenantShardSize(userID) - - // A shard size of 0 means shuffle sharding is disabled for this specific user, - // so we just return the full ring so that blocks will be sharded across all store-gateways. - if shardSize <= 0 { - return ring - } - - return ring.ShuffleShard(userID, shardSize) -} - -type shardingMetadataFilterAdapter struct { - userID string - strategy ShardingStrategy - - // Keep track of the last blocks returned by the Filter() function. - lastBlocks map[ulid.ULID]struct{} -} - -func NewShardingMetadataFilterAdapter(userID string, strategy ShardingStrategy) block.MetadataFilter { - return &shardingMetadataFilterAdapter{ - userID: userID, - strategy: strategy, - lastBlocks: map[ulid.ULID]struct{}{}, - } -} - -// Filter implements block.MetadataFilter. -// This function is NOT safe for use by multiple goroutines concurrently. 
-func (a *shardingMetadataFilterAdapter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec) error { - if err := a.strategy.FilterBlocks(ctx, a.userID, metas, a.lastBlocks, synced); err != nil { - return err - } - - // Keep track of the last filtered blocks. - a.lastBlocks = make(map[ulid.ULID]struct{}, len(metas)) - for blockID := range metas { - a.lastBlocks[blockID] = struct{}{} - } - - return nil -} - -type shardingBucketReaderAdapter struct { - objstore.InstrumentedBucketReader - - userID string - strategy ShardingStrategy -} - -func NewShardingBucketReaderAdapter(userID string, strategy ShardingStrategy, wrapped objstore.InstrumentedBucketReader) objstore.InstrumentedBucketReader { - return &shardingBucketReaderAdapter{ - InstrumentedBucketReader: wrapped, - userID: userID, - strategy: strategy, - } -} - -// Iter implements objstore.BucketReader. -func (a *shardingBucketReaderAdapter) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - // Skip iterating the bucket if the tenant doesn't belong to the shard. From the caller - // perspective, this will look like the tenant has no blocks in the storage. - if len(a.strategy.FilterUsers(ctx, []string{a.userID})) == 0 { - return nil - } - - return a.InstrumentedBucketReader.Iter(ctx, dir, f, options...) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go deleted file mode 100644 index fa5913faf..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: gateway.proto - -package storegatewaypb - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - storepb "github.com/thanos-io/thanos/pkg/store/storepb" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } - -var fileDescriptor_f1a937782ebbded5 = []byte{ - // 257 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x4f, 0x2c, 0x49, - 0x2d, 0x4f, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x72, 0x0b, 0x92, 0xa4, - 0xcc, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x4b, 0x32, 0x12, 0xf3, - 0xf2, 0x8b, 0x75, 0x33, 0xf3, 0xa1, 0x2c, 0xfd, 0x82, 0xec, 0x74, 0xfd, 0xe2, 0x92, 0xfc, 0xa2, - 0x54, 0x08, 0x59, 0x90, 0xa4, 0x5f, 0x54, 0x90, 0x0c, 0x31, 0xc3, 0xe8, 0x1a, 0x23, 0x17, 0x4f, - 0x30, 0x48, 0xd4, 0x1d, 0x62, 0x96, 0x90, 0x25, 0x17, 0x5b, 0x70, 0x6a, 0x51, 0x66, 0x6a, 0xb1, - 0x90, 0xa8, 0x1e, 0x44, 0xbf, 0x1e, 0x84, 0x1f, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, - 0x86, 0x2e, 0x5c, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x6a, 0xc0, 0x28, 0xe4, 0xcc, 0xc5, 0xe5, 0x93, - 0x98, 0x94, 0x9a, 0xe3, 0x97, 0x98, 0x9b, 0x5a, 0x2c, 0x24, 0x09, 0x53, 0x87, 0x10, 0x83, 0x19, - 0x21, 0x85, 0x4d, 0x0a, 0x62, 0x8c, 0x90, 0x1b, 0x17, 0x37, 0x58, 0x34, 0x2c, 0x31, 0xa7, 0x34, - 0xb5, 0x58, 0x08, 0x55, 0x29, 0x44, 0x10, 0x66, 0x8c, 0x34, 0x56, 0x39, 0x88, 0x39, 0x4e, 0x2e, - 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, - 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, - 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, - 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xf8, 0xc0, 0x21, 0x04, 0x0f, 0xd7, 0x24, 0x36, - 0x70, 0x28, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xec, 0xe6, 0x0a, 0x7a, 0x01, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// StoreGatewayClient is the client API for StoreGateway service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type StoreGatewayClient interface { - // Series streams each Series for given label matchers and time range. - // - // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain - // partition of the single series, but once a new series is started to be streamed it means that no more data will - // be sent for previous one. - // - // Series are sorted. - Series(ctx context.Context, in *storepb.SeriesRequest, opts ...grpc.CallOption) (StoreGateway_SeriesClient, error) - // LabelNames returns all label names that is available. - LabelNames(ctx context.Context, in *storepb.LabelNamesRequest, opts ...grpc.CallOption) (*storepb.LabelNamesResponse, error) - // LabelValues returns all label values for given label name. 
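-// For illustration, a typical unary call through this generated client,
-// assuming an already-dialled *grpc.ClientConn cc:
-//
-//	client := NewStoreGatewayClient(cc)
-//	resp, err := client.LabelValues(ctx, &storepb.LabelValuesRequest{Label: "job"})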
- LabelValues(ctx context.Context, in *storepb.LabelValuesRequest, opts ...grpc.CallOption) (*storepb.LabelValuesResponse, error) -} - -type storeGatewayClient struct { - cc *grpc.ClientConn -} - -func NewStoreGatewayClient(cc *grpc.ClientConn) StoreGatewayClient { - return &storeGatewayClient{cc} -} - -func (c *storeGatewayClient) Series(ctx context.Context, in *storepb.SeriesRequest, opts ...grpc.CallOption) (StoreGateway_SeriesClient, error) { - stream, err := c.cc.NewStream(ctx, &_StoreGateway_serviceDesc.Streams[0], "/gatewaypb.StoreGateway/Series", opts...) - if err != nil { - return nil, err - } - x := &storeGatewaySeriesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type StoreGateway_SeriesClient interface { - Recv() (*storepb.SeriesResponse, error) - grpc.ClientStream -} - -type storeGatewaySeriesClient struct { - grpc.ClientStream -} - -func (x *storeGatewaySeriesClient) Recv() (*storepb.SeriesResponse, error) { - m := new(storepb.SeriesResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storeGatewayClient) LabelNames(ctx context.Context, in *storepb.LabelNamesRequest, opts ...grpc.CallOption) (*storepb.LabelNamesResponse, error) { - out := new(storepb.LabelNamesResponse) - err := c.cc.Invoke(ctx, "/gatewaypb.StoreGateway/LabelNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storeGatewayClient) LabelValues(ctx context.Context, in *storepb.LabelValuesRequest, opts ...grpc.CallOption) (*storepb.LabelValuesResponse, error) { - out := new(storepb.LabelValuesResponse) - err := c.cc.Invoke(ctx, "/gatewaypb.StoreGateway/LabelValues", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// StoreGatewayServer is the server API for StoreGateway service. -type StoreGatewayServer interface { - // Series streams each Series for given label matchers and time range. - // - // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain - // partition of the single series, but once a new series is started to be streamed it means that no more data will - // be sent for previous one. - // - // Series are sorted. - Series(*storepb.SeriesRequest, StoreGateway_SeriesServer) error - // LabelNames returns all label names that is available. - LabelNames(context.Context, *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) - // LabelValues returns all label values for given label name. - LabelValues(context.Context, *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) -} - -// UnimplementedStoreGatewayServer can be embedded to have forward compatible implementations. 
-type UnimplementedStoreGatewayServer struct { -} - -func (*UnimplementedStoreGatewayServer) Series(req *storepb.SeriesRequest, srv StoreGateway_SeriesServer) error { - return status.Errorf(codes.Unimplemented, "method Series not implemented") -} -func (*UnimplementedStoreGatewayServer) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") -} -func (*UnimplementedStoreGatewayServer) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") -} - -func RegisterStoreGatewayServer(s *grpc.Server, srv StoreGatewayServer) { - s.RegisterService(&_StoreGateway_serviceDesc, srv) -} - -func _StoreGateway_Series_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(storepb.SeriesRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StoreGatewayServer).Series(m, &storeGatewaySeriesServer{stream}) -} - -type StoreGateway_SeriesServer interface { - Send(*storepb.SeriesResponse) error - grpc.ServerStream -} - -type storeGatewaySeriesServer struct { - grpc.ServerStream -} - -func (x *storeGatewaySeriesServer) Send(m *storepb.SeriesResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _StoreGateway_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(storepb.LabelNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StoreGatewayServer).LabelNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/gatewaypb.StoreGateway/LabelNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StoreGatewayServer).LabelNames(ctx, req.(*storepb.LabelNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StoreGateway_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(storepb.LabelValuesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StoreGatewayServer).LabelValues(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/gatewaypb.StoreGateway/LabelValues", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StoreGatewayServer).LabelValues(ctx, req.(*storepb.LabelValuesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _StoreGateway_serviceDesc = grpc.ServiceDesc{ - ServiceName: "gatewaypb.StoreGateway", - HandlerType: (*StoreGatewayServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LabelNames", - Handler: _StoreGateway_LabelNames_Handler, - }, - { - MethodName: "LabelValues", - Handler: _StoreGateway_LabelValues_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Series", - Handler: _StoreGateway_Series_Handler, - ServerStreams: true, - }, - }, - Metadata: "gateway.proto", -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto deleted file mode 100644 index 14e65859c..000000000 --- 
a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; -package gatewaypb; - -import "github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto"; - -option go_package = "storegatewaypb"; - -service StoreGateway { - // Series streams each Series for given label matchers and time range. - // - // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain - // partition of the single series, but once a new series is started to be streamed it means that no more data will - // be sent for previous one. - // - // Series are sorted. - rpc Series(thanos.SeriesRequest) returns (stream thanos.SeriesResponse); - - // LabelNames returns all label names that is available. - rpc LabelNames(thanos.LabelNamesRequest) returns (thanos.LabelNamesResponse); - - // LabelValues returns all label values for given label name. - rpc LabelValues(thanos.LabelValuesRequest) returns (thanos.LabelValuesResponse); -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/chunkcompat/compat.go b/vendor/github.com/cortexproject/cortex/pkg/util/chunkcompat/compat.go deleted file mode 100644 index 8021497e4..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/chunkcompat/compat.go +++ /dev/null @@ -1,101 +0,0 @@ -package chunkcompat - -import ( - "bytes" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/cortexproject/cortex/pkg/chunk" - prom_chunk "github.com/cortexproject/cortex/pkg/chunk/encoding" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/util" -) - -// StreamsToMatrix converts a slice of QueryStreamResponse to a model.Matrix. -func StreamsToMatrix(from, through model.Time, responses []*client.QueryStreamResponse) (model.Matrix, error) { - result := model.Matrix{} - for _, response := range responses { - series, err := SeriesChunksToMatrix(from, through, response.Chunkseries) - if err != nil { - return nil, err - } - - result = append(result, series...) - } - return result, nil -} - -// SeriesChunksToMatrix converts slice of []client.TimeSeriesChunk to a model.Matrix. -func SeriesChunksToMatrix(from, through model.Time, serieses []client.TimeSeriesChunk) (model.Matrix, error) { - if serieses == nil { - return nil, nil - } - - result := model.Matrix{} - for _, series := range serieses { - metric := cortexpb.FromLabelAdaptersToMetric(series.Labels) - chunks, err := FromChunks("", cortexpb.FromLabelAdaptersToLabels(series.Labels), series.Chunks) - if err != nil { - return nil, err - } - - samples := []model.SamplePair{} - for _, chunk := range chunks { - ss, err := chunk.Samples(from, through) - if err != nil { - return nil, err - } - samples = util.MergeSampleSets(samples, ss) - } - - result = append(result, &model.SampleStream{ - Metric: metric, - Values: samples, - }) - } - return result, nil -} - -// FromChunks converts []client.Chunk to []chunk.Chunk. 
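-// Illustrative note: FromChunks is the inverse of ToChunks below. Each wire
-// chunk carries its encoding byte, start/end timestamps and raw data, so the
-// decoder is re-instantiated via prom_chunk.NewForEncoding before the bytes
-// are unmarshalled.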
-func FromChunks(userID string, metric labels.Labels, in []client.Chunk) ([]chunk.Chunk, error) { - out := make([]chunk.Chunk, 0, len(in)) - for _, i := range in { - o, err := prom_chunk.NewForEncoding(prom_chunk.Encoding(byte(i.Encoding))) - if err != nil { - return nil, err - } - - if err := o.UnmarshalFromBuf(i.Data); err != nil { - return nil, err - } - - firstTime, lastTime := model.Time(i.StartTimestampMs), model.Time(i.EndTimestampMs) - // As the lifetime of this chunk is scopes to this request, we don't need - // to supply a fingerprint. - out = append(out, chunk.NewChunk(userID, 0, metric, o, firstTime, lastTime)) - } - return out, nil -} - -// ToChunks converts []chunk.Chunk to []client.Chunk. -func ToChunks(in []chunk.Chunk) ([]client.Chunk, error) { - out := make([]client.Chunk, 0, len(in)) - for _, i := range in { - wireChunk := client.Chunk{ - StartTimestampMs: int64(i.From), - EndTimestampMs: int64(i.Through), - Encoding: int32(i.Data.Encoding()), - } - - buf := bytes.NewBuffer(make([]byte, 0, prom_chunk.ChunkLen)) - if err := i.Data.Marshal(buf); err != nil { - return nil, err - } - - wireChunk.Data = buf.Bytes() - out = append(out, wireChunk) - } - return out, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go deleted file mode 100644 index 623b9a707..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go +++ /dev/null @@ -1,26 +0,0 @@ -package concurrency - -import ( - "bytes" - "sync" -) - -// SyncBuffer is a io.writer implementation with atomic writes. It only keeps data in memory. -type SyncBuffer struct { - mu sync.Mutex - buf bytes.Buffer -} - -func (sb *SyncBuffer) Write(p []byte) (n int, err error) { - sb.mu.Lock() - defer sb.mu.Unlock() - - return sb.buf.Write(p) -} - -func (sb *SyncBuffer) String() string { - sb.mu.Lock() - defer sb.mu.Unlock() - - return sb.buf.String() -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go deleted file mode 100644 index 5f5078cc8..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go +++ /dev/null @@ -1,106 +0,0 @@ -package concurrency - -import ( - "context" - "sync" - - "golang.org/x/sync/errgroup" - - "github.com/cortexproject/cortex/pkg/util/math" - "github.com/cortexproject/cortex/pkg/util/multierror" -) - -// ForEachUser runs the provided userFunc for each userIDs up to concurrency concurrent workers. -// In case userFunc returns error, it will continue to process remaining users but returns an -// error with all errors userFunc has returned. -func ForEachUser(ctx context.Context, userIDs []string, concurrency int, userFunc func(ctx context.Context, userID string) error) error { - if len(userIDs) == 0 { - return nil - } - - // Push all jobs to a channel. - ch := make(chan string, len(userIDs)) - for _, userID := range userIDs { - ch <- userID - } - close(ch) - - // Keep track of all errors occurred. - errs := multierror.MultiError{} - errsMx := sync.Mutex{} - - wg := sync.WaitGroup{} - for ix := 0; ix < math.Min(concurrency, len(userIDs)); ix++ { - wg.Add(1) - go func() { - defer wg.Done() - - for userID := range ch { - // Ensure the context has not been canceled (ie. shutdown has been triggered). 
- if ctx.Err() != nil { - break - } - - if err := userFunc(ctx, userID); err != nil { - errsMx.Lock() - errs.Add(err) - errsMx.Unlock() - } - } - }() - } - - // wait for ongoing workers to finish. - wg.Wait() - - if ctx.Err() != nil { - return ctx.Err() - } - - return errs.Err() -} - -// ForEach runs the provided jobFunc for each job up to concurrency concurrent workers. -// The execution breaks on first error encountered. -func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc func(ctx context.Context, job interface{}) error) error { - if len(jobs) == 0 { - return nil - } - - // Push all jobs to a channel. - ch := make(chan interface{}, len(jobs)) - for _, job := range jobs { - ch <- job - } - close(ch) - - // Start workers to process jobs. - g, ctx := errgroup.WithContext(ctx) - for ix := 0; ix < math.Min(concurrency, len(jobs)); ix++ { - g.Go(func() error { - for job := range ch { - if err := ctx.Err(); err != nil { - return err - } - - if err := jobFunc(ctx, job); err != nil { - return err - } - } - - return nil - }) - } - - // Wait until done (or context has canceled). - return g.Wait() -} - -// CreateJobsFromStrings is an utility to create jobs from an slice of strings. -func CreateJobsFromStrings(values []string) []interface{} { - jobs := make([]interface{}, len(values)) - for i := 0; i < len(values); i++ { - jobs[i] = values[i] - } - return jobs -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go b/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go deleted file mode 100644 index ee850e804..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go +++ /dev/null @@ -1,78 +0,0 @@ -// Package fakeauth provides middlewares thats injects a fake userID, so the rest of the code -// can continue to be multitenant. -package fakeauth - -import ( - "context" - "net/http" - - "github.com/weaveworks/common/middleware" - "github.com/weaveworks/common/server" - "github.com/weaveworks/common/user" - "google.golang.org/grpc" -) - -// SetupAuthMiddleware for the given server config. 
-func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []string) middleware.Interface { - if enabled { - ignoredMethods := map[string]bool{} - for _, m := range noGRPCAuthOn { - ignoredMethods[m] = true - } - - config.GRPCMiddleware = append(config.GRPCMiddleware, func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - if ignoredMethods[info.FullMethod] { - return handler(ctx, req) - } - return middleware.ServerUserHeaderInterceptor(ctx, req, info, handler) - }) - - config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, - func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if ignoredMethods[info.FullMethod] { - return handler(srv, ss) - } - return middleware.StreamServerUserHeaderInterceptor(srv, ss, info, handler) - }, - ) - - return middleware.AuthenticateUser - } - - config.GRPCMiddleware = append(config.GRPCMiddleware, - fakeGRPCAuthUniaryMiddleware, - ) - config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, - fakeGRPCAuthStreamMiddleware, - ) - return fakeHTTPAuthMiddleware -} - -var fakeHTTPAuthMiddleware = middleware.Func(func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := user.InjectOrgID(r.Context(), "fake") - next.ServeHTTP(w, r.WithContext(ctx)) - }) -}) - -var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - ctx = user.InjectOrgID(ctx, "fake") - return handler(ctx, req) -} - -var fakeGRPCAuthStreamMiddleware = func(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - ctx := user.InjectOrgID(ss.Context(), "fake") - return handler(srv, serverStream{ - ctx: ctx, - ServerStream: ss, - }) -} - -type serverStream struct { - ctx context.Context - grpc.ServerStream -} - -func (ss serverStream) Context() context.Context { - return ss.ctx -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/carrier.go b/vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/carrier.go deleted file mode 100644 index 910b4898b..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/carrier.go +++ /dev/null @@ -1,40 +0,0 @@ -package httpgrpcutil - -import ( - "github.com/opentracing/opentracing-go" - "github.com/weaveworks/common/httpgrpc" -) - -// Used to transfer trace information from/to HTTP request. 
-type HttpgrpcHeadersCarrier httpgrpc.HTTPRequest - -func (c *HttpgrpcHeadersCarrier) Set(key, val string) { - c.Headers = append(c.Headers, &httpgrpc.Header{ - Key: key, - Values: []string{val}, - }) -} - -func (c *HttpgrpcHeadersCarrier) ForeachKey(handler func(key, val string) error) error { - for _, h := range c.Headers { - for _, v := range h.Values { - if err := handler(h.Key, v); err != nil { - return err - } - } - } - return nil -} - -func GetParentSpanForRequest(tracer opentracing.Tracer, req *httpgrpc.HTTPRequest) (opentracing.SpanContext, error) { - if tracer == nil { - return nil, nil - } - - carrier := (*HttpgrpcHeadersCarrier)(req) - extracted, err := tracer.Extract(opentracing.HTTPHeaders, carrier) - if err == opentracing.ErrSpanContextNotFound { - err = nil - } - return extracted, err -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/limiter/query_limiter.go b/vendor/github.com/cortexproject/cortex/pkg/util/limiter/query_limiter.go deleted file mode 100644 index b5c33c62e..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/limiter/query_limiter.go +++ /dev/null @@ -1,110 +0,0 @@ -package limiter - -import ( - "context" - "fmt" - "sync" - - "github.com/prometheus/common/model" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/ingester/client" -) - -type queryLimiterCtxKey struct{} - -var ( - ctxKey = &queryLimiterCtxKey{} - ErrMaxSeriesHit = "the query hit the max number of series limit (limit: %d series)" - ErrMaxChunkBytesHit = "the query hit the aggregated chunks size limit (limit: %d bytes)" - ErrMaxChunksPerQueryLimit = "the query hit the max number of chunks limit (limit: %d chunks)" -) - -type QueryLimiter struct { - uniqueSeriesMx sync.Mutex - uniqueSeries map[model.Fingerprint]struct{} - - chunkBytesCount atomic.Int64 - chunkCount atomic.Int64 - - maxSeriesPerQuery int - maxChunkBytesPerQuery int - maxChunksPerQuery int -} - -// NewQueryLimiter makes a new per-query limiter. Each query limiter -// is configured using the `maxSeriesPerQuery` limit. -func NewQueryLimiter(maxSeriesPerQuery, maxChunkBytesPerQuery int, maxChunksPerQuery int) *QueryLimiter { - return &QueryLimiter{ - uniqueSeriesMx: sync.Mutex{}, - uniqueSeries: map[model.Fingerprint]struct{}{}, - - maxSeriesPerQuery: maxSeriesPerQuery, - maxChunkBytesPerQuery: maxChunkBytesPerQuery, - maxChunksPerQuery: maxChunksPerQuery, - } -} - -func AddQueryLimiterToContext(ctx context.Context, limiter *QueryLimiter) context.Context { - return context.WithValue(ctx, ctxKey, limiter) -} - -// QueryLimiterFromContextWithFallback returns a QueryLimiter from the current context. -// If there is not a QueryLimiter on the context it will return a new no-op limiter. -func QueryLimiterFromContextWithFallback(ctx context.Context) *QueryLimiter { - ql, ok := ctx.Value(ctxKey).(*QueryLimiter) - if !ok { - // If there's no limiter return a new unlimited limiter as a fallback - ql = NewQueryLimiter(0, 0, 0) - } - return ql -} - -// AddSeries adds the input series and returns an error if the limit is reached. 
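The carrier removed above bridges opentracing and httpgrpc requests. A hedged sketch of the round trip, assuming the pre-removal httpgrpcutil package and using a no-op tracer purely for illustration: the client injects span context into the request headers, and GetParentSpanForRequest recovers it on the server side.

```go
package main

import (
	"github.com/opentracing/opentracing-go"
	"github.com/weaveworks/common/httpgrpc"

	"github.com/cortexproject/cortex/pkg/util/httpgrpcutil"
)

func main() {
	tracer := opentracing.NoopTracer{}
	span := tracer.StartSpan("query")
	defer span.Finish()

	req := &httpgrpc.HTTPRequest{Method: "GET", Url: "/api/v1/query"}

	// Inject writes trace headers through the carrier's Set method.
	carrier := (*httpgrpcutil.HttpgrpcHeadersCarrier)(req)
	_ = tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)

	// The receiving side recovers the parent span context (nil if absent).
	_, _ = httpgrpcutil.GetParentSpanForRequest(tracer, req)
}
```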
-func (ql *QueryLimiter) AddSeries(seriesLabels []cortexpb.LabelAdapter) error { - // If the max series is unlimited just return without managing map - if ql.maxSeriesPerQuery == 0 { - return nil - } - fingerprint := client.FastFingerprint(seriesLabels) - - ql.uniqueSeriesMx.Lock() - defer ql.uniqueSeriesMx.Unlock() - - ql.uniqueSeries[fingerprint] = struct{}{} - if len(ql.uniqueSeries) > ql.maxSeriesPerQuery { - // Format error with max limit - return fmt.Errorf(ErrMaxSeriesHit, ql.maxSeriesPerQuery) - } - return nil -} - -// uniqueSeriesCount returns the count of unique series seen by this query limiter. -func (ql *QueryLimiter) uniqueSeriesCount() int { - ql.uniqueSeriesMx.Lock() - defer ql.uniqueSeriesMx.Unlock() - return len(ql.uniqueSeries) -} - -// AddChunkBytes adds the input chunk size in bytes and returns an error if the limit is reached. -func (ql *QueryLimiter) AddChunkBytes(chunkSizeInBytes int) error { - if ql.maxChunkBytesPerQuery == 0 { - return nil - } - if ql.chunkBytesCount.Add(int64(chunkSizeInBytes)) > int64(ql.maxChunkBytesPerQuery) { - return fmt.Errorf(ErrMaxChunkBytesHit, ql.maxChunkBytesPerQuery) - } - return nil -} - -func (ql *QueryLimiter) AddChunks(count int) error { - if ql.maxChunksPerQuery == 0 { - return nil - } - - if ql.chunkCount.Add(int64(count)) > int64(ql.maxChunksPerQuery) { - return fmt.Errorf(fmt.Sprintf(ErrMaxChunksPerQueryLimit, ql.maxChunksPerQuery)) - } - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go b/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go deleted file mode 100644 index 48fc6a426..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go +++ /dev/null @@ -1,122 +0,0 @@ -package limiter - -import ( - "sync" - "time" - - "golang.org/x/time/rate" -) - -// RateLimiterStrategy defines the interface which a pluggable strategy should -// implement. The returned limit and burst can change over the time, and the -// local rate limiter will apply them every recheckPeriod. -type RateLimiterStrategy interface { - Limit(tenantID string) float64 - Burst(tenantID string) int -} - -// RateLimiter is a multi-tenant local rate limiter based on golang.org/x/time/rate. -// It requires a custom strategy in input, which is used to get the limit and burst -// settings for each tenant. -type RateLimiter struct { - strategy RateLimiterStrategy - recheckPeriod time.Duration - - tenantsLock sync.RWMutex - tenants map[string]*tenantLimiter -} - -type tenantLimiter struct { - limiter *rate.Limiter - recheckAt time.Time -} - -// NewRateLimiter makes a new multi-tenant rate limiter. Each per-tenant limiter -// is configured using the input strategy and its limit/burst is rechecked (and -// reconfigured if changed) every recheckPeriod. -func NewRateLimiter(strategy RateLimiterStrategy, recheckPeriod time.Duration) *RateLimiter { - return &RateLimiter{ - strategy: strategy, - recheckPeriod: recheckPeriod, - tenants: map[string]*tenantLimiter{}, - } -} - -// AllowN reports whether n tokens may be consumed happen at time now. -func (l *RateLimiter) AllowN(now time.Time, tenantID string, n int) bool { - return l.getTenantLimiter(now, tenantID).AllowN(now, n) -} - -// Limit returns the currently configured maximum overall tokens rate. -func (l *RateLimiter) Limit(now time.Time, tenantID string) float64 { - return float64(l.getTenantLimiter(now, tenantID).Limit()) -} - -// Burst returns the currently configured maximum burst size. 
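A short sketch of how the removed QueryLimiter rode through a query path, using only the API shown above: the limiter is attached to the context up front, and each component charges what it consumed against it.

```go
package main

import (
	"context"
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/limiter"
)

func main() {
	// 0 means "unlimited" for a dimension; here only chunks are capped, at 2.
	ql := limiter.NewQueryLimiter(0, 0, 2)
	ctx := limiter.AddQueryLimiterToContext(context.Background(), ql)

	// Deep in the call stack the limiter is recovered from the context.
	got := limiter.QueryLimiterFromContextWithFallback(ctx)
	fmt.Println(got.AddChunks(2)) // <nil>: exactly at the limit
	fmt.Println(got.AddChunks(1)) // error: 3 chunks exceed the limit of 2
}
```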
-func (l *RateLimiter) Burst(now time.Time, tenantID string) int { - return l.getTenantLimiter(now, tenantID).Burst() -} - -func (l *RateLimiter) getTenantLimiter(now time.Time, tenantID string) *rate.Limiter { - recheck := false - - // Check if the per-tenant limiter already exists and if should - // be rechecked because the recheck period has elapsed - l.tenantsLock.RLock() - entry, ok := l.tenants[tenantID] - if ok && !now.Before(entry.recheckAt) { - recheck = true - } - l.tenantsLock.RUnlock() - - // If the limiter already exist, we return it, making sure to recheck it - // if the recheck period has elapsed - if ok && recheck { - return l.recheckTenantLimiter(now, tenantID) - } else if ok { - return entry.limiter - } - - // Create a new limiter - limit := rate.Limit(l.strategy.Limit(tenantID)) - burst := l.strategy.Burst(tenantID) - limiter := rate.NewLimiter(limit, burst) - - l.tenantsLock.Lock() - if entry, ok = l.tenants[tenantID]; !ok { - entry = &tenantLimiter{limiter, now.Add(l.recheckPeriod)} - l.tenants[tenantID] = entry - } - l.tenantsLock.Unlock() - - return entry.limiter -} - -func (l *RateLimiter) recheckTenantLimiter(now time.Time, tenantID string) *rate.Limiter { - limit := rate.Limit(l.strategy.Limit(tenantID)) - burst := l.strategy.Burst(tenantID) - - l.tenantsLock.Lock() - defer l.tenantsLock.Unlock() - - entry := l.tenants[tenantID] - - // We check again if the recheck period elapsed, cause it may - // have already been rechecked in the meanwhile. - if now.Before(entry.recheckAt) { - return entry.limiter - } - - // Ensure the limiter's limit and burst match the expected value - if entry.limiter.Limit() != limit { - entry.limiter.SetLimitAt(now, limit) - } - - if entry.limiter.Burst() != burst { - entry.limiter.SetBurstAt(now, burst) - } - - entry.recheckAt = now.Add(l.recheckPeriod) - - return entry.limiter -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service.go b/vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service.go deleted file mode 100644 index ac18cdcd4..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service.go +++ /dev/null @@ -1,114 +0,0 @@ -package modules - -import ( - "context" - "fmt" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/util/services" -) - -// ErrStopProcess is the error returned by a service as a hint to stop the server entirely. -var ErrStopProcess = errors.New("stop process") - -// moduleService is a Service implementation that adds waiting for dependencies to start before starting, -// and dependant modules to stop before stopping this module service. -type moduleService struct { - services.Service - - service services.Service - name string - logger log.Logger - - // startDeps, stopDeps return map of service names to services - startDeps, stopDeps func(string) map[string]services.Service -} - -// NewModuleService wraps a module service, and makes sure that dependencies are started/stopped before module service starts or stops. -// If any dependency fails to start, this service fails as well. -// On stop, errors from failed dependencies are ignored. 
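Plugging a strategy into the removed RateLimiter looked roughly like the sketch below; fixedStrategy is a hypothetical implementation that applies the same limit and burst to every tenant, whereas real callers derived these from per-tenant limits.

```go
package main

import (
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/util/limiter"
)

// fixedStrategy returns the same limit/burst regardless of tenant.
type fixedStrategy struct {
	limit float64
	burst int
}

func (s fixedStrategy) Limit(tenantID string) float64 { return s.limit }
func (s fixedStrategy) Burst(tenantID string) int     { return s.burst }

func main() {
	// Limit/burst are rechecked against the strategy every 10s.
	l := limiter.NewRateLimiter(fixedStrategy{limit: 10, burst: 20}, 10*time.Second)

	now := time.Now()
	fmt.Println(l.AllowN(now, "tenant-1", 5))  // true: within burst
	fmt.Println(l.AllowN(now, "tenant-1", 50)) // false: exceeds burst
}
```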
-func NewModuleService(name string, logger log.Logger, service services.Service, startDeps, stopDeps func(string) map[string]services.Service) services.Service { - w := &moduleService{ - name: name, - logger: logger, - service: service, - startDeps: startDeps, - stopDeps: stopDeps, - } - - w.Service = services.NewBasicService(w.start, w.run, w.stop) - return w -} - -func (w *moduleService) start(serviceContext context.Context) error { - // wait until all startDeps are running - startDeps := w.startDeps(w.name) - for m, s := range startDeps { - if s == nil { - continue - } - - level.Debug(w.logger).Log("msg", "module waiting for initialization", "module", w.name, "waiting_for", m) - - err := s.AwaitRunning(serviceContext) - if err != nil { - return fmt.Errorf("failed to start %v, because it depends on module %v, which has failed: %w", w.name, m, err) - } - } - - // we don't want to let this service to stop until all dependant services are stopped, - // so we use independent context here - level.Info(w.logger).Log("msg", "initialising", "module", w.name) - err := w.service.StartAsync(context.Background()) - if err != nil { - return errors.Wrapf(err, "error starting module: %s", w.name) - } - - return w.service.AwaitRunning(serviceContext) -} - -func (w *moduleService) run(serviceContext context.Context) error { - // wait until service stops, or context is canceled, whatever happens first. - // We don't care about exact error here - _ = w.service.AwaitTerminated(serviceContext) - return w.service.FailureCase() -} - -func (w *moduleService) stop(_ error) error { - var err error - if w.service.State() == services.Running { - // Only wait for other modules, if underlying service is still running. - w.waitForModulesToStop() - - level.Debug(w.logger).Log("msg", "stopping", "module", w.name) - - err = services.StopAndAwaitTerminated(context.Background(), w.service) - } else { - err = w.service.FailureCase() - } - - if err != nil && err != ErrStopProcess { - level.Warn(w.logger).Log("msg", "module failed with error", "module", w.name, "err", err) - } else { - level.Info(w.logger).Log("msg", "module stopped", "module", w.name) - } - return err -} - -func (w *moduleService) waitForModulesToStop() { - // wait until all stopDeps have stopped - stopDeps := w.stopDeps(w.name) - for n, s := range stopDeps { - if s == nil { - continue - } - - level.Debug(w.logger).Log("msg", "module waiting for", "module", w.name, "waiting_for", n) - // Passed context isn't canceled, so we can only get error here, if service - // fails. But we don't care *how* service stops, as long as it is done. - _ = s.AwaitTerminated(context.Background()) - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service_wrapper.go b/vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service_wrapper.go deleted file mode 100644 index ef61abb27..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/modules/module_service_wrapper.go +++ /dev/null @@ -1,31 +0,0 @@ -package modules - -import ( - "github.com/go-kit/log" - - "github.com/cortexproject/cortex/pkg/util/services" -) - -// This function wraps module service, and adds waiting for dependencies to start before starting, -// and dependant modules to stop before stopping this module service. 
-func newModuleServiceWrapper(serviceMap map[string]services.Service, mod string, logger log.Logger, modServ services.Service, startDeps []string, stopDeps []string) services.Service { - getDeps := func(deps []string) map[string]services.Service { - r := map[string]services.Service{} - for _, m := range deps { - s := serviceMap[m] - if s != nil { - r[m] = s - } - } - return r - } - - return NewModuleService(mod, logger, modServ, - func(_ string) map[string]services.Service { - return getDeps(startDeps) - }, - func(_ string) map[string]services.Service { - return getDeps(stopDeps) - }, - ) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go b/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go deleted file mode 100644 index 06e7e05a1..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go +++ /dev/null @@ -1,237 +0,0 @@ -package modules - -import ( - "fmt" - "sort" - - "github.com/go-kit/log" - "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/util/services" -) - -// module is the basic building block of the application -type module struct { - // dependencies of this module - deps []string - - // initFn for this module (can return nil) - initFn func() (services.Service, error) - - // is this module user visible (i.e intended to be passed to `InitModuleServices`) - userVisible bool -} - -// Manager is a component that initialises modules of the application -// in the right order of dependencies. -type Manager struct { - modules map[string]*module - logger log.Logger -} - -// UserInvisibleModule is an option for `RegisterModule` that marks module not visible to user. Modules are user visible by default. -func UserInvisibleModule(m *module) { - m.userVisible = false -} - -// NewManager creates a new Manager -func NewManager(logger log.Logger) *Manager { - return &Manager{ - modules: make(map[string]*module), - logger: logger, - } -} - -// RegisterModule registers a new module with name, init function, and options. Name must -// be unique to avoid overwriting modules. If initFn is nil, the module will not initialise. -// Modules are user visible by default. -func (m *Manager) RegisterModule(name string, initFn func() (services.Service, error), options ...func(option *module)) { - m.modules[name] = &module{ - initFn: initFn, - userVisible: true, - } - - for _, o := range options { - o(m.modules[name]) - } -} - -// AddDependency adds a dependency from name(source) to dependsOn(targets) -// An error is returned if the source module name is not found -func (m *Manager) AddDependency(name string, dependsOn ...string) error { - if mod, ok := m.modules[name]; ok { - mod.deps = append(mod.deps, dependsOn...) - } else { - return fmt.Errorf("no such module: %s", name) - } - return nil -} - -// InitModuleServices initialises given modules by initialising all their dependencies -// in the right order. Modules are wrapped in such a way that they start after their -// dependencies have been started and stop before their dependencies are stopped. 
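The removed modules.Manager implemented exactly the ordering described above. A hedged sketch of the registration flow; module names are illustrative and the idle services stand in for real init functions (services.NewIdleService is assumed from the util/services package, which this patch does not remove):

```go
package main

import (
	"github.com/go-kit/log"

	"github.com/cortexproject/cortex/pkg/util/modules"
	"github.com/cortexproject/cortex/pkg/util/services"
)

func main() {
	m := modules.NewManager(log.NewNopLogger())

	m.RegisterModule("store", func() (services.Service, error) {
		return services.NewIdleService(nil, nil), nil
	})
	m.RegisterModule("api", func() (services.Service, error) {
		return services.NewIdleService(nil, nil), nil
	})
	if err := m.AddDependency("api", "store"); err != nil {
		panic(err)
	}

	// "store" is initialised before "api", and the wrappers ensure it
	// also stops only after "api" has stopped.
	serviceMap, err := m.InitModuleServices("api")
	if err != nil {
		panic(err)
	}
	_ = serviceMap["api"]
}
```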
-func (m *Manager) InitModuleServices(modules ...string) (map[string]services.Service, error) { - servicesMap := map[string]services.Service{} - initMap := map[string]bool{} - - for _, module := range modules { - if err := m.initModule(module, initMap, servicesMap); err != nil { - return nil, err - } - } - - return servicesMap, nil -} - -func (m *Manager) initModule(name string, initMap map[string]bool, servicesMap map[string]services.Service) error { - if _, ok := m.modules[name]; !ok { - return fmt.Errorf("unrecognised module name: %s", name) - } - - // initialize all of our dependencies first - deps := m.orderedDeps(name) - deps = append(deps, name) // lastly, initialize the requested module - - for ix, n := range deps { - // Skip already initialized modules - if initMap[n] { - continue - } - - mod := m.modules[n] - - var serv services.Service - - if mod.initFn != nil { - s, err := mod.initFn() - if err != nil { - return errors.Wrap(err, fmt.Sprintf("error initialising module: %s", n)) - } - - if s != nil { - // We pass servicesMap, which isn't yet complete. By the time service starts, - // it will be fully built, so there is no need for extra synchronization. - serv = newModuleServiceWrapper(servicesMap, n, m.logger, s, m.DependenciesForModule(n), m.findInverseDependencies(n, deps[ix+1:])) - } - } - - if serv != nil { - servicesMap[n] = serv - } - - initMap[n] = true - } - - return nil -} - -// UserVisibleModuleNames gets list of module names that are -// user visible. Returned list is sorted in increasing order. -func (m *Manager) UserVisibleModuleNames() []string { - var result []string - for key, val := range m.modules { - if val.userVisible { - result = append(result, key) - } - } - - sort.Strings(result) - - return result -} - -// IsUserVisibleModule check if given module is public or not. Returns true -// if and only if the given module is registered and is public. -func (m *Manager) IsUserVisibleModule(mod string) bool { - val, ok := m.modules[mod] - - if ok { - return val.userVisible - } - - return false -} - -// IsModuleRegistered checks if the given module has been registered or not. Returns true -// if the module has previously been registered via a call to RegisterModule, false otherwise. -func (m *Manager) IsModuleRegistered(mod string) bool { - _, ok := m.modules[mod] - return ok -} - -// listDeps recursively gets a list of dependencies for a passed moduleName -func (m *Manager) listDeps(mod string) []string { - deps := m.modules[mod].deps - for _, d := range m.modules[mod].deps { - deps = append(deps, m.listDeps(d)...) - } - return deps -} - -// orderedDeps gets a list of all dependencies ordered so that items are always after any of their dependencies. -func (m *Manager) orderedDeps(mod string) []string { - deps := m.listDeps(mod) - - // get a unique list of moduleNames, with a flag for whether they have been added to our result - uniq := map[string]bool{} - for _, dep := range deps { - uniq[dep] = false - } - - result := make([]string, 0, len(uniq)) - - // keep looping through all modules until they have all been added to the result. - - for len(result) < len(uniq) { - OUTER: - for name, added := range uniq { - if added { - continue - } - for _, dep := range m.modules[name].deps { - // stop processing this module if one of its dependencies has - // not been added to the result yet. 
- if !uniq[dep] { - continue OUTER - } - } - - // if all of the module's dependencies have been added to the result slice, - // then we can safely add this module to the result slice as well. - uniq[name] = true - result = append(result, name) - } - } - return result -} - -// find modules in the supplied list, that depend on mod -func (m *Manager) findInverseDependencies(mod string, mods []string) []string { - result := []string(nil) - - for _, n := range mods { - for _, d := range m.modules[n].deps { - if d == mod { - result = append(result, n) - break - } - } - } - - return result -} - -// DependenciesForModule returns transitive dependencies for given module, sorted by name. -func (m *Manager) DependenciesForModule(module string) []string { - dedup := map[string]bool{} - for _, d := range m.listDeps(module) { - dedup[d] = true - } - - result := make([]string, 0, len(dedup)) - for d := range dedup { - result = append(result, d) - } - sort.Strings(result) - return result -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/net/firewall_dialer.go b/vendor/github.com/cortexproject/cortex/pkg/util/net/firewall_dialer.go deleted file mode 100644 index 6ab34441a..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/net/firewall_dialer.go +++ /dev/null @@ -1,96 +0,0 @@ -package net - -import ( - "context" - "net" - "syscall" - - "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/util/flagext" -) - -var errBlockedAddress = errors.New("blocked address") -var errInvalidAddress = errors.New("invalid address") - -type FirewallDialerConfigProvider interface { - BlockCIDRNetworks() []flagext.CIDR - BlockPrivateAddresses() bool -} - -// FirewallDialer is a net dialer which integrates a firewall to block specific addresses. -type FirewallDialer struct { - parent *net.Dialer - cfgProvider FirewallDialerConfigProvider -} - -func NewFirewallDialer(cfgProvider FirewallDialerConfigProvider) *FirewallDialer { - d := &FirewallDialer{cfgProvider: cfgProvider} - d.parent = &net.Dialer{Control: d.control} - return d -} - -func (d *FirewallDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - return d.parent.DialContext(ctx, network, address) -} - -func (d *FirewallDialer) control(_, address string, _ syscall.RawConn) error { - blockPrivateAddresses := d.cfgProvider.BlockPrivateAddresses() - blockCIDRNetworks := d.cfgProvider.BlockCIDRNetworks() - - // Skip any control if no firewall has been configured. - if !blockPrivateAddresses && len(blockCIDRNetworks) == 0 { - return nil - } - - host, _, err := net.SplitHostPort(address) - if err != nil { - return errInvalidAddress - } - - // We expect an IP as address because the DNS resolution already occurred. - ip := net.ParseIP(host) - if ip == nil { - return errBlockedAddress - } - - if blockPrivateAddresses && (isPrivate(ip) || isLocal(ip)) { - return errBlockedAddress - } - - for _, cidr := range blockCIDRNetworks { - if cidr.Value.Contains(ip) { - return errBlockedAddress - } - } - - return nil -} - -func isLocal(ip net.IP) bool { - return ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() -} - -// isPrivate reports whether ip is a private address, according to -// RFC 1918 (IPv4 addresses) and RFC 4193 (IPv6 addresses). -// -// This function has been copied from golang and should be removed once -// we'll upgrade to go 1.17. See: https://github.com/golang/go/pull/42793 -func isPrivate(ip net.IP) bool { - if ip4 := ip.To4(); ip4 != nil { - // Following RFC 4193, Section 3. 
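Wiring the removed FirewallDialer required only a config provider. A sketch with a hypothetical static provider (real callers derived it from tenant limits); it assumes flagext.CIDR from the cortex flagext package and the pre-removal util/net package path:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/flagext"
	cortex_net "github.com/cortexproject/cortex/pkg/util/net"
)

// staticFirewallConfig blocks private addresses and an optional CIDR list.
type staticFirewallConfig struct {
	cidrs        []flagext.CIDR
	blockPrivate bool
}

func (c staticFirewallConfig) BlockCIDRNetworks() []flagext.CIDR { return c.cidrs }
func (c staticFirewallConfig) BlockPrivateAddresses() bool       { return c.blockPrivate }

func main() {
	dialer := cortex_net.NewFirewallDialer(staticFirewallConfig{blockPrivate: true})

	// Loopback is rejected by the control hook before the connection opens.
	_, err := dialer.DialContext(context.Background(), "tcp", "127.0.0.1:80")
	fmt.Println(err) // expected: a "blocked address" error
}
```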
Local IPv6 Unicast Addresses which says: - // The Internet Assigned Numbers Authority (IANA) has reserved the - // following three blocks of the IPv4 address space for private internets: - // 10.0.0.0 - 10.255.255.255 (10/8 prefix) - // 172.16.0.0 - 172.31.255.255 (172.16/12 prefix) - // 192.168.0.0 - 192.168.255.255 (192.168/16 prefix) - return ip4[0] == 10 || - (ip4[0] == 172 && ip4[1]&0xf0 == 16) || - (ip4[0] == 192 && ip4[1] == 168) - } - // Following RFC 4193, Section 3. Private Address Space which says: - // The Internet Assigned Numbers Authority (IANA) has reserved the - // following block of the IPv6 address space for local internets: - // FC00:: - FDFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF (FC00::/7 prefix) - return len(ip) == net.IPv6len && ip[0]&0xfe == 0xfc -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go b/vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go deleted file mode 100644 index e3a863ad2..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go +++ /dev/null @@ -1,132 +0,0 @@ -package process - -import ( - "bufio" - "bytes" - "errors" - "io/ioutil" - "os" - "path/filepath" - "strconv" - - "github.com/prometheus/client_golang/prometheus" -) - -const ( - // DefaultProcMountPoint is the common mount point of the proc filesystem. - DefaultProcMountPoint = "/proc" -) - -var ( - ErrUnsupportedCollector = errors.New("unsupported platform") -) - -type processCollector struct { - pid int - procMountPoint string - - // Metrics. - currMaps *prometheus.Desc - maxMaps *prometheus.Desc -} - -// NewProcessCollector makes a new custom process collector used to collect process metrics the -// default instrumentation doesn't support. -func NewProcessCollector() (prometheus.Collector, error) { - return newProcessCollector(os.Getpid(), DefaultProcMountPoint) -} - -func newProcessCollector(pid int, procMountPoint string) (prometheus.Collector, error) { - // Check whether it's supported on this platform. - if !isSupported(procMountPoint) { - return nil, ErrUnsupportedCollector - } - - return &processCollector{ - pid: pid, - procMountPoint: procMountPoint, - currMaps: prometheus.NewDesc( - "process_memory_map_areas", - "Number of memory map areas allocated by the process.", - nil, nil, - ), - maxMaps: prometheus.NewDesc( - "process_memory_map_areas_limit", - "Maximum number of memory map ares the process can allocate.", - nil, nil, - ), - }, nil -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- c.currMaps - ch <- c.maxMaps -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- prometheus.Metric) { - if value, err := c.getMapsCount(); err == nil { - ch <- prometheus.MustNewConstMetric(c.currMaps, prometheus.GaugeValue, value) - } - - if value, err := c.getMapsCountLimit(); err == nil { - ch <- prometheus.MustNewConstMetric(c.maxMaps, prometheus.GaugeValue, value) - } -} - -// getMapsCount returns the number of memory map ares the process has allocated. -func (c *processCollector) getMapsCount() (float64, error) { - file, err := os.Open(processMapsPath(c.procMountPoint, c.pid)) - if err != nil { - return 0, err - } - defer file.Close() - - count := 0 - scan := bufio.NewScanner(file) - for scan.Scan() { - count++ - } - - return float64(count), scan.Err() -} - -// getMapsCountLimit returns the maximum of memory map ares the process can allocate. 
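Registering the removed process collector was a one-liner against the prometheus client; on platforms without /proc/sys/vm/max_map_count the constructor returns ErrUnsupportedCollector, which callers were expected to tolerate. A sketch using only the API shown above:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/util/process"
)

func main() {
	collector, err := process.NewProcessCollector()
	if errors.Is(err, process.ErrUnsupportedCollector) {
		fmt.Println("platform not supported, skipping extra process metrics")
		return
	}
	if err != nil {
		panic(err)
	}
	// Exposes process_memory_map_areas and process_memory_map_areas_limit.
	prometheus.MustRegister(collector)
}
```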
-func (c *processCollector) getMapsCountLimit() (float64, error) { - file, err := os.Open(vmMapsLimitPath(c.procMountPoint)) - if err != nil { - return 0, err - } - defer file.Close() - - content, err := ioutil.ReadAll(file) - if err != nil { - return 0, err - } - - content = bytes.TrimSpace(content) - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - value, err := strconv.ParseInt(string(content), base, 64) - if err != nil { - return 0, err - } - - return float64(value), nil -} - -func isSupported(procPath string) bool { - _, err := os.Stat(vmMapsLimitPath(procPath)) - return err == nil -} - -func processMapsPath(procPath string, pid int) string { - return filepath.Join(procPath, strconv.Itoa(pid), "maps") -} - -func vmMapsLimitPath(procPath string) string { - return filepath.Join(procPath, "sys", "vm", "max_map_count") -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go b/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go deleted file mode 100644 index f2005a4f0..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go +++ /dev/null @@ -1,56 +0,0 @@ -package push - -import ( - "context" - "net/http" - - "github.com/go-kit/log/level" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/middleware" - - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/log" -) - -// Func defines the type of the push. It is similar to http.HandlerFunc. -type Func func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) - -// Handler is a http.Handler which accepts WriteRequests. -func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - logger := log.WithContext(ctx, log.Logger) - if sourceIPs != nil { - source := sourceIPs.Get(r) - if source != "" { - ctx = util.AddSourceIPsToOutgoingContext(ctx, source) - logger = log.WithSourceIPs(source, logger) - } - } - var req cortexpb.PreallocWriteRequest - err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) - if err != nil { - level.Error(logger).Log("err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - req.SkipLabelNameValidation = false - if req.Source == 0 { - req.Source = cortexpb.API - } - - if _, err := push(ctx, &req.WriteRequest); err != nil { - resp, ok := httpgrpc.HTTPResponseFromError(err) - if !ok { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if resp.GetCode() != 202 { - level.Error(logger).Log("msg", "push error", "err", err) - } - http.Error(w, string(resp.Body), int(resp.Code)) - } - }) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go deleted file mode 100644 index 674dee288..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go +++ /dev/null @@ -1,210 +0,0 @@ -package runtimeconfig - -import ( - "bytes" - "context" - "crypto/sha256" - "flag" - "fmt" - "io" - "os" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - 
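Mounting the removed push.Handler looked roughly like the sketch below. The push Func here just counts series and is illustrative; in Cortex this was wired to the distributor's Push. A nil SourceIPExtractor disables source-IP propagation, per the handler code above.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/cortexproject/cortex/pkg/cortexpb"
	"github.com/cortexproject/cortex/pkg/util/push"
)

func main() {
	pushFunc := func(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
		fmt.Printf("received %d timeseries\n", len(req.Timeseries))
		return &cortexpb.WriteResponse{}, nil
	}

	// 10 MB max request size, no source-IP extraction.
	http.Handle("/api/v1/push", push.Handler(10<<20, nil, pushFunc))
	_ = http.ListenAndServe(":9009", nil)
}
```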
"github.com/cortexproject/cortex/pkg/util/services" -) - -// Loader loads the configuration from file. -type Loader func(r io.Reader) (interface{}, error) - -// Config holds the config for an Manager instance. -// It holds config related to loading per-tenant config. -type Config struct { - ReloadPeriod time.Duration `yaml:"period"` - // LoadPath contains the path to the runtime config file, requires an - // non-empty value - LoadPath string `yaml:"file"` - Loader Loader `yaml:"-"` -} - -// RegisterFlags registers flags. -func (mc *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&mc.LoadPath, "runtime-config.file", "", "File with the configuration that can be updated in runtime.") - f.DurationVar(&mc.ReloadPeriod, "runtime-config.reload-period", 10*time.Second, "How often to check runtime config file.") -} - -// Manager periodically reloads the configuration from a file, and keeps this -// configuration available for clients. -type Manager struct { - services.Service - - cfg Config - logger log.Logger - - listenersMtx sync.Mutex - listeners []chan interface{} - - configMtx sync.RWMutex - config interface{} - - configLoadSuccess prometheus.Gauge - configHash *prometheus.GaugeVec -} - -// New creates an instance of Manager and starts reload config loop based on config -func New(cfg Config, registerer prometheus.Registerer, logger log.Logger) (*Manager, error) { - if cfg.LoadPath == "" { - return nil, errors.New("LoadPath is empty") - } - - mgr := Manager{ - cfg: cfg, - configLoadSuccess: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ - Name: "runtime_config_last_reload_successful", - Help: "Whether the last runtime-config reload attempt was successful.", - }), - configHash: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ - Name: "runtime_config_hash", - Help: "Hash of the currently active runtime config file.", - }, []string{"sha256"}), - logger: logger, - } - - mgr.Service = services.NewBasicService(mgr.starting, mgr.loop, mgr.stopping) - return &mgr, nil -} - -func (om *Manager) starting(_ context.Context) error { - if om.cfg.LoadPath == "" { - return nil - } - - return errors.Wrap(om.loadConfig(), "failed to load runtime config") -} - -// CreateListenerChannel creates new channel that can be used to receive new config values. -// If there is no receiver waiting for value when config manager tries to send the update, -// or channel buffer is full, update is discarded. -// -// When config manager is stopped, it closes all channels to notify receivers that they will -// not receive any more updates. -func (om *Manager) CreateListenerChannel(buffer int) <-chan interface{} { - ch := make(chan interface{}, buffer) - - om.listenersMtx.Lock() - defer om.listenersMtx.Unlock() - - om.listeners = append(om.listeners, ch) - return ch -} - -// CloseListenerChannel removes given channel from list of channels to send notifications to and closes channel. -func (om *Manager) CloseListenerChannel(listener <-chan interface{}) { - om.listenersMtx.Lock() - defer om.listenersMtx.Unlock() - - for ix, ch := range om.listeners { - if ch == listener { - om.listeners = append(om.listeners[:ix], om.listeners[ix+1:]...) 
- close(ch) - break - } - } -} - -func (om *Manager) loop(ctx context.Context) error { - if om.cfg.LoadPath == "" { - level.Info(om.logger).Log("msg", "runtime config disabled: file not specified") - <-ctx.Done() - return nil - } - - ticker := time.NewTicker(om.cfg.ReloadPeriod) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - err := om.loadConfig() - if err != nil { - // Log but don't stop on error - we don't want to halt all ingesters because of a typo - level.Error(om.logger).Log("msg", "failed to load config", "err", err) - } - case <-ctx.Done(): - return nil - } - } -} - -// loadConfig loads configuration using the loader function, and if successful, -// stores it as current configuration and notifies listeners. -func (om *Manager) loadConfig() error { - buf, err := os.ReadFile(om.cfg.LoadPath) - if err != nil { - om.configLoadSuccess.Set(0) - return errors.Wrap(err, "read file") - } - hash := sha256.Sum256(buf) - - cfg, err := om.cfg.Loader(bytes.NewReader(buf)) - if err != nil { - om.configLoadSuccess.Set(0) - return errors.Wrap(err, "load file") - } - om.configLoadSuccess.Set(1) - - om.setConfig(cfg) - om.callListeners(cfg) - - // expose hash of runtime config - om.configHash.Reset() - om.configHash.WithLabelValues(fmt.Sprintf("%x", hash[:])).Set(1) - - return nil -} - -func (om *Manager) setConfig(config interface{}) { - om.configMtx.Lock() - defer om.configMtx.Unlock() - om.config = config -} - -func (om *Manager) callListeners(newValue interface{}) { - om.listenersMtx.Lock() - defer om.listenersMtx.Unlock() - - for _, ch := range om.listeners { - select { - case ch <- newValue: - // ok - default: - // nobody is listening or buffer full. - } - } -} - -// Stop stops the Manager -func (om *Manager) stopping(_ error) error { - om.listenersMtx.Lock() - defer om.listenersMtx.Unlock() - - for _, ch := range om.listeners { - close(ch) - } - om.listeners = nil - return nil -} - -// GetConfig returns last loaded config value, possibly nil. -func (om *Manager) GetConfig() interface{} { - om.configMtx.RLock() - defer om.configMtx.RUnlock() - - return om.config -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c741..000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
-
-	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
-	[people.akihirosuda]
-	Name = "Akihiro Suda"
-	Email = "akihiro.suda.cz@hco.ntt.co.jp"
-	GitHub = "AkihiroSuda"
-
-	[people.dnephin]
-	Name = "Daniel Nephin"
-	Email = "dnephin@gmail.com"
-	GitHub = "dnephin"
-
-	[people.thajeztah]
-	Name = "Sebastiaan van Stijn"
-	Email = "github@gone.nl"
-	GitHub = "thaJeztah"
-
-	[people.vdemeester]
-	Name = "Vincent Demeester"
-	Email = "vincent@sbr.pm"
-	GitHub = "vdemeester"
\ No newline at end of file
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
deleted file mode 100644
index 4f70a4e13..000000000
--- a/vendor/github.com/docker/go-units/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
-
-# Introduction
-
-go-units is a library to transform human-friendly measurements into machine-friendly values.
-
-## Usage
-
-See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
-
-## Copyright and license
-
-Copyright © 2015 Docker, Inc.
-
-go-units is licensed under the Apache License, Version 2.0.
-See [LICENSE](LICENSE) for the full text of the license.
diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
deleted file mode 100644
index af9d60552..000000000
--- a/vendor/github.com/docker/go-units/circle.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-dependencies:
-  post:
-    # install golint
-    - go get golang.org/x/lint/golint
-
-test:
-  pre:
-    # run analysis before tests
-    - go vet ./...
-    - test -z "$(golint ./... | tee /dev/stderr)"
-    - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
deleted file mode 100644
index 48dd8744d..000000000
--- a/vendor/github.com/docker/go-units/duration.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package units provides helper functions to parse and print size and time
-// units in human-readable format.
-package units
-
-import (
-	"fmt"
-	"time"
-)
-
-// HumanDuration returns a human-readable approximation of a duration
-// (eg. "About a minute", "4 hours ago", etc.).
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index 85f6ab071..000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. 
-// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { - size *= float64(mul) - } - - return int64(size), nil -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400cc..000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/felixge/fgprof/LICENSE.txt b/vendor/github.com/felixge/fgprof/LICENSE.txt deleted file mode 100644 index 3e424911b..000000000 --- a/vendor/github.com/felixge/fgprof/LICENSE.txt +++ /dev/null @@ -1,8 +0,0 @@ -The MIT License (MIT) -Copyright © 2020 Felix Geisendörfer - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
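For reference, the go-units helpers deleted above (size.go and ulimit.go) were consumed roughly as follows. This is a minimal sketch based only on the signatures visible in the removed files; it assumes github.com/docker/go-units is still fetchable as a module, and the input values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	units "github.com/docker/go-units"
)

func main() {
	// size.go: decimal (SI) and binary (IEC) renderings of a byte count.
	fmt.Println(units.HumanSize(2746000))  // 2.746MB
	fmt.Println(units.BytesSize(17825792)) // 17MiB

	// size.go: parse a human-readable size; RAMInBytes uses the binary map.
	n, err := units.RAMInBytes("32mb") // 32 * 1024 * 1024
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // 33554432

	// ulimit.go: parse "name=soft[:hard]" into a Ulimit and convert it.
	ul, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		log.Fatal(err)
	}
	rl, err := ul.GetRlimit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s -> type=%d soft=%d hard=%d\n", ul, rl.Type, rl.Soft, rl.Hard)
}
```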
diff --git a/vendor/github.com/felixge/fgprof/README.md b/vendor/github.com/felixge/fgprof/README.md
deleted file mode 100644
index fe0c0a25d..000000000
--- a/vendor/github.com/felixge/fgprof/README.md
+++ /dev/null
@@ -1,214 +0,0 @@
-[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go)](https://pkg.go.dev/github.com/felixge/fgprof)
-![GitHub Workflow Status](https://img.shields.io/github/workflow/status/felixge/fgprof/Go)
-![GitHub](https://img.shields.io/github/license/felixge/fgprof)
-
-# :rocket: fgprof - The Full Go Profiler
-
-fgprof is a sampling [Go](https://golang.org/) profiler that allows you to analyze On-CPU as well as [Off-CPU](http://www.brendangregg.com/offcpuanalysis.html) (e.g. I/O) time together.
-
-Go's builtin sampling CPU profiler can only show On-CPU time, but it's better than fgprof at that. Go also includes tracing profilers that can analyze I/O, but they can't be combined with the CPU profiler.
-
-fgprof is designed for analyzing applications with mixed I/O and CPU workloads.
-
-## Quick Start
-
-If this is the first time you're hearing about fgprof, you should start by reading about [The Problem](#the-problem) & [How it Works](#how-it-works).
-
-There is no need to choose between fgprof and the builtin profiler. Here is how to add both to your application:
-
-```go
-package main
-
-import (
-	"log"
-	"net/http"
-	_ "net/http/pprof"
-
-	"github.com/felixge/fgprof"
-)
-
-func main() {
-	http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
-	go func() {
-		log.Println(http.ListenAndServe(":6060", nil))
-	}()
-
-	// <code to profile>
-}
-```
-
-fgprof is compatible with the `go tool pprof` visualizer, so taking and analyzing a 3s profile is as simple as:
-
-```
-go tool pprof --http=:6061 http://localhost:6060/debug/fgprof?seconds=3
-```
-
-![](./assets/fgprof_pprof.png)
-
-Additionally fgprof supports the plain text format used by Brendan Gregg's [FlameGraph](http://www.brendangregg.com/flamegraphs.html) utility:
-
-```
-git clone https://github.com/brendangregg/FlameGraph
-cd FlameGraph
-curl -s 'localhost:6060/debug/fgprof?seconds=3&format=folded' > fgprof.folded
-./flamegraph.pl fgprof.folded > fgprof.svg
-```
-
-![](./assets/fgprof_gregg.png)
-
-Which tool you prefer is up to you, but one thing I like about Gregg's tool is that you can filter the plaintext files using grep, which can be very useful when analyzing large programs.
-
-If you don't have a program to profile right now, you can `go run ./example`, which should allow you to reproduce the graphs you see above. If you've never seen such graphs before, and are unsure how to read them, head over to Brendan Gregg's [Flame Graph](http://www.brendangregg.com/flamegraphs.html) page.
-
-## The Problem
-
-Let's say you've been tasked to optimize a simple program that has a loop calling out to three functions:
-
-```go
-func main() {
-	for {
-		// Http request to a web service that might be slow.
-		slowNetworkRequest()
-		// Some heavy CPU computation.
-		cpuIntensiveTask()
-		// Poorly named function that you don't understand yet.
-		weirdFunction()
-	}
-}
-```
-
-One way to decide which of these three functions you should focus your attention on would be to wrap each function call like this:
-
-```go
-start := time.Now()
-slowNetworkRequest()
-fmt.Printf("slowNetworkRequest: %s\n", time.Since(start))
-// ...
-```
-
-However, this can be very tedious for large programs. You'll also have to figure out how to average the numbers in case they fluctuate.
-And once you've done that, you'll have to repeat the process for the functions called by the function you decide to focus on.
-
-### /debug/pprof/profile
-
-So, this seems like a perfect use case for a profiler. Let's try the `/debug/pprof/profile` endpoint of the builtin `net/http/pprof` package to analyze our program for 10s:
-
-```go
-import (
-	"log"
-	"net/http"
-	_ "net/http/pprof"
-)
-
-func main() {
-	go func() {
-		log.Println(http.ListenAndServe(":6060", nil))
-	}()
-
-	// <code to profile>
-}
-```
-
-```
-go tool pprof -http=:6061 http://localhost:6060/debug/pprof/profile?seconds=10
-```
-
-That was easy! Looks like we're spending all our time in `cpuIntensiveTask()`, so let's focus on that?
-
-![](./assets/pprof_cpu.png)
-
-But before we get carried away, let's quickly double-check this assumption by manually timing our function calls with `time.Since()` as described above:
-
-```
-slowNetworkRequest: 66.815041ms
-cpuIntensiveTask: 30.000672ms
-weirdFunction: 10.64764ms
-slowNetworkRequest: 67.194516ms
-cpuIntensiveTask: 30.000912ms
-weirdFunction: 10.105371ms
-// ...
-```
-
-Oh no, the builtin CPU profiler is misleading us! How is that possible? Well, it turns out the builtin profiler only shows On-CPU time. Time spent waiting on I/O is completely hidden from us.
-
-### /debug/pprof/trace
-
-Let's try something else. The `/debug/pprof/trace` endpoint includes a "synchronization blocking profile"; maybe that's what we need?
-
-```
-curl -so pprof.trace http://localhost:6060/debug/pprof/trace?seconds=10
-go tool trace --pprof=sync pprof.trace > sync.pprof
-go tool pprof --http=:6061 sync.pprof
-```
-
-Oh no, we're being misled again. This profiler thinks all our time is spent on `slowNetworkRequest()`. It's completely missing `cpuIntensiveTask()`. And what about `weirdFunction()`? It seems like no builtin profiler can see it?
-
-![](./assets/pprof_trace.png)
-
-### /debug/fgprof
-
-So what can we do? Let's try fgprof, which is designed to analyze mixed I/O and CPU workloads like the one we're dealing with here. We can easily add it alongside the builtin profilers.
-
-```go
-import (
-	"log"
-	"net/http"
-	_ "net/http/pprof"
-
-	"github.com/felixge/fgprof"
-)
-
-func main() {
-	http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
-	go func() {
-		log.Println(http.ListenAndServe(":6060", nil))
-	}()
-
-	// <code to profile>
-}
-```
-
-```
-go tool pprof --http=:6061 http://localhost:6060/debug/fgprof?seconds=10
-```
-
-Finally, a profile that shows all three of our functions and how much time we're spending on them. It also turns out our `weirdFunction()` was simply calling `time.Sleep()`. How weird indeed!
-
-![](./assets/fgprof_pprof.png)
-
-## How it Works
-
-### fgprof
-
-fgprof is implemented as a background goroutine that wakes up 99 times per second and calls `runtime.GoroutineProfile`. This returns a list of all goroutines, regardless of their current On/Off-CPU scheduling status, along with their call stacks.
-
-This data is used to maintain an in-memory stack counter which can be converted to the pprof or folded output format. The meat of the implementation is super simple and < 100 lines of code; you should [check it out](./fgprof.go).
-
-Generally speaking, fgprof should not have a big impact on the performance of your program. However, `runtime.GoroutineProfile` calls `stopTheWorld()` and could be slow if you have a lot of goroutines. For now the advice is to test the impact of the profiler on a development environment before running it against production instances.
-In the future this README will try to provide a more detailed analysis of the performance impact.
-
-### Go's builtin CPU Profiler
-
-The builtin Go CPU profiler uses the [setitimer(2)](https://linux.die.net/man/2/setitimer) system call to ask the operating system to be sent a `SIGPROF` signal 100 times a second. Each signal stops the Go process and gets delivered to a random thread's `sigtrampgo()` function. This function then proceeds to call `sigprof()` or `sigprofNonGo()` to record the thread's current stack.
-
-Since Go uses non-blocking I/O, Goroutines that wait on I/O are parked and not running on any threads. Therefore they end up being largely invisible to Go's builtin CPU profiler.
-
-## The Future of Go Profiling
-
-There is a great proposal for [hardware performance counters for CPU profiling](https://go.googlesource.com/proposal/+/refs/changes/08/219508/2/design/36821-perf-counter-pprof.md#5-empirical-evidence-on-the-accuracy-and-precision-of-pmu-profiles) in Go. The proposal is aimed at making the builtin CPU Profiler even more accurate, especially under highly parallel workloads on many CPUs. It also includes a very in-depth analysis of the current profiler. Based on the design, I think the proposed profiler would also be blind to I/O workloads, but still seems appealing for CPU based workloads.
-
-As far as fgprof itself is concerned, I might implement streaming output, leaving the final aggregation to other tools. This would open the door to even more advanced analysis, perhaps by integrating with tools such as [flamescope](https://github.com/Netflix/flamescope).
-
-Additionally I'm also open to the idea of contributing fgprof to the Go project itself. I've [floated the idea](https://groups.google.com/g/golang-dev/c/LCJyvL90xv8) on the golang-dev mailing list, so let's see what happens.
-
-## Known Issues
-
-There is no perfect approach to profiling, and fgprof is no exception. Below is a list of known issues that will hopefully not be of practical concern for most users, but are important to highlight.
-
-- fgprof can't catch goroutines while they are running in loops without function calls, only when they get asynchronously preempted. This can lead to reporting inaccuracies. Use the builtin CPU profiler if this is a problem for you.
-- fgprof may not work in Go 1.13 if another goroutine is in a loop without function calls the whole time. Async preemption in Go 1.14 should mostly fix this issue.
-- Internal C functions are not showing up in the stack traces, e.g. `runtime.nanotime` which is called by `time.Since` in the example program.
-- The current implementation is relying on the Go scheduler to schedule the internal goroutine at a fixed sample rate. Scheduler delays, especially biased ones, might cause inaccuracies.
-
-## Credits
-
-The following articles helped me to learn more about how profilers in general, and the Go profiler in particular work.
-
-- [How do Ruby & Python profilers work?](https://jvns.ca/blog/2017/12/17/how-do-ruby---python-profilers-work-/) by Julia Evans
-- [Profiling Go programs with pprof](https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/) by Julia Evans
-
-## License
-
-fgprof is licensed under the MIT License.
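Besides the HTTP handler shown in the README above, the package's Start function (see the fgprof.go removed just below) can profile a bounded stretch of code. A minimal sketch, assuming github.com/felixge/fgprof is still fetchable as a module; the workload and output filename are illustrative:

```go
package main

import (
	"log"
	"os"

	"github.com/felixge/fgprof"
)

func main() {
	f, err := os.Create("fgprof.folded")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Start samples goroutine stacks at 99hz until the returned stop
	// function is called, then writes the profile in the given format.
	stop := fgprof.Start(f, fgprof.FormatFolded)

	workload() // the code being profiled

	if err := stop(); err != nil {
		log.Fatal(err)
	}
}

// workload stands in for the CPU- and I/O-bound code under test.
func workload() {}
```

The folded output can be fed straight to Brendan Gregg's flamegraph.pl, as the README above demonstrates.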
diff --git a/vendor/github.com/felixge/fgprof/fgprof.go b/vendor/github.com/felixge/fgprof/fgprof.go deleted file mode 100644 index dba16161e..000000000 --- a/vendor/github.com/felixge/fgprof/fgprof.go +++ /dev/null @@ -1,97 +0,0 @@ -// fgprof is a sampling Go profiler that allows you to analyze On-CPU as well -// as [Off-CPU](http://www.brendangregg.com/offcpuanalysis.html) (e.g. I/O) -// time together. -package fgprof - -import ( - "io" - "runtime" - "strings" - "time" -) - -// Start begins profiling the goroutines of the program and returns a function -// that needs to be invoked by the caller to stop the profiling and write the -// results to w using the given format. -func Start(w io.Writer, format Format) func() error { - // Go's CPU profiler uses 100hz, but 99hz might be less likely to result in - // accidental synchronization with the program we're profiling. - const hz = 99 - ticker := time.NewTicker(time.Second / hz) - stopCh := make(chan struct{}) - - stackCounts := stackCounter{} - go func() { - defer ticker.Stop() - - for { - select { - case <-ticker.C: - stackCounts.Update() - case <-stopCh: - return - } - } - }() - - return func() error { - stopCh <- struct{}{} - return writeFormat(w, stackCounts, format, hz) - } -} - -type stackCounter map[string]int - -func (s stackCounter) Update() { - // Determine the runtime.Frame of this func so we can hide it from our - // profiling output. - rpc := make([]uintptr, 1) - n := runtime.Callers(1, rpc) - if n < 1 { - panic("could not determine selfFrame") - } - selfFrame, _ := runtime.CallersFrames(rpc).Next() - - // COPYRIGHT: The code for populating `p` below is copied from - // writeRuntimeProfile in src/runtime/pprof/pprof.go. - // - // Find out how many records there are (GoroutineProfile(nil)), - // allocate that many records, and get the data. - // There's a race—more records might be added between - // the two calls—so allocate a few extra records for safety - // and also try again if we're very unlucky. - // The loop should only execute one iteration in the common case. - var p []runtime.StackRecord - n, ok := runtime.GoroutineProfile(nil) - for { - // Allocate room for a slightly bigger profile, - // in case a few more entries have been added - // since the call to ThreadProfile. - p = make([]runtime.StackRecord, n+10) - n, ok = runtime.GoroutineProfile(p) - if ok { - p = p[0:n] - break - } - // Profile grew; try again. - } - -outer: - for _, pp := range p { - frames := runtime.CallersFrames(pp.Stack()) - - var stack []string - for { - frame, more := frames.Next() - if !more { - break - } else if frame.Entry == selfFrame.Entry { - continue outer - } - - stack = append([]string{frame.Function}, stack...) - } - key := strings.Join(stack, ";") - s[key]++ - } -} diff --git a/vendor/github.com/felixge/fgprof/format.go b/vendor/github.com/felixge/fgprof/format.go deleted file mode 100644 index 1a351e39c..000000000 --- a/vendor/github.com/felixge/fgprof/format.go +++ /dev/null @@ -1,102 +0,0 @@ -package fgprof - -import ( - "fmt" - "io" - "sort" - "strings" - - "github.com/google/pprof/profile" -) - -type Format string - -const ( - // FormatFolded is used by Brendan Gregg's FlameGraph utility, see - // https://github.com/brendangregg/FlameGraph#2-fold-stacks. - FormatFolded Format = "folded" - // FormatPprof is used by Google's pprof utility, see - // https://github.com/google/pprof/blob/master/proto/README.md. 
- FormatPprof Format = "pprof" -) - -func writeFormat(w io.Writer, s stackCounter, f Format, hz int) error { - switch f { - case FormatFolded: - return writeFolded(w, s) - case FormatPprof: - return toPprof(s, hz).Write(w) - default: - return fmt.Errorf("unknown format: %q", f) - } -} - -func writeFolded(w io.Writer, s stackCounter) error { - for _, stack := range sortedKeys(s) { - count := s[stack] - if _, err := fmt.Fprintf(w, "%s %d\n", stack, count); err != nil { - return err - } - } - return nil -} - -func toPprof(s stackCounter, hz int) *profile.Profile { - functionID := uint64(1) - locationID := uint64(1) - line := int64(1) - - p := &profile.Profile{} - m := &profile.Mapping{ID: 1, HasFunctions: true} - p.Mapping = []*profile.Mapping{m} - p.SampleType = []*profile.ValueType{ - { - Type: "samples", - Unit: "count", - }, - { - Type: "time", - Unit: "nanoseconds", - }, - } - - for stack, count := range s { - sample := &profile.Sample{ - Value: []int64{ - int64(count), - int64(1000 * 1000 * 1000 / hz * count), - }, - } - for _, fnName := range strings.Split(stack, ";") { - function := &profile.Function{ - ID: functionID, - Name: fnName, - } - p.Function = append(p.Function, function) - - location := &profile.Location{ - ID: locationID, - Mapping: m, - Line: []profile.Line{{Function: function}}, - } - p.Location = append(p.Location, location) - sample.Location = append([]*profile.Location{location}, sample.Location...) - - line++ - - locationID++ - functionID++ - } - p.Sample = append(p.Sample, sample) - } - return p -} - -func sortedKeys(s stackCounter) []string { - var keys []string - for stack := range s { - keys = append(keys, stack) - } - sort.Strings(keys) - return keys -} diff --git a/vendor/github.com/felixge/fgprof/handler.go b/vendor/github.com/felixge/fgprof/handler.go deleted file mode 100644 index a25cdc695..000000000 --- a/vendor/github.com/felixge/fgprof/handler.go +++ /dev/null @@ -1,32 +0,0 @@ -package fgprof - -import ( - "fmt" - "net/http" - "time" -) - -// Handler returns an http handler that takes an optional "seconds" query -// argument that defaults to "30" and produces a profile over this duration. -// The optional "format" parameter controls if the output is written in -// Google's "pprof" format (default) or Brendan Gregg's "folded" stack format. 
-func Handler() http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		var seconds int
-		if s := r.URL.Query().Get("seconds"); s == "" {
-			seconds = 30
-		} else if _, err := fmt.Sscanf(s, "%d", &seconds); err != nil || seconds <= 0 {
-			w.WriteHeader(http.StatusBadRequest)
-			fmt.Fprintf(w, "bad seconds: %d: %s\n", seconds, err)
-			return
-		}
-
-		format := Format(r.URL.Query().Get("format"))
-		if format == "" {
-			format = FormatPprof
-		}
-
-		stop := Start(w, format)
-		defer stop()
-		time.Sleep(time.Duration(seconds) * time.Second)
-	})
-}
diff --git a/vendor/github.com/felixge/fgprof/pprof.go b/vendor/github.com/felixge/fgprof/pprof.go
deleted file mode 100644
index f0908e8e0..000000000
--- a/vendor/github.com/felixge/fgprof/pprof.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package fgprof
-
-import (
-	"strings"
-
-	"github.com/google/pprof/profile"
-)
-
-func toProfile(s stackCounter, hz int) *profile.Profile {
-	functionID := uint64(1)
-	locationID := uint64(1)
-
-	p := &profile.Profile{}
-	m := &profile.Mapping{ID: 1, HasFunctions: true}
-	p.Mapping = []*profile.Mapping{m}
-	p.SampleType = []*profile.ValueType{
-		{
-			Type: "samples",
-			Unit: "count",
-		},
-		{
-			Type: "time",
-			Unit: "nanoseconds",
-		},
-	}
-
-	for _, stack := range sortedKeys(s) {
-		count := s[stack]
-		sample := &profile.Sample{
-			Value: []int64{
-				int64(count),
-				int64(1000 * 1000 * 1000 / hz * count),
-			},
-		}
-		for _, fnName := range strings.Split(stack, ";") {
-			function := &profile.Function{
-				ID:   functionID,
-				Name: fnName,
-			}
-			p.Function = append(p.Function, function)
-
-			location := &profile.Location{
-				ID:      locationID,
-				Mapping: m,
-				Line:    []profile.Line{{Function: function}},
-			}
-			p.Location = append(p.Location, location)
-			sample.Location = append(sample.Location, location)
-
-			locationID++
-			functionID++
-		}
-		p.Sample = append(p.Sample, sample)
-	}
-	return p
-}
diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE
deleted file mode 100644
index 9d83342ac..000000000
--- a/vendor/github.com/go-kit/kit/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Peter Bourgon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
- diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md deleted file mode 100644 index 5492dd944..000000000 --- a/vendor/github.com/go-kit/kit/log/README.md +++ /dev/null @@ -1,160 +0,0 @@ -# package log - -**Deprecation notice:** The core Go kit log packages (log, log/level, log/term, and -log/syslog) have been moved to their own repository at github.com/go-kit/log. -The corresponding packages in this directory remain for backwards compatibility. -Their types alias the types and their functions call the functions provided by -the new repository. Using either import path should be equivalent. Prefer the -new import path when practical. - -______ - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled -logging, and so on. It can be used for both typical application log events, -and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are -_data_, and warrant some level of schematic rigor. Using a stricter, -key/value-oriented message format for our logs, containing contextual and -semantic information, makes it much easier to get insight into the -operational activity of the systems we build. Consequently, `package log` is -of the strong belief that "[the benefits of structured logging outweigh the -minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier -than you'd expect. - -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -w := log.NewSyncWriter(os.Stderr) -logger := log.NewLogfmtLogger(w) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" answer=42 -``` - -### Contextual Loggers - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - logger = log.With(logger, "instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.With(logger, "component", "worker")).Run() - NewSlacker(log.With(logger, "component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, you need to pipe all of your logging through the -stdlib log package, you can redirect Go kit logger to the stdlib logger. 
-
-```go
-logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
-logger.Log("legacy", true, "msg", "at least it's something")
-
-// Output:
-// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
-```
-
-### Timestamps and callers
-
-```go
-var logger log.Logger
-logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
-logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
-
-logger.Log("msg", "hello")
-
-// Output:
-// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
-```
-
-## Levels
-
-Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level).
-
-## Supported output formats
-
-- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
-- JSON
-
-## Enhancements
-
-`package log` is centered on the one-method Logger interface.
-
-```go
-type Logger interface {
-	Log(keyvals ...interface{}) error
-}
-```
-
-This interface, and its supporting code, is the product of much iteration
-and evaluation. For more details on the evolution of the Logger interface,
-see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
-a talk by [Chris Hines](https://github.com/ChrisHines).
-Also, please see
-[#63](https://github.com/go-kit/kit/issues/63),
-[#76](https://github.com/go-kit/kit/pull/76),
-[#131](https://github.com/go-kit/kit/issues/131),
-[#157](https://github.com/go-kit/kit/pull/157),
-[#164](https://github.com/go-kit/kit/issues/164), and
-[#252](https://github.com/go-kit/kit/pull/252)
-to review historical conversations about package log and the Logger interface.
-
-Value-add packages and suggestions,
-like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
-are of course welcome. Good proposals should
-
-- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
-- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
-- Be friendly to packages that accept only an unadorned log.Logger.
-
-## Benchmarks & comparisons
-
-There are a few Go logging benchmarks and comparisons that include Go kit's package log.
-
-- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
-- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log
diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go
deleted file mode 100644
index c9873f4bc..000000000
--- a/vendor/github.com/go-kit/kit/log/doc.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Package log provides a structured logger.
-//
-// Deprecated: Use github.com/go-kit/log instead.
-//
-// Structured logging produces logs easily consumed later by humans or
-// machines. Humans might be interested in debugging errors, or tracing
-// specific requests. Machines might be interested in counting interesting
-// events, or aggregating information for off-line processing. In both cases,
-// it is important that the log messages are structured and actionable.
-// Package log is designed to encourage both of these best practices.
-//
-// Basic Usage
-//
-// The fundamental interface is Logger. Loggers create log events from
-// key/value data.
The Logger interface has a single method, Log, which -// accepts a sequence of alternating key/value pairs, which this package names -// keyvals. -// -// type Logger interface { -// Log(keyvals ...interface{}) error -// } -// -// Here is an example of a function using a Logger to create log events. -// -// func RunTask(task Task, logger log.Logger) string { -// logger.Log("taskID", task.ID, "event", "starting task") -// ... -// logger.Log("taskID", task.ID, "event", "task complete") -// } -// -// The keys in the above example are "taskID" and "event". The values are -// task.ID, "starting task", and "task complete". Every key is followed -// immediately by its value. -// -// Keys are usually plain strings. Values may be any type that has a sensible -// encoding in the chosen log format. With structured logging it is a good -// idea to log simple values without formatting them. This practice allows -// the chosen logger to encode values in the most appropriate way. -// -// Contextual Loggers -// -// A contextual logger stores keyvals that it includes in all log events. -// Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With, WithPrefix, and WithSuffix -// add context to a logger. We can use With to improve the RunTask example. -// -// func RunTask(task Task, logger log.Logger) string { -// logger = log.With(logger, "taskID", task.ID) -// logger.Log("event", "starting task") -// ... -// taskHelper(task.Cmd, logger) -// ... -// logger.Log("event", "task complete") -// } -// -// The improved version emits the same log events as the original for the -// first and last calls to Log. Passing the contextual logger to taskHelper -// enables each log event created by taskHelper to include the task.ID even -// though taskHelper does not have access to that value. Using contextual -// loggers this way simplifies producing log output that enables tracing the -// life cycle of individual tasks. (See the Contextual example for the full -// code of the above snippet.) -// -// Dynamic Contextual Values -// -// A Valuer function stored in a contextual logger generates a new value each -// time an event is logged. The Valuer example demonstrates how this feature -// works. -// -// Valuers provide the basis for consistently logging timestamps and source -// code location. The log package defines several valuers for that purpose. -// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and -// DefaultCaller. A common logger initialization sequence that ensures all log -// entries contain a timestamp and source location looks like this: -// -// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) -// -// Concurrent Safety -// -// Applications with multiple goroutines want each log event written to the -// same logger to remain separate from other log events. Package log provides -// two simple solutions for concurrent safe logging. -// -// NewSyncWriter wraps an io.Writer and serializes each call to its Write -// method. Using a SyncWriter has the benefit that the smallest practical -// portion of the logging logic is performed within a mutex, but it requires -// the formatting Logger to make only one call to Write per log event. -// -// NewSyncLogger wraps any Logger and serializes each call to its Log method. 
-// Using a SyncLogger has the benefit that it guarantees each log event is -// handled atomically within the wrapped logger, but it typically serializes -// both the formatting and output logic. Use a SyncLogger if the formatting -// logger may perform multiple writes per log event. -// -// Error Handling -// -// This package relies on the practice of wrapping or decorating loggers with -// other loggers to provide composable pieces of functionality. It also means -// that Logger.Log must return an error because some -// implementations—especially those that output log data to an io.Writer—may -// encounter errors that cannot be handled locally. This in turn means that -// Loggers that wrap other loggers should return errors from the wrapped -// logger up the stack. -// -// Fortunately, the decorator pattern also provides a way to avoid the -// necessity to check for errors every time an application calls Logger.Log. -// An application required to panic whenever its Logger encounters -// an error could initialize its logger as follows. -// -// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger := log.LoggerFunc(func(keyvals ...interface{}) error { -// if err := fmtlogger.Log(keyvals...); err != nil { -// panic(err) -// } -// return nil -// }) -package log diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go deleted file mode 100644 index edfde2f46..000000000 --- a/vendor/github.com/go-kit/kit/log/json_logger.go +++ /dev/null @@ -1,15 +0,0 @@ -package log - -import ( - "io" - - "github.com/go-kit/log" -) - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. -func NewJSONLogger(w io.Writer) Logger { - return log.NewJSONLogger(w) -} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go deleted file mode 100644 index 7baf8708a..000000000 --- a/vendor/github.com/go-kit/kit/log/level/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package level implements leveled logging on top of Go kit's log package. -// -// Deprecated: Use github.com/go-kit/log/level instead. -// -// To use the level package, create a logger as per normal in your func main, -// and wrap it with level.NewFilter. -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.AllowInfo()) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error -// helper methods to emit leveled log events. -// -// logger.Log("foo", "bar") // as normal, no level -// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) -// if value > 100 { -// level.Error(logger).Log("value", value) -// } -// -// NewFilter allows precise control over what happens when a log event is -// emitted without a level key, or if a squelched level is used. Check the -// Option functions for details. 
-package level diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go deleted file mode 100644 index 803e8b96c..000000000 --- a/vendor/github.com/go-kit/kit/log/level/level.go +++ /dev/null @@ -1,120 +0,0 @@ -package level - -import ( - "github.com/go-kit/log" - "github.com/go-kit/log/level" -) - -// Error returns a logger that includes a Key/ErrorValue pair. -func Error(logger log.Logger) log.Logger { - return level.Error(logger) -} - -// Warn returns a logger that includes a Key/WarnValue pair. -func Warn(logger log.Logger) log.Logger { - return level.Warn(logger) -} - -// Info returns a logger that includes a Key/InfoValue pair. -func Info(logger log.Logger) log.Logger { - return level.Info(logger) -} - -// Debug returns a logger that includes a Key/DebugValue pair. -func Debug(logger log.Logger) log.Logger { - return level.Debug(logger) -} - -// NewFilter wraps next and implements level filtering. See the commentary on -// the Option functions for a detailed description of how to configure levels. -// If no options are provided, all leveled log events created with Debug, -// Info, Warn or Error helper methods are squelched and non-leveled log -// events are passed to next unmodified. -func NewFilter(next log.Logger, options ...Option) log.Logger { - return level.NewFilter(next, options...) -} - -// Option sets a parameter for the leveled logger. -type Option = level.Option - -// AllowAll is an alias for AllowDebug. -func AllowAll() Option { - return level.AllowAll() -} - -// AllowDebug allows error, warn, info and debug level log events to pass. -func AllowDebug() Option { - return level.AllowDebug() -} - -// AllowInfo allows error, warn and info level log events to pass. -func AllowInfo() Option { - return level.AllowInfo() -} - -// AllowWarn allows error and warn level log events to pass. -func AllowWarn() Option { - return level.AllowWarn() -} - -// AllowError allows only error level log events to pass. -func AllowError() Option { - return level.AllowError() -} - -// AllowNone allows no leveled log events to pass. -func AllowNone() Option { - return level.AllowNone() -} - -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. -func ErrNotAllowed(err error) Option { - return level.ErrNotAllowed(err) -} - -// SquelchNoLevel instructs Log to squelch log events with no level, so that -// they don't proceed through to the wrapped logger. If SquelchNoLevel is set -// to true and a log event is squelched in this way, the error value -// configured with ErrNoLevel is returned to the caller. -func SquelchNoLevel(squelch bool) Option { - return level.SquelchNoLevel(squelch) -} - -// ErrNoLevel sets the error to return from Log when it squelches a log event -// with no level. By default, ErrNoLevel is nil; in this case the log event is -// squelched with no error. -func ErrNoLevel(err error) Option { - return level.ErrNoLevel(err) -} - -// NewInjector wraps next and returns a logger that adds a Key/level pair to -// the beginning of log events that don't already contain a level. In effect, -// this gives a default level to logs without a level. -func NewInjector(next log.Logger, lvl Value) log.Logger { - return level.NewInjector(next, lvl) -} - -// Value is the interface that each of the canonical level values implement. 
-// It contains unexported methods that prevent types from other packages from -// implementing it and guaranteeing that NewFilter can distinguish the levels -// defined in this package from all other values. -type Value = level.Value - -// Key returns the unique key added to log events by the loggers in this -// package. -func Key() interface{} { return level.Key() } - -// ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return level.ErrorValue() } - -// WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return level.WarnValue() } - -// InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return level.InfoValue() } - -// DebugValue returns the unique value added to log events by Debug. -func DebugValue() Value { return level.DebugValue() } diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go deleted file mode 100644 index 164a4f94a..000000000 --- a/vendor/github.com/go-kit/kit/log/log.go +++ /dev/null @@ -1,51 +0,0 @@ -package log - -import ( - "github.com/go-kit/log" -) - -// Logger is the fundamental interface for all log operations. Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. -// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies or retains any of its elements must make a copy first. -type Logger = log.Logger - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = log.ErrMissingValue - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With, -// WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func With(logger Logger, keyvals ...interface{}) Logger { - return log.With(logger, keyvals...) -} - -// WithPrefix returns a new contextual logger with keyvals prepended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - return log.WithPrefix(logger, keyvals...) -} - -// WithSuffix returns a new contextual logger with keyvals appended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithSuffix(logger Logger, keyvals ...interface{}) Logger { - return log.WithSuffix(logger, keyvals...) -} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. 
-type LoggerFunc = log.LoggerFunc
diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go
deleted file mode 100644
index 51cde2c56..000000000
--- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package log
-
-import (
-	"io"
-
-	"github.com/go-kit/log"
-)
-
-// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
-// logfmt format. Each log event produces no more than one call to w.Write.
-// The passed Writer must be safe for concurrent use by multiple goroutines if
-// the returned Logger will be used concurrently.
-func NewLogfmtLogger(w io.Writer) Logger {
-	return log.NewLogfmtLogger(w)
-}
diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go
deleted file mode 100644
index b02c68606..000000000
--- a/vendor/github.com/go-kit/kit/log/nop_logger.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package log
-
-import "github.com/go-kit/log"
-
-// NewNopLogger returns a logger that doesn't do anything.
-func NewNopLogger() Logger {
-	return log.NewNopLogger()
-}
diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go
deleted file mode 100644
index cb604a7a8..000000000
--- a/vendor/github.com/go-kit/kit/log/stdlib.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package log
-
-import (
-	"io"
-
-	"github.com/go-kit/log"
-)
-
-// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
-// designed to be passed to a Go kit logger as the writer, for cases where
-// it's necessary to redirect all Go kit log output to the stdlib logger.
-//
-// If you have any choice in the matter, you shouldn't use this. Prefer to
-// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
-type StdlibWriter = log.StdlibWriter
-
-// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
-// logger's SetOutput. It will extract date/timestamps, filenames, and
-// messages, and place them under relevant keys.
-type StdlibAdapter = log.StdlibAdapter
-
-// StdlibAdapterOption sets a parameter for the StdlibAdapter.
-type StdlibAdapterOption = log.StdlibAdapterOption
-
-// TimestampKey sets the key for the timestamp field. By default, it's "ts".
-func TimestampKey(key string) StdlibAdapterOption {
-	return log.TimestampKey(key)
-}
-
-// FileKey sets the key for the file and line field. By default, it's "caller".
-func FileKey(key string) StdlibAdapterOption {
-	return log.FileKey(key)
-}
-
-// MessageKey sets the key for the actual log message. By default, it's "msg".
-func MessageKey(key string) StdlibAdapterOption {
-	return log.MessageKey(key)
-}
-
-// Prefix configures the adapter to parse a prefix from stdlib log events. If
-// you provide a non-empty prefix to the stdlib logger, then you should provide
-// that same prefix to the adapter via this option.
-//
-// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to
-// true if you want to include the parsed prefix in the msg.
-func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption {
-	return log.Prefix(prefix, joinPrefixToMsg)
-}
-
-// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
-// logger. It's designed to be passed to log.SetOutput.
-func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
-	return log.NewStdlibAdapter(logger, options...)
-} diff --git a/vendor/github.com/go-kit/kit/log/sync.go b/vendor/github.com/go-kit/kit/log/sync.go deleted file mode 100644 index bcfee2bfd..000000000 --- a/vendor/github.com/go-kit/kit/log/sync.go +++ /dev/null @@ -1,37 +0,0 @@ -package log - -import ( - "io" - - "github.com/go-kit/log" -) - -// SwapLogger wraps another logger that may be safely replaced while other -// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger -// will discard all log events without error. -// -// SwapLogger serves well as a package global logger that can be changed by -// importers. -type SwapLogger = log.SwapLogger - -// NewSyncWriter returns a new writer that is safe for concurrent use by -// multiple goroutines. Writes to the returned writer are passed on to w. If -// another write is already in progress, the calling goroutine blocks until -// the writer is available. -// -// If w implements the following interface, so does the returned writer. -// -// interface { -// Fd() uintptr -// } -func NewSyncWriter(w io.Writer) io.Writer { - return log.NewSyncWriter(w) -} - -// NewSyncLogger returns a logger that synchronizes concurrent use of the -// wrapped logger. When multiple goroutines use the SyncLogger concurrently -// only one goroutine will be allowed to log to the wrapped logger at a time. -// The other goroutines will block until the logger is available. -func NewSyncLogger(logger Logger) Logger { - return log.NewSyncLogger(logger) -} diff --git a/vendor/github.com/go-kit/kit/log/value.go b/vendor/github.com/go-kit/kit/log/value.go deleted file mode 100644 index 96d783bd5..000000000 --- a/vendor/github.com/go-kit/kit/log/value.go +++ /dev/null @@ -1,52 +0,0 @@ -package log - -import ( - "time" - - "github.com/go-kit/log" -) - -// A Valuer generates a log value. When passed to With, WithPrefix, or -// WithSuffix in a value element (odd indexes), it represents a dynamic -// value which is re-evaluated with each log event. -type Valuer = log.Valuer - -// Timestamp returns a timestamp Valuer. It invokes the t function to get the -// time; unless you are doing something tricky, pass time.Now. -// -// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which -// are TimestampFormats that use the RFC3339Nano format. -func Timestamp(t func() time.Time) Valuer { - return log.Timestamp(t) -} - -// TimestampFormat returns a timestamp Valuer with a custom time format. It -// invokes the t function to get the time to format; unless you are doing -// something tricky, pass time.Now. The layout string is passed to -// Time.Format. -// -// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which -// are TimestampFormats that use the RFC3339Nano format. -func TimestampFormat(t func() time.Time, layout string) Valuer { - return log.TimestampFormat(t, layout) -} - -// Caller returns a Valuer that returns a file and line from a specified depth -// in the callstack. Users will probably want to use DefaultCaller. -func Caller(depth int) Valuer { - return log.Caller(depth) -} - -var ( - // DefaultTimestamp is a Valuer that returns the current wallclock time, - // respecting time zones, when bound. - DefaultTimestamp = log.DefaultTimestamp - - // DefaultTimestampUTC is a Valuer that returns the current time in UTC - // when bound. - DefaultTimestampUTC = log.DefaultTimestampUTC - - // DefaultCaller is a Valuer that returns the file and line where the Log - // method was invoked. It can only be used with log.With. 
- DefaultCaller = log.DefaultCaller -) diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml deleted file mode 100644 index 841c4281e..000000000 --- a/vendor/github.com/go-openapi/analysis/.codecov.yml +++ /dev/null @@ -1,5 +0,0 @@ -coverage: - status: - patch: - default: - target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes deleted file mode 100644 index d020be8ea..000000000 --- a/vendor/github.com/go-openapi/analysis/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go text eol=lf - diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore deleted file mode 100644 index 87c3bd3e6..000000000 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -coverage.txt -*.cov -.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml deleted file mode 100644 index 8cad29879..000000000 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ /dev/null @@ -1,53 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 40 - gocognit: - min-complexity: 40 - maligned: - suggest-new: true - dupl: - threshold: 150 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits - # scopelint is useful, but also reports false positives - # that unfortunately can't be disabled. So we disable the - # linter rather than changing code that works. - # see: https://github.com/kyoh86/scopelint/issues/4 - - scopelint - - godox - - gocognit - #- whitespace - - wsl - - funlen - - testpackage - - wrapcheck - #- nlreturn - - gomnd - - goerr113 - - exhaustivestruct - #- errorlint - #- nestif - - gofumpt - - godot - - gci - - dogsled - - paralleltest - - tparallel - - thelper - - ifshort - - forbidigo - - cyclop - - varnamelen diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/analysis/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md
deleted file mode 100644
index aad6da10f..000000000
--- a/vendor/github.com/go-openapi/analysis/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# OpenAPI initiative analysis
-
-[![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis)
-[![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master)
-[![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis)
-[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
-[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE)
-[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis)
-[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis)
-
-
-A foundational library to analyze an OAI specification document for easier reasoning about the content.
-
-## What's inside?
-
-* An analyzer providing methods to walk the functional content of a specification
-* A spec flattener producing a self-contained document bundle, while preserving `$ref`s
-* A spec merger ("mixin") to merge several spec documents into a primary spec
-* A spec "fixer" ensuring that response descriptions are non-empty
-
-[Documentation](https://godoc.org/github.com/go-openapi/analysis)
-
-## FAQ
-
-* Does this library support OpenAPI 3?
-
-> No.
-> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
-> There is no plan to make it evolve toward supporting OpenAPI 3.x.
-> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
->
diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go
deleted file mode 100644
index c17aee1b6..000000000
--- a/vendor/github.com/go-openapi/analysis/analyzer.go
+++ /dev/null
@@ -1,1064 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - headerItems map[string]spec.Ref - parameterItems map[string]spec.Ref - allRefs map[string]spec.Ref - pathItems map[string]spec.Ref -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) - if location == "header" { - // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas - // and $ref are not supported here. However it is possible to analyze this. - r.headerItems["#"+key] = items.Ref - } else { - r.parameterItems["#"+key] = items.Ref - } -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { - r.pathItems["#"+key] = pathItem.Ref - r.addRef(key, pathItem.Ref) -} - -type patternAnalysis struct { - parameters map[string]string - headers map[string]string - items map[string]string - schemas map[string]string - allPatterns map[string]string -} - -func (p *patternAnalysis) addPattern(key, pattern string) { - p.allPatterns["#"+key] = pattern -} - -func (p *patternAnalysis) addParameterPattern(key, pattern string) { - p.parameters["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addHeaderPattern(key, pattern string) { - p.headers["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addItemsPattern(key, pattern string) { - p.items["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addSchemaPattern(key, pattern string) { - p.schemas["#"+key] = pattern - p.addPattern(key, pattern) -} - -type enumAnalysis struct { - parameters map[string][]interface{} - headers map[string][]interface{} - items map[string][]interface{} - schemas map[string][]interface{} - allEnums map[string][]interface{} -} - -func (p *enumAnalysis) addEnum(key string, enum []interface{}) { - p.allEnums["#"+key] = enum -} - -func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { - p.parameters["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { - p.headers["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { - p.items["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { - p.schemas["#"+key] = enum - p.addEnum(key, enum) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. 
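(Below, a minimal usage sketch of this constructor together with a few of the index accessors defined later in this file. This is a hedged illustration, not part of the vendored source: the *spec.Swagger is assumed to be loaded elsewhere, and "/items" is a made-up example path.)

    package main

    import (
        "fmt"

        "github.com/go-openapi/analysis"
        "github.com/go-openapi/spec"
    )

    // inspect builds the analyzed view once, then queries its indices.
    func inspect(doc *spec.Swagger) {
        an := analysis.New(doc) // indexes refs, patterns and enums up front

        // every path found in the document
        for p := range an.AllPaths() {
            fmt.Println("path:", p)
        }

        // distinct consumes media types declared anywhere in the spec
        fmt.Println("consumes:", an.RequiredConsumes())

        // look up one operation by method and path (hypothetical path)
        if op, ok := an.OperationFor("GET", "/items"); ok {
            fmt.Println("operation id:", op.ID)
        }
    }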
-func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - a.reset() - a.initialize() - - return a -} - -// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec. -type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - patterns patternAnalysis - enums enumAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) reset() { - s.consumes = make(map[string]struct{}, 150) - s.produces = make(map[string]struct{}, 150) - s.authSchemes = make(map[string]struct{}, 150) - s.operations = make(map[string]map[string]*spec.Operation, 150) - s.allSchemas = make(map[string]SchemaRef, 150) - s.allOfs = make(map[string]SchemaRef, 150) - s.references.schemas = make(map[string]spec.Ref, 150) - s.references.pathItems = make(map[string]spec.Ref, 150) - s.references.responses = make(map[string]spec.Ref, 150) - s.references.parameters = make(map[string]spec.Ref, 150) - s.references.items = make(map[string]spec.Ref, 150) - s.references.headerItems = make(map[string]spec.Ref, 150) - s.references.parameterItems = make(map[string]spec.Ref, 150) - s.references.allRefs = make(map[string]spec.Ref, 150) - s.patterns.parameters = make(map[string]string, 150) - s.patterns.headers = make(map[string]string, 150) - s.patterns.items = make(map[string]string, 150) - s.patterns.schemas = make(map[string]string, 150) - s.patterns.allPatterns = make(map[string]string, 150) - s.enums.parameters = make(map[string][]interface{}, 150) - s.enums.headers = make(map[string][]interface{}, 150) - s.enums.items = make(map[string][]interface{}, 150) - s.enums.schemas = make(map[string][]interface{}, 150) - s.enums.allEnums = make(map[string][]interface{}, 150) -} - -func (s *Spec) reload() { - s.reset() - s.initialize() -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) //#nosec - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref, "parameter") - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", parameter.Schema, refPref) - } - if parameter.Pattern != "" { - s.patterns.addParameterPattern(refPref, parameter.Pattern) - } - if len(parameter.Enum) > 0 { - s.enums.addParameterEnum(refPref, parameter.Enum) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for k, v := range response.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - if v.Items != nil { - s.analyzeItems("items", v.Items, hRefPref, "header") - } - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", response.Schema, refPref) - } - 
} - - for name := range s.spec.Definitions { - schema := s.spec.Definitions[name] - s.analyzeSchema(name, &schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: resolve refs here? - // Currently, operations declared via pathItem $ref are known only after expansion - op := pi - if pi.Ref.String() != "" { - key := slashpath.Join("/paths", jsonpointer.Escape(path)) - s.references.addPathItemRef(key, pi) - } - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) //#nosec - } - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref, "parameter") - } - if param.Schema != nil { - s.analyzeSchema("schema", param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref, location) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items, location) - } - if items.Pattern != "" { - s.patterns.addItemsPattern(refPref, items.Pattern) - } - if len(items.Enum) > 0 { - s.enums.addItemsEnum(refPref, items.Enum) - } -} - -func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) //#nosec - } - - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - - s.analyzeItems("items", param.Items, refPref, "parameter") - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", param.Schema, refPref) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - s.analyzeParameter(prefix, i, param) - } - - if op.Responses == nil { - return - } - - if op.Responses.Default != nil { - s.analyzeDefaultResponse(prefix, op.Responses.Default) - } - - for k, res := range op.Responses.StatusCodeResponses { - s.analyzeResponse(prefix, k, res) - } -} - -func (s *Spec) analyzeDefaultResponse(prefix 
string, res *spec.Response) { - refPref := slashpath.Join(prefix, "responses", "default") - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, res) - } - - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - } - - if res.Schema != nil { - s.analyzeSchema("schema", res.Schema, refPref) - } -} - -func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) //#nosec - } - - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - - if res.Schema != nil { - s.analyzeSchema("schema", res.Schema, refPref) - } -} - -func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: schema, - Ref: spec.MustCreateRef("#" + refURI), - TopLevel: prefix == "/definitions", - } - - s.allSchemas["#"+refURI] = schRef - - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - - if schema.Pattern != "" { - s.patterns.addSchemaPattern(refURI, schema.Pattern) - } - - if len(schema.Enum) > 0 { - s.enums.addSchemaEnum(refURI, schema.Enum) - } - - for k, v := range schema.Definitions { - v := v - s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) - } - - for k, v := range schema.Properties { - v := v - s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) - } - - for k, v := range schema.PatternProperties { - v := v - // NOTE: swagger 2.0 does not support PatternProperties. - // However it is possible to analyze this in a schema - s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) - } - - for i := range schema.AllOf { - v := &schema.AllOf[i] - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = schRef - } - - for i := range schema.AnyOf { - v := &schema.AnyOf[i] - // NOTE: swagger 2.0 does not support anyOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - - for i := range schema.OneOf { - v := &schema.OneOf[i] - // NOTE: swagger 2.0 does not support oneOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - - if schema.Not != nil { - // NOTE: swagger 2.0 does not support "not" constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema("not", schema.Not, refURI) - } - - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) - } - - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - // NOTE: swagger 2.0 does not support AdditionalItems. 
- // However it is possible to analyze this in a schema - s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) - } - - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", schema.Items.Schema, refURI) - } - - for i := range schema.Items.Schemas { - sch := &schema.Items.Schemas[i] - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - result := [][]SecurityRequirement{} - for _, scheme := range schemes { - if len(scheme) == 0 { - // append a zero object for anonymous - result = append(result, []SecurityRequirement{{}}) - - continue - } - - var reqs []SecurityRequirement - for k, v := range scheme { - if v == nil { - v = []string{} - } - reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) - } - - result = append(result, reqs) - } - - return result -} - -// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { - result := make(map[string]spec.SecurityScheme) - - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - - result := make(map[string]spec.SecurityScheme) - for _, reqs := range requirements { - for _, v := range reqs { - if v.Name == "" { - // optional requirement - continue - } - - if _, ok := result[v.Name]; ok { - // duplicate requirement - continue - } - - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - } - - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) 
string { - return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - // TODO: this should be x-go-name - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - - return swag.ToGoName(param.Name) -} - -// ErrorOnParamFunc is a callback function to be invoked -// whenever an error is encountered while resolving references -// on parameters. -// -// This function takes as input the spec.Parameter which triggered the -// error and the error itself. -// -// If the callback function returns false, the calling function should bail. -// -// If it returns true, the calling function should continue evaluating parameters. -// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). -type ErrorOnParamFunc func(spec.Parameter, error) bool - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { - for _, param := range parameters { - pr := param - if pr.Ref.String() == "" { - res[mapKeyFromParam(&pr)] = pr - - continue - } - - // resolve $ref - if callmeOnError == nil { - callmeOnError = func(_ spec.Parameter, err error) bool { - panic(err) - } - } - - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { - continue - } - - break - } - - objAsParam, ok := obj.(spec.Parameter) - if !ok { - if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { - continue - } - - break - } - - pr = objAsParam - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - return s.SafeParametersFor(operationID, nil) -} - -// SafeParametersFor the specified operation id. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag, callmeOnError) - s.paramsAsMap(op.Parameters, bag, callmeOnError) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - - return res - } - - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) //#nosec - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) //#nosec - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) //#nosec - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) //#nosec - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) //#nosec - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) //#nosec - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) //#nosec - } - } - - return nil -} - -// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - return s.SafeParamsFor(method, path, nil) -} - -// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res, callmeOnError) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) - } - - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - - return op, fn - } - - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - - return result -} - -// OperationMethodPaths gets all the operation ids based on method an dpath -func (s *Spec) OperationMethodPaths() []string { - if len(s.operations) == 0 { - return nil - } - - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p := range v { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return 
s.structMapKeys(s.produces) -} - -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema - TopLevel bool -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - - return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - - return -} - -// AllPathItemReferences returns the references for all the items -func (s *Spec) AllPathItemReferences() (result []string) { - for _, v := range s.references.pathItems { - result = append(result, v.String()) - } - - return -} - -// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). -// -// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid -// Swagger 2.0 spec. 
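(A hedged sketch of the read-only reference accessors in this region, reusing the imports from the sketch above; the helper name listRefs is hypothetical.)

    // listRefs walks the reference indices collected by the analyzer.
    func listRefs(an *analysis.Spec) {
        // every unique $ref found anywhere in the document
        for _, r := range an.AllRefs() {
            fmt.Println("ref:", r.String())
        }
        // schema references for each discovered definition
        for _, sr := range an.AllDefinitions() {
            fmt.Printf("definition %s (top level: %v)\n", sr.Name, sr.TopLevel)
        }
    }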
-func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - - return -} - -// AllReferences returns all the references found in the document, with possible duplicates -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - - return -} - -func cloneStringMap(source map[string]string) map[string]string { - res := make(map[string]string, len(source)) - for k, v := range source { - res[k] = v - } - - return res -} - -func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { - res := make(map[string][]interface{}, len(source)) - for k, v := range source { - res[k] = v - } - - return res -} - -// ParameterPatterns returns all the patterns found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterPatterns() map[string]string { - return cloneStringMap(s.patterns.parameters) -} - -// HeaderPatterns returns all the patterns found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderPatterns() map[string]string { - return cloneStringMap(s.patterns.headers) -} - -// ItemsPatterns returns all the patterns found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsPatterns() map[string]string { - return cloneStringMap(s.patterns.items) -} - -// SchemaPatterns returns all the patterns found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaPatterns() map[string]string { - return cloneStringMap(s.patterns.schemas) -} - -// AllPatterns returns all the patterns found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllPatterns() map[string]string { - return cloneStringMap(s.patterns.allPatterns) -} - -// ParameterEnums returns all the enums found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.parameters) -} - -// HeaderEnums returns all the enums found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.headers) -} - -// ItemsEnums returns all the enums found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.items) -} - -// SchemaEnums returns all the enums found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.schemas) -} - -// AllEnums returns all the enums found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.allEnums) -} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml deleted file mode 100644 index c2f6fd733..000000000 --- a/vendor/github.com/go-openapi/analysis/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - 
-clone_folder: C:\go-openapi\analysis -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.16 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go deleted file mode 100644 index 33c15704e..000000000 --- a/vendor/github.com/go-openapi/analysis/debug.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "os" - - "github.com/go-openapi/analysis/internal/debug" -) - -var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go deleted file mode 100644 index d5294c095..000000000 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package analysis provides methods to work with a Swagger specification document from -package go-openapi/spec. - -Analyzing a specification - -An analysed specification object (type Spec) provides methods to work with swagger definition. - -Flattening or expanding a specification - -Flattening a specification bundles all remote $ref in the main spec document. 
-Depending on flattening options, additional preprocessing may take place: - - full flattening: replacing all inline complex constructs by a named entry in #/definitions - - expand: replace all $ref's in the document by their expanded content - -Merging several specifications - -Mixin several specifications merges all Swagger constructs, and warns about found conflicts. - -Fixing a specification - -Unmarshalling a specification with golang json unmarshalling may lead to -some unwanted result on present but empty fields. - -Analyzing a Swagger schema - -Swagger schemas are analyzed to determine their complexity and qualify their content. -*/ -package analysis diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go deleted file mode 100644 index 7c2ca0841..000000000 --- a/vendor/github.com/go-openapi/analysis/fixer.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import "github.com/go-openapi/spec" - -// FixEmptyResponseDescriptions replaces empty ("") response -// descriptions in the input with "(empty)" to ensure that the -// resulting Swagger is stays valid. The problem appears to arise -// from reading in valid specs that have a explicit response -// description of "" (valid, response.description is required), but -// due to zero values being omitted upon re-serializing (omitempty) we -// lose them unless we stick some chars in there. -func FixEmptyResponseDescriptions(s *spec.Swagger) { - for k, v := range s.Responses { - FixEmptyDesc(&v) //#nosec - s.Responses[k] = v - } - - if s.Paths == nil { - return - } - - for _, v := range s.Paths.Paths { - if v.Get != nil { - FixEmptyDescs(v.Get.Responses) - } - if v.Put != nil { - FixEmptyDescs(v.Put.Responses) - } - if v.Post != nil { - FixEmptyDescs(v.Post.Responses) - } - if v.Delete != nil { - FixEmptyDescs(v.Delete.Responses) - } - if v.Options != nil { - FixEmptyDescs(v.Options.Responses) - } - if v.Head != nil { - FixEmptyDescs(v.Head.Responses) - } - if v.Patch != nil { - FixEmptyDescs(v.Patch.Responses) - } - } -} - -// FixEmptyDescs adds "(empty)" as the description for any Response in -// the given Responses object that doesn't already have one. -func FixEmptyDescs(rs *spec.Responses) { - FixEmptyDesc(rs.Default) - for k, v := range rs.StatusCodeResponses { - FixEmptyDesc(&v) //#nosec - rs.StatusCodeResponses[k] = v - } -} - -// FixEmptyDesc adds "(empty)" as the description to the given -// Response object if it doesn't already have one and isn't a -// ref. No-op on nil input. 
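(The intended call pattern for these fixers, sketched under assumptions: raw is a hypothetical byte slice holding the spec JSON, and encoding/json is assumed in addition to the imports shown earlier.)

    // fixDescriptions unmarshals a spec document and back-fills empty
    // response descriptions so the result re-serializes as a valid spec.
    func fixDescriptions(raw []byte) (*spec.Swagger, error) {
        var doc spec.Swagger
        if err := json.Unmarshal(raw, &doc); err != nil {
            return nil, err
        }
        analysis.FixEmptyResponseDescriptions(&doc)
        return &doc, nil
    }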
-func FixEmptyDesc(rs *spec.Response) { - if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { - return - } - rs.Description = "(empty)" -} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go deleted file mode 100644 index 0576220fb..000000000 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ /dev/null @@ -1,802 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "log" - "path" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/normalize" - "github.com/go-openapi/analysis/internal/flatten/operations" - "github.com/go-openapi/analysis/internal/flatten/replace" - "github.com/go-openapi/analysis/internal/flatten/schutils" - "github.com/go-openapi/analysis/internal/flatten/sortref" - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const definitionsPath = "#/definitions" - -// newRef stores information about refs created during the flattening process -type newRef struct { - key string - newName string - path string - isOAIGen bool - resolved bool - schema *spec.Schema - parents []string -} - -// context stores intermediary results from flatten -type context struct { - newRefs map[string]*newRef - warnings []string - resolved map[string]string -} - -func newContext() *context { - return &context{ - newRefs: make(map[string]*newRef, 150), - warnings: make([]string, 0), - resolved: make(map[string]string, 50), - } -} - -// Flatten an analyzed spec and produce a self-contained spec bundle. -// -// There is a minimal and a full flattening mode. -// -// -// Minimally flattening a spec means: -// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left -// unscathed) -// - Importing external (http, file) references so they become internal to the document -// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers -// like "$ref": "#/definitions/myObject/allOfs/1") -// -// A minimally flattened spec thus guarantees the following properties: -// - all $refs point to a local definition (i.e. '#/definitions/...') -// - definitions are unique -// -// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they -// represent a complex schema or express commonality in the spec. -// Otherwise, they are simply expanded. -// Self-referencing JSON pointers cannot resolve to a type and trigger an error. -// -// -// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. -// -// Fully flattening a spec means: -// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. -// -// By complex, we mean every JSON object with some properties. 
-// Arrays, when they do not define a tuple, -// or empty objects with or without additionalProperties, are not considered complex and remain inline. -// -// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions -// have been created. -// -// Available flattening options: -// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched -// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) -// - Verbose: croaks about name conflicts detected -// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening -// -// NOTE: expansion removes all $ref save circular $ref, which remain in place -// -// TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a -// x-go-name extension -// - LiftAllOfs: -// - limit the flattening of allOf members when simple objects -// - merge allOf with validation only -// - merge allOf with extensions only -// - ... -// -func Flatten(opts FlattenOpts) error { - debugLog("FlattenOpts: %#v", opts) - - opts.flattenContext = newContext() - - // 1. Recursively expand responses, parameters, path items and items in simple schemas. - // - // This simplifies the spec and leaves only the $ref's in schema objects. - if err := expand(&opts); err != nil { - return err - } - - // 2. Strip the current document from absolute $ref's that actually a in the root, - // so we can recognize them as proper definitions - // - // In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped - if err := normalizeRef(&opts); err != nil { - return err - } - - // 3. Optionally remove shared parameters and responses already expanded (now unused). - // - // Operation parameters (i.e. under paths) remain. - if opts.RemoveUnused { - removeUnusedShared(&opts) - } - - // 4. Import all remote references. - if err := importReferences(&opts); err != nil { - return err - } - - // 5. full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) - if !opts.Minimal && !opts.Expand { - if err := nameInlinedSchemas(&opts); err != nil { - return err - } - } - - // 6. Rewrite JSON pointers other than $ref to named definitions - // and attempt to resolve conflicting names whenever possible. - if err := stripPointersAndOAIGen(&opts); err != nil { - return err - } - - // 7. Strip the spec from unused definitions - if opts.RemoveUnused { - removeUnused(&opts) - } - - // 8. Issue warning notifications, if any - opts.croak() - - // TODO: simplify known schema patterns to flat objects with properties - // examples: - // - lift simple allOf object, - // - empty allOf with validation only or extensions only - // - rework allOf arrays - // - rework allOf additionalProperties - - return nil -} - -func expand(opts *FlattenOpts) error { - if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { - return err - } - - opts.Spec.reload() // re-analyze - - return nil -} - -// normalizeRef strips the current file from any absolute file $ref. 
This works around issue go-openapi/spec#76: -// leading absolute file in $ref is stripped -func normalizeRef(opts *FlattenOpts) error { - debugLog("normalizeRef") - - altered := false - for k, w := range opts.Spec.references.allRefs { - if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS - continue - } - - altered = true - debugLog("stripping absolute path for: %s", w.String()) - - // strip the base path from definition - if err := replace.UpdateRef(opts.Swagger(), k, - spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil { - return err - } - } - - if altered { - opts.Spec.reload() // re-analyze - } - - return nil -} - -func removeUnusedShared(opts *FlattenOpts) { - opts.Swagger().Parameters = nil - opts.Swagger().Responses = nil - - opts.Spec.reload() // re-analyze -} - -func importReferences(opts *FlattenOpts) error { - var ( - imported bool - err error - ) - - for !imported && err == nil { - // iteratively import remote references until none left. - // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") - imported, err = importExternalReferences(opts) - - opts.Spec.reload() // re-analyze - } - - return err -} - -// nameInlinedSchemas replaces every complex inline construct by a named definition. -func nameInlinedSchemas(opts *FlattenOpts) error { - debugLog("nameInlinedSchemas") - - namer := &InlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: operations.AllOpRefsByRef(opts.Spec, nil), - flattenContext: opts.flattenContext, - opts: opts, - } - - depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) - for _, key := range depthFirst { - sch := opts.Spec.allSchemas[key] - if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { - continue - } - - asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return fmt.Errorf("schema analysis [%s]: %w", key, err) - } - - if asch.isAnalyzedAsComplex() { // move complex schemas to definitions - if err := namer.Name(key, sch.Schema, asch); err != nil { - return err - } - } - } - - opts.Spec.reload() // re-analyze - - return nil -} - -func removeUnused(opts *FlattenOpts) { - expected := make(map[string]struct{}) - for k := range opts.Swagger().Definitions { - expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} - } - - for _, k := range opts.Spec.AllDefinitionReferences() { - delete(expected, k) - } - - for k := range expected { - debugLog("removing unused definition %s", path.Base(k)) - if opts.Verbose { - log.Printf("info: removing unused definition: %s", path.Base(k)) - } - delete(opts.Swagger().Definitions, path.Base(k)) - } - - opts.Spec.reload() // re-analyze -} - -func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { - // rewrite ref with already resolved external ref (useful for cyclical refs): - // rewrite external refs to local ones - debugLog("resolving known ref [%s] to %s", refStr, newName) - - for _, key := range entry.Keys { - if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - } - - return nil -} - -func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { - var ( - isOAIGen bool - newName string - ) - - debugLog("resolving schema from remote $ref [%s]", refStr) - - sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) - if 
err != nil { - return fmt.Errorf("could not resolve schema: %w", err) - } - - // at this stage only $ref analysis matters - partialAnalyzer := &Spec{ - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - partialAnalyzer.reset() - partialAnalyzer.analyzeSchema("", sch, "/") - - // now rewrite those refs with rebase - for key, ref := range partialAnalyzer.references.allRefs { - if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { - return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err) - } - } - - // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name - newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) - debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) - - opts.flattenContext.resolved[refStr] = newName - - // rewrite the external refs to local ones - for _, key := range entry.Keys { - if err := replace.UpdateRef(opts.Swagger(), key, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - - // keep track of created refs - resolved := false - if _, ok := opts.flattenContext.newRefs[key]; ok { - resolved = opts.flattenContext.newRefs[key].resolved - } - - debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved) - opts.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: path.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - // add the resolved schema to the definitions - schutils.Save(opts.Swagger(), newName, sch) - - return nil -} - -// importExternalReferences iteratively digs remote references and imports them into the main schema. -// -// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. -// -// This returns true when no more remote references can be found. 
-func importExternalReferences(opts *FlattenOpts) (bool, error) { - debugLog("importExternalReferences") - - groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath) - sortedRefStr := make([]string, 0, len(groupedRefs)) - if opts.flattenContext == nil { - opts.flattenContext = newContext() - } - - // sort $ref resolution to ensure deterministic name conflict resolution - for refStr := range groupedRefs { - sortedRefStr = append(sortedRefStr, refStr) - } - sort.Strings(sortedRefStr) - - complete := true - - for _, refStr := range sortedRefStr { - entry := groupedRefs[refStr] - if entry.Ref.HasFragmentOnly { - continue - } - - complete = false - - newName := opts.flattenContext.resolved[refStr] - if newName != "" { - if err := importKnownRef(entry, refStr, newName, opts); err != nil { - return false, err - } - - continue - } - - // resolve schemas - if err := importNewRef(entry, refStr, opts); err != nil { - return false, err - } - } - - // maintains ref index entries - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - - // update tracking with resolved schemas - if r.schema.Ref.String() != "" { - ref := spec.MustCreateRef(r.path) - sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) - if err != nil { - return false, fmt.Errorf("could not resolve schema: %w", err) - } - - r.schema = sch - } - - if r.path == k { - continue - } - - // update tracking with renamed keys: got a cascade of refs - renamed := *r - renamed.key = r.path - opts.flattenContext.newRefs[renamed.path] = &renamed - - // indirect ref - r.newName = path.Base(k) - r.schema = spec.RefSchema(r.path) - r.path = k - r.isOAIGen = strings.Contains(k, "OAIGen") - } - - return complete, nil -} - -// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler. -// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. -func stripPointersAndOAIGen(opts *FlattenOpts) error { - // name all JSON pointers to anonymous documents - if err := namePointers(opts); err != nil { - return err - } - - // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) - hasIntroducedPointerOrInline, ers := stripOAIGen(opts) - if ers != nil { - return ers - } - - // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers - for hasIntroducedPointerOrInline { - if !opts.Minimal { - opts.Spec.reload() // re-analyze - if err := nameInlinedSchemas(opts); err != nil { - return err - } - } - - if err := namePointers(opts); err != nil { - return err - } - - // restrip and re-analyze - var err error - if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil { - return err - } - } - - return nil -} - -// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. -// -// A dedupe is deemed unnecessary whenever: -// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) -// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to -// the first parent. -// -// This function returns true whenever it re-inlined a complex schema, so the caller may chose to iterate -// pointer and name resolution again. 
-func stripOAIGen(opts *FlattenOpts) (bool, error) { - debugLog("stripOAIGen") - replacedWithComplex := false - - // figure out referers of OAIGen definitions (doing it before the ref start mutating) - for _, r := range opts.flattenContext.newRefs { - updateRefParents(opts.Spec.references.allRefs, r) - } - - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", - k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) - - if !r.isOAIGen || len(r.parents) == 0 { - continue - } - - hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r) - if err != nil { - return replacedWithComplex, err - } - - replacedWithComplex = replacedWithComplex || hasReplacedWithComplex - } - - debugLog("replacedWithComplex: %t", replacedWithComplex) - opts.Spec.reload() // re-analyze - - return replacedWithComplex, nil -} - -// updateRefParents updates all parents of an updated $ref -func updateRefParents(allRefs map[string]spec.Ref, r *newRef) { - if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) - return - } - for k, v := range allRefs { - if r.path != v.String() { - continue - } - - found := false - for _, p := range r.parents { - if p == k { - found = true - - break - } - } - if !found { - r.parents = append(r.parents, k) - } - } -} - -func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { - replacedWithComplex := false - - pr := sortref.TopmostFirst(r.parents) - - // rewrite first parent schema in hierarchical then lexicographical order - debugLog("rewrite first parent %s with schema", pr[0]) - if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { - return false, err - } - - if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { - // update parent in ref index entry - debugLog("update parent entry: %s", pr[0]) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - - // rewrite other parents to point to first parent - if len(pr) > 1 { - for _, p := range pr[1:] { - replacingRef := spec.MustCreateRef(pr[0]) - - // set complex when replacing ref is an anonymous jsonpointer: further processing may be required - replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath - debugLog("rewrite parent with ref: %s", replacingRef.String()) - - // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). - // Those are stripped later on. 
- if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil { - return false, err - } - - if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen { - // update parent in ref index - debugLog("update parent entry: %s", p) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - } - } - - // remove OAIGen definition - debugLog("removing definition %s", path.Base(r.path)) - delete(opts.Swagger().Definitions, path.Base(r.path)) - - // propagate changes in ref index for keys which have this one as a parent - for kk, value := range opts.flattenContext.newRefs { - if kk == k || !value.isOAIGen || value.resolved { - continue - } - - found := false - newParents := make([]string, 0, len(value.parents)) - for _, parent := range value.parents { - switch { - case parent == r.path: - found = true - parent = pr[0] - case strings.HasPrefix(parent, r.path+"/"): - found = true - parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path)) - } - - newParents = append(newParents, parent) - } - - if found { - value.parents = newParents - } - } - - // mark naming conflict as resolved - debugLog("marking naming conflict resolved for key: %s", r.key) - opts.flattenContext.newRefs[r.key].isOAIGen = false - opts.flattenContext.newRefs[r.key].resolved = true - - // determine if the previous substitution did inline a complex schema - if r.schema != nil && r.schema.Ref.String() == "" { // inline schema - asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return false, err - } - - debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex()) - replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex() - } - - return replacedWithComplex, nil -} - -// namePointers replaces all JSON pointers to anonymous documents by a $ref to a new named definitions. -// -// This is carried on depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself. -// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used). -func namePointers(opts *FlattenOpts) error { - debugLog("name pointers") - - refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) - for k, ref := range opts.Spec.references.allRefs { - if path.Dir(ref.String()) == definitionsPath { - // this a ref to a top-level definition: ok - continue - } - - result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref) - if err != nil { - return fmt.Errorf("at %s, %w", k, err) - } - - replacingRef := result.Ref - sch := result.Schema - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) 
- } - - debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) - refsToReplace[k] = SchemaRef{ - Name: k, // caller - Ref: replacingRef, // called - Schema: sch, - TopLevel: path.Dir(replacingRef.String()) == definitionsPath, - } - } - - depthFirst := sortref.DepthFirst(refsToReplace) - namer := &InlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: operations.AllOpRefsByRef(opts.Spec, nil), - flattenContext: opts.flattenContext, - opts: opts, - } - - for _, key := range depthFirst { - v := refsToReplace[key] - // update current replacement, which may have been updated by previous changes of deeper elements - result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref) - if erd != nil { - return fmt.Errorf("at %s, %w", key, erd) - } - - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) - } - - v.Ref = result.Ref - v.Schema = result.Schema - v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath - debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) - - if v.TopLevel { - debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) - - // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref - if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil { - return err - } - - continue - } - - if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil { - return err - } - } - - opts.Spec.reload() // re-analyze - - return nil -} - -func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error { - // this is a JSON pointer to an anonymous document (internal or external): - // create a definition for this schema when: - // - it is a complex schema - // - or it is pointed by more than one $ref (i.e. expresses commonality) - // otherwise, expand the pointer (single reference to a simple type) - // - // The named definition for this follows the target's key, not the caller's - debugLog("namePointers at %s for %s", key, v.Ref.String()) - - // qualify the expanded schema - asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if ers != nil { - return fmt.Errorf("schema analysis [%s]: %w", key, ers) - } - callers := make([]string, 0, 64) - - debugLog("looking for callers") - - an := New(opts.Swagger()) - for k, w := range an.references.allRefs { - r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w) - if err != nil { - return fmt.Errorf("at %s, %w", key, err) - } - - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...) 
- } - - if r.Ref.String() == v.Ref.String() { - callers = append(callers, k) - } - } - - debugLog("callers for %s: %d", v.Ref.String(), len(callers)) - if len(callers) == 0 { - // has already been updated and resolved - return nil - } - - parts := sortref.KeyParts(v.Ref.String()) - debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) - - // identifying edge case when the namer did nothing because we point to a non-schema object - // no definition is created and we expand the $ref for all callers - if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { - debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) - if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { - return err - } - - // regular case: we named the $ref as a definition, and we move all callers to this new $ref - for _, caller := range callers { - if caller == key { - continue - } - - // move $ref for next to resolve - debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) - c := refsToReplace[caller] - c.Ref = v.Ref - refsToReplace[caller] = c - } - - return nil - } - - debugLog("expand JSON pointer for key=%s", key) - - if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { - return err - } - // NOTE: there is no other caller to update - - return nil -} diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go deleted file mode 100644 index 3ad2ccfbf..000000000 --- a/vendor/github.com/go-openapi/analysis/flatten_name.go +++ /dev/null @@ -1,293 +0,0 @@ -package analysis - -import ( - "fmt" - "path" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/operations" - "github.com/go-openapi/analysis/internal/flatten/replace" - "github.com/go-openapi/analysis/internal/flatten/schutils" - "github.com/go-openapi/analysis/internal/flatten/sortref" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// InlineSchemaNamer finds a new name for an inlined type -type InlineSchemaNamer struct { - Spec *spec.Swagger - Operations map[string]operations.OpRef - flattenContext *context - opts *FlattenOpts -} - -// Name yields a new name for the inline schema -func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { - debugLog("naming inlined schema at %s", key) - - parts := sortref.KeyParts(key) - for _, name := range namesFromKey(parts, aschema, isn.Operations) { - if name == "" { - continue - } - - // create unique name - newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) - - // clone schema - sch := schutils.Clone(schema) - - // replace values on schema - if err := replace.RewriteSchemaToRef(isn.Spec, key, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err) - } - - // rewrite any dependent $ref pointing to this place, - // when not already pointing to a top-level definition. - // - // NOTE: this is important if such referers use arbitrary JSON pointers. - an := New(isn.Spec) - for k, v := range an.references.allRefs { - r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v) - if erd != nil { - return fmt.Errorf("at %s, %w", k, erd) - } - - if isn.opts.flattenContext != nil { - isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...) 
- } - - if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) { - continue - } - - debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String()) - - // rewrite $ref to the new target - if err := replace.UpdateRef(isn.Spec, k, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - } - - // NOTE: this extension is currently not used by go-swagger (provided for information only) - sch.AddExtension("x-go-gen-location", GenLocation(parts)) - - // save cloned schema to definitions - schutils.Save(isn.Spec, newName, sch) - - // keep track of created refs - if isn.flattenContext == nil { - continue - } - - debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen) - resolved := false - - if _, ok := isn.flattenContext.newRefs[key]; ok { - resolved = isn.flattenContext.newRefs[key].resolved - } - - isn.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: path.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - return nil -} - -// uniqifyName yields a unique name for a definition -func uniqifyName(definitions spec.Definitions, name string) (string, bool) { - isOAIGen := false - if name == "" { - name = "oaiGen" - isOAIGen = true - } - - if len(definitions) == 0 { - return name, isOAIGen - } - - unq := true - for k := range definitions { - if strings.EqualFold(k, name) { - unq = false - - break - } - } - - if unq { - return name, isOAIGen - } - - name += "OAIGen" - isOAIGen = true - var idx int - unique := name - _, known := definitions[unique] - - for known { - idx++ - unique = fmt.Sprintf("%s%d", name, idx) - _, known = definitions[unique] - } - - return unique, isOAIGen -} - -func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string { - var ( - baseNames [][]string - startIndex int - ) - - if parts.IsOperation() { - baseNames, startIndex = namesForOperation(parts, operations) - } - - // definitions - if parts.IsDefinition() { - baseNames, startIndex = namesForDefinition(parts) - } - - result := make([]string, 0, len(baseNames)) - for _, segments := range baseNames { - nm := parts.BuildName(segments, startIndex, partAdder(aschema)) - if nm == "" { - continue - } - - result = append(result, nm) - } - sort.Strings(result) - - return result -} - -func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { - var ( - baseNames [][]string - startIndex int - ) - - piref := parts.PathItemRef() - if piref.String() != "" && parts.IsOperationParam() { - if op, ok := operations[piref.String()]; ok { - startIndex = 5 - baseNames = append(baseNames, []string{op.ID, "params", "body"}) - } - } else if parts.IsSharedOperationParam() { - pref := parts.PathRef() - for k, v := range operations { - if strings.HasPrefix(k, pref.String()) { - startIndex = 4 - baseNames = append(baseNames, []string{v.ID, "params", "body"}) - } - } - } - - return baseNames, startIndex -} - -func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { - var ( - baseNames [][]string - startIndex int - ) - - // params - if parts.IsOperationParam() || parts.IsSharedOperationParam() { - baseNames, startIndex = namesForParam(parts, operations) - } - - // responses - if parts.IsOperationResponse() { - piref := parts.PathItemRef() - if piref.String() != "" { - if op, ok := 
operations[piref.String()]; ok { - startIndex = 6 - baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) - } - } - } - - return baseNames, startIndex -} - -func namesForDefinition(parts sortref.SplitKey) ([][]string, int) { - nm := parts.DefinitionName() - if nm != "" { - return [][]string{{parts.DefinitionName()}}, 2 - } - - return [][]string{}, 0 -} - -// partAdder knows how to interpret a schema when it comes to build a name from parts -func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { - return func(part string) []string { - segments := make([]string, 0, 2) - - if part == "items" || part == "additionalItems" { - if aschema.IsTuple || aschema.IsTupleWithExtra { - segments = append(segments, "tuple") - } else { - segments = append(segments, "items") - } - - if part == "additionalItems" { - segments = append(segments, part) - } - - return segments - } - - segments = append(segments, part) - - return segments - } -} - -func nameFromRef(ref spec.Ref) string { - u := ref.GetURL() - if u.Fragment != "" { - return swag.ToJSONName(path.Base(u.Fragment)) - } - - if u.Path != "" { - bn := path.Base(u.Path) - if bn != "" && bn != "/" { - ext := path.Ext(bn) - if ext != "" { - return swag.ToJSONName(bn[:len(bn)-len(ext)]) - } - - return swag.ToJSONName(bn) - } - } - - return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " ")) -} - -// GenLocation indicates from which section of the specification (models or operations) a definition has been created. -// -// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided -// for information only. -func GenLocation(parts sortref.SplitKey) string { - switch { - case parts.IsOperation(): - return "operations" - case parts.IsDefinition(): - return "models" - default: - return "" - } -} diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go deleted file mode 100644 index c5bb97b0a..000000000 --- a/vendor/github.com/go-openapi/analysis/flatten_options.go +++ /dev/null @@ -1,78 +0,0 @@ -package analysis - -import ( - "log" - - "github.com/go-openapi/spec" -) - -// FlattenOpts configuration for flattening a swagger specification. -// -// The BasePath parameter is used to locate remote relative $ref found in the specification. -// This path is a file: it points to the location of the root document and may be either a local -// file path or a URL. -// -// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...") -// found in the spec are searched from the current working directory. -type FlattenOpts struct { - Spec *Spec // The analyzed spec to work with - flattenContext *context // Internal context to track flattening activity - - BasePath string // The location of the root document for this spec to resolve relative $ref - - // Flattening options - Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false) - Minimal bool // When true, do not decompose complex structures such as allOf - Verbose bool // enable some reporting on possible name conflicts detected - RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening - ContinueOnError bool // Continue when spec expansion issues are found - - /* Extra keys */ - _ struct{} // require keys -} - -// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. 
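For reference, an illustrative sketch (not part of the removed vendored code) of driving Flatten with the options defined above; the document path is a placeholder and an empty spec stands in for one loaded from disk:

    package main

    import (
        "log"

        "github.com/go-openapi/analysis"
        "github.com/go-openapi/spec"
    )

    func main() {
        // Placeholder: a real caller would load swaggerDoc from a file or URL.
        swaggerDoc := &spec.Swagger{}

        opts := analysis.FlattenOpts{
            Spec:         analysis.New(swaggerDoc), // the analyzed spec to work with
            BasePath:     "swagger.json",           // placeholder root document for relative $ref
            Minimal:      true,                     // rewrite $refs only; keep inline schemas
            RemoveUnused: false,
        }
        if err := analysis.Flatten(opts); err != nil {
            log.Fatal(err)
        }
    }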
-func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { - return &spec.ExpandOptions{ - RelativeBase: f.BasePath, - SkipSchemas: skipSchemas, - ContinueOnError: f.ContinueOnError, - } -} - -// Swagger gets the swagger specification for this flatten operation -func (f *FlattenOpts) Swagger() *spec.Swagger { - return f.Spec.spec -} - -// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting -// from flattening a spec -func (f *FlattenOpts) croak() { - if !f.Verbose { - return - } - - reported := make(map[string]bool, len(f.flattenContext.newRefs)) - for _, v := range f.Spec.references.allRefs { - // warns about duplicate handling - for _, r := range f.flattenContext.newRefs { - if r.isOAIGen && r.path == v.String() { - reported[r.newName] = true - } - } - } - - for k := range reported { - log.Printf("warning: duplicate flattened definition name resolved as %s", k) - } - - // warns about possible type mismatches - uniqueMsg := make(map[string]bool) - for _, msg := range f.flattenContext.warnings { - if _, ok := uniqueMsg[msg]; ok { - continue - } - log.Printf("warning: %s", msg) - uniqueMsg[msg] = true - } -} diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go deleted file mode 100644 index ec0fec022..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package debug - -import ( - "fmt" - "log" - "os" - "path/filepath" - "runtime" -) - -var ( - output = os.Stdout -) - -// GetLogger provides a prefix debug logger -func GetLogger(prefix string, debug bool) func(string, ...interface{}) { - if debug { - logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags) - - return func(msg string, args ...interface{}) { - _, file1, pos1, _ := runtime.Caller(1) - logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } - } - - return func(msg string, args ...interface{}) {} -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go deleted file mode 100644 index 8c9df0580..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go +++ /dev/null @@ -1,87 +0,0 @@ -package normalize - -import ( - "net/url" - "path" - "path/filepath" - "strings" - - "github.com/go-openapi/spec" -) - -// RebaseRef rebases a remote ref relative to a base ref. -// -// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). 
-// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func RebaseRef(baseRef string, ref string) string { - baseRef, _ = url.PathUnescape(baseRef) - ref, _ = url.PathUnescape(ref) - - if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { - return ref - } - - parts := strings.Split(ref, "#") - - baseParts := strings.Split(baseRef, "#") - baseURL, _ := url.Parse(baseParts[0]) - if strings.HasPrefix(ref, "#") { - if baseURL.Host == "" { - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - - refURL, _ := url.Parse(parts[0]) - if refURL.Host != "" || filepath.IsAbs(parts[0]) { - // not rebasing an absolute path - return ref - } - - // there is a relative path - var basePath string - if baseURL.Host != "" { - // when there is a host, standard URI rules apply (with "/") - baseURL.Path = path.Dir(baseURL.Path) - baseURL.Path = path.Join(baseURL.Path, "/"+parts[0]) - - return baseURL.String() - } - - // this is a local relative path - // basePart[0] and parts[0] are local filesystem directories/files - basePath = filepath.Dir(baseParts[0]) - relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) - if len(parts) > 1 { - return strings.Join([]string{relPath, parts[1]}, "#") - } - - return relPath -} - -// Path renders absolute path on remote file refs -// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func Path(ref spec.Ref, basePath string) string { - uri, _ := url.PathUnescape(ref.String()) - if ref.HasFragmentOnly || filepath.IsAbs(uri) { - return uri - } - - refURL, _ := url.Parse(uri) - if refURL.Host != "" { - return uri - } - - parts := strings.Split(uri, "#") - // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage - parts[0] = filepath.Join(filepath.Dir(basePath), parts[0]) - - return strings.Join(parts, "#") -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go deleted file mode 100644 index 7f3a2b871..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go +++ /dev/null @@ -1,90 +0,0 @@ -package operations - -import ( - "path" - "sort" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// AllOpRefsByRef returns an index of sortable operations -func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef { - return OpRefsByRef(GatherOperations(specDoc, operationIDs)) -} - -// OpRefsByRef indexes a map of sortable operations -func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { - result := make(map[string]OpRef, len(oprefs)) - for _, v := range oprefs { - result[v.Ref.String()] = v - } - - return result -} - -// OpRef is an indexable, sortable operation -type OpRef struct { - Method string - Path string - Key string - ID string - Op *spec.Operation - Ref spec.Ref -} - -// OpRefs is a sortable collection of operations -type OpRefs []OpRef - -func (o OpRefs) Len() int { return len(o) } -func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } - -// Provider knows how to collect 
operations from a spec -type Provider interface { - Operations() map[string]map[string]*spec.Operation -} - -// GatherOperations builds a map of sorted operations from a spec -func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { - var oprefs OpRefs - - for method, pathItem := range specDoc.Operations() { - for pth, operation := range pathItem { - vv := *operation - oprefs = append(oprefs, OpRef{ - Key: swag.ToGoName(strings.ToLower(method) + " " + pth), - Method: method, - Path: pth, - ID: vv.ID, - Op: &vv, - Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), - }) - } - } - - sort.Sort(oprefs) - - operations := make(map[string]OpRef) - for _, opr := range oprefs { - nm := opr.ID - if nm == "" { - nm = opr.Key - } - - oo, found := operations[nm] - if found && oo.Method != opr.Method && oo.Path != opr.Path { - nm = opr.Key - } - - if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) { - opr.ID = nm - opr.Op.ID = nm - operations[nm] = opr - } - } - - return operations -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go deleted file mode 100644 index 26c2a05a3..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go +++ /dev/null @@ -1,434 +0,0 @@ -package replace - -import ( - "fmt" - "net/url" - "os" - "path" - "strconv" - - "github.com/go-openapi/analysis/internal/debug" - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const definitionsPath = "#/definitions" - -var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "") - -// RewriteSchemaToRef replaces a schema with a Ref -func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { - debugLog("rewriting schema to ref for %s with %s", key, ref.String()) - _, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - return rewriteParentRef(sp, key, ref) - - case spec.Schema: - return rewriteParentRef(sp, key, ref) - - case *spec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - } - - case *spec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - } - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { - parent, entry, pvalue, err := getParentFromKey(sp, key) - if err != nil { - return err - } - - debugLog("rewriting holder for %T", pvalue) - switch container := pvalue.(type) { - case spec.Response: - if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { - return err - } - - case *spec.Response: - container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.Responses: - statusCode, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - resp := container.StatusCodeResponses[statusCode] - resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container.StatusCodeResponses[statusCode] = resp - - case map[string]spec.Response: - resp := container[entry] - resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[entry] = resp - - case 
spec.Parameter: - if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { - return err - } - - case map[string]spec.Parameter: - param := container[entry] - param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[entry] = param - - case []spec.Parameter: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - param := container[idx] - param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[idx] = param - - case spec.Definitions: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case map[string]spec.Schema: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case spec.SchemaProperties: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) - } - - return nil -} - -// getPointerFromKey retrieves the content of the JSON pointer "key" -func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - if key == "#/" { - return "", sp, nil - } - // unescape chars in key, e.g. "{}" from path params - pth, _ := url.PathUnescape(key[1:]) - ptr, err := jsonpointer.New(pth) - if err != nil { - return "", nil, err - } - - value, _, err := ptr.Get(sp) - if err != nil { - debugLog("error when getting key: %s with path: %s", key, pth) - - return "", nil, err - } - - return pth, value, nil -} - -// getParentFromKey retrieves the container of the JSON pointer "key" -func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - // unescape chars in key, e.g. 
"{}" from path params - pth, _ := url.PathUnescape(key[1:]) - - parent, entry := path.Dir(pth), path.Base(pth) - debugLog("getting schema holder at: %s, with entry: %s", parent, entry) - - pptr, err := jsonpointer.New(parent) - if err != nil { - return "", "", nil, err - } - pvalue, _, err := pptr.Get(sp) - if err != nil { - return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err) - } - - return parent, entry, pvalue, nil -} - -// UpdateRef replaces a ref by another one -func UpdateRef(sp interface{}, key string, ref spec.Ref) error { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - debugLog("updating ref for %s with %s", key, ref.String()) - pth, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - refable.Ref = ref - case *spec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case *spec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case spec.Schema: - debugLog("rewriting holder for %T", refable) - _, entry, pvalue, erp := getParentFromKey(sp, key) - if erp != nil { - return err - } - switch container := pvalue.(type) { - case spec.Definitions: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case map[string]spec.Schema: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case spec.SchemaProperties: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled container type at %s: %T", key, value) - } - - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -// UpdateRefWithSchema replaces a ref with a schema (i.e. 
re-inline schema) -func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { - debugLog("updating ref for %s with schema", key) - pth, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - *refable = *sch - case spec.Schema: - _, entry, pvalue, erp := getParentFromKey(sp, key) - if erp != nil { - return err - } - switch container := pvalue.(type) { - case spec.Definitions: - container[entry] = *sch - - case map[string]spec.Schema: - container[entry] = *sch - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container[idx] = *sch - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container.Schemas[idx] = *sch - - case spec.SchemaProperties: - container[entry] = *sch - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value) - } - case *spec.SchemaOrArray: - *refable.Schema = *sch - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - case *spec.SchemaOrBool: - *refable.Schema = *sch - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -// DeepestRefResult holds the results from DeepestRef analysis -type DeepestRefResult struct { - Ref spec.Ref - Schema *spec.Schema - Warnings []string -} - -// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. -// - if no definition is found, returns the deepest ref. -// - pointers to external files are expanded -// -// NOTE: all external $ref's are assumed to be already expanded at this stage. -func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { - if !ref.HasFragmentOnly { - // we found an external $ref, which is odd at this stage: - // do nothing on external $refs - return &DeepestRefResult{Ref: ref}, nil - } - - currentRef := ref - visited := make(map[string]bool, 64) - warnings := make([]string, 0, 2) - -DOWNREF: - for currentRef.String() != "" { - if path.Dir(currentRef.String()) == definitionsPath { - // this is a top-level definition: stop here and return this ref - return &DeepestRefResult{Ref: currentRef}, nil - } - - if _, beenThere := visited[currentRef.String()]; beenThere { - return nil, - fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String()) - } - - visited[currentRef.String()] = true - value, _, err := currentRef.GetPointer().Get(sp) - if err != nil { - return nil, err - } - - switch refable := value.(type) { - case *spec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case spec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case *spec.SchemaOrArray: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case *spec.SchemaOrBool: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case spec.Response: - // a pointer points to a schema initially marshalled in responses section... 
- // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema spec.Schema - - err := asSchema.UnmarshalJSON(asJSON) - if err != nil { - return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) - } - warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - case spec.Parameter: - // a pointer points to a schema initially marshalled in parameters section... - // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema spec.Schema - if err := asSchema.UnmarshalJSON(asJSON); err != nil { - return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) - } - - warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - default: - return nil, - fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T", - currentRef.String(), value) - } - } - - // assess what schema we're ending with - sch, erv := spec.ResolveRefWithBase(sp, ¤tRef, opts) - if erv != nil { - return nil, erv - } - - if sch == nil { - return nil, fmt.Errorf("no schema found at %s", currentRef.String()) - } - - return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go deleted file mode 100644 index 4590236e6..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package schutils provides tools to save or clone a schema -// when flattening a spec. 
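The Clone helper below relies on a dynamic JSON round-trip; here is a self-contained illustrative sketch (not part of the removed vendored code) of the same technique using the exported swag helper, with an arbitrary sample schema:

    package main

    import (
        "fmt"

        "github.com/go-openapi/spec"
        "github.com/go-openapi/swag"
    )

    func main() {
        original := spec.StringProperty() // any schema works here

        // Deep copy by marshalling to dynamic JSON and back, the same
        // round-trip Clone performs below.
        var copied spec.Schema
        if err := swag.FromDynamicJSON(original, &copied); err != nil {
            fmt.Println("clone failed:", err)
            return
        }

        fmt.Println(copied.Type) // [string], independent of original
    }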
-package schutils - -import ( - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// Save registers a schema as an entry in spec #/definitions -func Save(sp *spec.Swagger, name string, schema *spec.Schema) { - if schema == nil { - return - } - - if sp.Definitions == nil { - sp.Definitions = make(map[string]spec.Schema, 150) - } - - sp.Definitions[name] = *schema -} - -// Clone deep-clones a schema -func Clone(schema *spec.Schema) *spec.Schema { - var sch spec.Schema - _ = swag.FromDynamicJSON(schema, &sch) - - return &sch -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go deleted file mode 100644 index 18e552ead..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go +++ /dev/null @@ -1,201 +0,0 @@ -package sortref - -import ( - "net/http" - "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const ( - paths = "paths" - responses = "responses" - parameters = "parameters" - definitions = "definitions" -) - -var ( - ignoredKeys map[string]struct{} - validMethods map[string]struct{} -) - -func init() { - ignoredKeys = map[string]struct{}{ - "schema": {}, - "properties": {}, - "not": {}, - "anyOf": {}, - "oneOf": {}, - } - - validMethods = map[string]struct{}{ - "GET": {}, - "HEAD": {}, - "OPTIONS": {}, - "PATCH": {}, - "POST": {}, - "PUT": {}, - "DELETE": {}, - } -} - -// Key represent a key item constructed from /-separated segments -type Key struct { - Segments int - Key string -} - -// Keys is a sortable collable collection of Keys -type Keys []Key - -func (k Keys) Len() int { return len(k) } -func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k Keys) Less(i, j int) bool { - return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) -} - -// KeyParts construct a SplitKey with all its /-separated segments decomposed. It is sortable. -func KeyParts(key string) SplitKey { - var res []string - for _, part := range strings.Split(key[1:], "/") { - if part != "" { - res = append(res, jsonpointer.Unescape(part)) - } - } - - return res -} - -// SplitKey holds of the parts of a /-separated key, soi that their location may be determined. -type SplitKey []string - -// IsDefinition is true when the split key is in the #/definitions section of a spec -func (s SplitKey) IsDefinition() bool { - return len(s) > 1 && s[0] == definitions -} - -// DefinitionName yields the name of the definition -func (s SplitKey) DefinitionName() string { - if !s.IsDefinition() { - return "" - } - - return s[1] -} - -func (s SplitKey) isKeyName(i int) bool { - if i <= 0 { - return false - } - - count := 0 - for idx := i - 1; idx > 0; idx-- { - if s[idx] != "properties" { - break - } - count++ - } - - return count%2 != 0 -} - -// PartAdder know how to construct the components of a new name -type PartAdder func(string) []string - -// BuildName builds a name from segments -func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string { - for i, part := range s[startIndex:] { - if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { - segments = append(segments, adder(part)...) 
- } - } - - return strings.Join(segments, " ") -} - -// IsOperation is true when the split key is in the operations section -func (s SplitKey) IsOperation() bool { - return len(s) > 1 && s[0] == paths -} - -// IsSharedOperationParam is true when the split key is in the parameters section of a path -func (s SplitKey) IsSharedOperationParam() bool { - return len(s) > 2 && s[0] == paths && s[2] == parameters -} - -// IsSharedParam is true when the split key is in the #/parameters section of a spec -func (s SplitKey) IsSharedParam() bool { - return len(s) > 1 && s[0] == parameters -} - -// IsOperationParam is true when the split key is in the parameters section of an operation -func (s SplitKey) IsOperationParam() bool { - return len(s) > 3 && s[0] == paths && s[3] == parameters -} - -// IsOperationResponse is true when the split key is in the responses section of an operation -func (s SplitKey) IsOperationResponse() bool { - return len(s) > 3 && s[0] == paths && s[3] == responses -} - -// IsSharedResponse is true when the split key is in the #/responses section of a spec -func (s SplitKey) IsSharedResponse() bool { - return len(s) > 1 && s[0] == responses -} - -// IsDefaultResponse is true when the split key is the default response for an operation -func (s SplitKey) IsDefaultResponse() bool { - return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" -} - -// IsStatusCodeResponse is true when the split key is an operation response with a status code -func (s SplitKey) IsStatusCodeResponse() bool { - isInt := func() bool { - _, err := strconv.Atoi(s[4]) - - return err == nil - } - - return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() -} - -// ResponseName yields either the status code or "Default" for a response -func (s SplitKey) ResponseName() string { - if s.IsStatusCodeResponse() { - code, _ := strconv.Atoi(s[4]) - - return http.StatusText(code) - } - - if s.IsDefaultResponse() { - return "Default" - } - - return "" -} - -// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} -func (s SplitKey) PathItemRef() spec.Ref { - if len(s) < 3 { - return spec.Ref{} - } - - pth, method := s[1], s[2] - if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { - return spec.Ref{} - } - - return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) -} - -// PathRef constructs a $ref object from a split key of the form /paths/{reference} -func (s SplitKey) PathRef() spec.Ref { - if !s.IsOperation() { - return spec.Ref{} - } - - return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1]))) -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go deleted file mode 100644 index 73243df87..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go +++ /dev/null @@ -1,141 +0,0 @@ -package sortref - -import ( - "reflect" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/normalize" - "github.com/go-openapi/spec" -) - -var depthGroupOrder = []string{ - "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", -} - -type mapIterator struct { - len int - mapIter *reflect.MapIter -} - -func (i *mapIterator) Next() bool { - return i.mapIter.Next() -} - -func (i *mapIterator) Len() int { - return i.len -} - -func (i 
*mapIterator) Key() string { - return i.mapIter.Key().String() -} - -func mustMapIterator(anyMap interface{}) *mapIterator { - val := reflect.ValueOf(anyMap) - - return &mapIterator{mapIter: val.MapRange(), len: val.Len()} -} - -// DepthFirst sorts a map of anything. It groups keys by category -// (shared params, op param, statuscode response, default response, definitions) -// sort groups internally by number of parts in the key and lexical names -// flatten groups into a single list of keys -func DepthFirst(in interface{}) []string { - iterator := mustMapIterator(in) - sorted := make([]string, 0, iterator.Len()) - grouped := make(map[string]Keys, iterator.Len()) - - for iterator.Next() { - k := iterator.Key() - split := KeyParts(k) - var pk string - - if split.IsSharedOperationParam() { - pk = "sharedOpParam" - } - if split.IsOperationParam() { - pk = "opParam" - } - if split.IsStatusCodeResponse() { - pk = "codeResponse" - } - if split.IsDefaultResponse() { - pk = "defaultResponse" - } - if split.IsDefinition() { - pk = "definition" - } - if split.IsSharedParam() { - pk = "sharedParam" - } - if split.IsSharedResponse() { - pk = "sharedResponse" - } - grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) - } - - for _, pk := range depthGroupOrder { - res := grouped[pk] - sort.Sort(res) - - for _, v := range res { - sorted = append(sorted, v.Key) - } - } - - return sorted -} - -// topMostRefs is able to sort refs by hierarchical then lexicographic order, -// yielding refs ordered breadth-first. -type topmostRefs []string - -func (k topmostRefs) Len() int { return len(k) } -func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k topmostRefs) Less(i, j int) bool { - li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/")) - if li == lj { - return k[i] < k[j] - } - - return li < lj -} - -// TopmostFirst sorts references by depth -func TopmostFirst(refs []string) []string { - res := topmostRefs(refs) - sort.Sort(res) - - return res -} - -// RefRevIdx is a reverse index for references -type RefRevIdx struct { - Ref spec.Ref - Keys []string -} - -// ReverseIndex builds a reverse index for references in schemas -func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { - collected := make(map[string]RefRevIdx) - for key, schRef := range schemas { - // normalize paths before sorting, - // so we get together keys that are from the same external file - normalizedPath := normalize.Path(schRef, basePath) - - entry, ok := collected[normalizedPath] - if ok { - entry.Keys = append(entry.Keys, key) - collected[normalizedPath] = entry - - continue - } - - collected[normalizedPath] = RefRevIdx{ - Ref: schRef, - Keys: []string{key}, - } - } - - return collected -} diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go deleted file mode 100644 index b25305264..000000000 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
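
For context on what sort_ref.go provided before its removal: TopmostFirst ordered $ref strings breadth-first, fewest path segments first with ties broken lexicographically. Since sortref is an internal package and not importable, here is a standalone re-implementation of that ordering, a sketch only:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// byTopmost reproduces the deleted TopmostFirst ordering:
// fewer "/"-separated segments first, ties broken lexicographically.
func byTopmost(refs []string) []string {
	sort.Slice(refs, func(i, j int) bool {
		li := len(strings.Split(refs[i], "/"))
		lj := len(strings.Split(refs[j], "/"))
		if li == lj {
			return refs[i] < refs[j]
		}
		return li < lj
	})
	return refs
}

func main() {
	fmt.Println(byTopmost([]string{
		"#/definitions/a/properties/b",
		"#/definitions/a",
		"#/definitions/b",
	}))
	// Expected: [#/definitions/a #/definitions/b #/definitions/a/properties/b]
}
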
-// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "reflect" - - "github.com/go-openapi/spec" -) - -// Mixin modifies the primary swagger spec by adding the paths and -// definitions from the mixin specs. Top level parameters and -// responses from the mixins are also carried over. Operation id -// collisions are avoided by appending "Mixin" but only if -// needed. -// -// The following parts of primary are subject to merge, filling empty details -// - Info -// - BasePath -// - Host -// - ExternalDocs -// -// Consider calling FixEmptyResponseDescriptions() on the modified primary -// if you read them from storage and they are valid to start with. -// -// Entries in "paths", "definitions", "parameters" and "responses" are -// added to the primary in the order of the given mixins. If the entry -// already exists in primary it is skipped with a warning message. -// -// The count of skipped entries (from collisions) is returned so any -// deviation from the number expected can flag a warning in your build -// scripts. Carefully review the collisions before accepting them; -// consider renaming things if possible. -// -// No key normalization takes place (paths, type defs, -// etc). Ensure they are canonical if your downstream tools do -// key normalization of any form. -// -// Merging schemes (http, https), and consumers/producers do not account for -// collisions. -func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { - skipped := make([]string, 0, len(mixins)) - opIds := getOpIds(primary) - initPrimary(primary) - - for i, m := range mixins { - skipped = append(skipped, mergeSwaggerProps(primary, m)...) - - skipped = append(skipped, mergeConsumes(primary, m)...) - - skipped = append(skipped, mergeProduces(primary, m)...) - - skipped = append(skipped, mergeTags(primary, m)...) - - skipped = append(skipped, mergeSchemes(primary, m)...) - - skipped = append(skipped, mergeSecurityDefinitions(primary, m)...) - - skipped = append(skipped, mergeSecurityRequirements(primary, m)...) - - skipped = append(skipped, mergeDefinitions(primary, m)...) - - // merging paths requires a map of operationIDs to work with - skipped = append(skipped, mergePaths(primary, m, opIds, i)...) - - skipped = append(skipped, mergeParameters(primary, m)...) - - skipped = append(skipped, mergeResponses(primary, m)...) - } - - return skipped -} - -// getOpIds extracts all the paths..operationIds from the given -// spec and returns them as the keys in a map with 'true' values. 
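
The Mixin function removed above was the public entry point of this file. A minimal usage sketch, assuming only the go-openapi/spec and go-openapi/analysis packages vendored here (the field values are hypothetical):

package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	primary := &spec.Swagger{SwaggerProps: spec.SwaggerProps{Host: "api.example.com"}}
	extra := &spec.Swagger{SwaggerProps: spec.SwaggerProps{BasePath: "/v1"}}

	// Mixin fills empty top-level fields on primary from extra and
	// returns one warning string per skipped (colliding) entry.
	skipped := analysis.Mixin(primary, extra)
	fmt.Println(primary.Host, primary.BasePath, len(skipped))
	// Should print: api.example.com /v1 0
}
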
-func getOpIds(s *spec.Swagger) map[string]bool { - rv := make(map[string]bool) - if s.Paths == nil { - return rv - } - - for _, v := range s.Paths.Paths { - piops := pathItemOps(v) - - for _, op := range piops { - rv[op.ID] = true - } - } - - return rv -} - -func pathItemOps(p spec.PathItem) []*spec.Operation { - var rv []*spec.Operation - rv = appendOp(rv, p.Get) - rv = appendOp(rv, p.Put) - rv = appendOp(rv, p.Post) - rv = appendOp(rv, p.Delete) - rv = appendOp(rv, p.Head) - rv = appendOp(rv, p.Patch) - - return rv -} - -func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation { - if op == nil { - return ops - } - - return append(ops, op) -} - -func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.SecurityDefinitions { - if _, exists := primary.SecurityDefinitions[k]; exists { - warn := fmt.Sprintf( - "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - - primary.SecurityDefinitions[k] = v - } - - return -} - -func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Security { - found := false - for _, vv := range primary.Security { - if reflect.DeepEqual(v, vv) { - found = true - - break - } - } - - if found { - warn := fmt.Sprintf( - "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v) - skipped = append(skipped, warn) - - continue - } - primary.Security = append(primary.Security, v) - } - - return -} - -func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Definitions { - // assume name collisions represent IDENTICAL type. careful. - if _, exists := primary.Definitions[k]; exists { - warn := fmt.Sprintf( - "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Definitions[k] = v - } - - return -} - -func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) { - if m.Paths != nil { - for k, v := range m.Paths.Paths { - if _, exists := primary.Paths.Paths[k]; exists { - warn := fmt.Sprintf( - "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - - // Swagger requires that operationIds be - // unique within a spec. If we find a - // collision we append "Mixin0" to the - // operatoinId we are adding, where 0 is mixin - // index. We assume that operationIds with - // all the proivded specs are already unique. - piops := pathItemOps(v) - for _, piop := range piops { - if opIds[piop.ID] { - piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) - } - opIds[piop.ID] = true - } - primary.Paths.Paths[k] = v - } - } - - return -} - -func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Parameters { - // could try to rename on conflict but would - // have to fix $refs in the mixin. 
Complain - // for now - if _, exists := primary.Parameters[k]; exists { - warn := fmt.Sprintf( - "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Parameters[k] = v - } - - return -} - -func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Responses { - // could try to rename on conflict but would - // have to fix $refs in the mixin. Complain - // for now - if _, exists := primary.Responses[k]; exists { - warn := fmt.Sprintf( - "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Responses[k] = v - } - - return skipped -} - -func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Consumes { - found := false - for _, vv := range primary.Consumes { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Consumes = append(primary.Consumes, v) - } - - return []string{} -} - -func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Produces { - found := false - for _, vv := range primary.Produces { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Produces = append(primary.Produces, v) - } - - return []string{} -} - -func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Tags { - found := false - for _, vv := range primary.Tags { - if v.Name == vv.Name { - found = true - - break - } - } - - if found { - warn := fmt.Sprintf( - "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", - v.Name, - ) - skipped = append(skipped, warn) - - continue - } - - primary.Tags = append(primary.Tags, v) - } - - return -} - -func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Schemes { - found := false - for _, vv := range primary.Schemes { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Schemes = append(primary.Schemes, v) - } - - return []string{} -} - -func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { - var skipped, skippedInfo, skippedDocs []string - - primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) - - // merging details in swagger top properties - if primary.Host == "" { - primary.Host = m.Host - } - - if primary.BasePath == "" { - primary.BasePath = m.BasePath - } - - if primary.Info == nil { - primary.Info = m.Info - } else if m.Info != nil { - skippedInfo = mergeInfo(primary.Info, m.Info) - skipped = append(skipped, skippedInfo...) - } - - if primary.ExternalDocs == nil { - primary.ExternalDocs = m.ExternalDocs - } else if m != nil { - skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs) - skipped = append(skipped, skippedDocs...) 
- } - - return skipped -} - -// nolint: unparam -func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string { - if primary.Description == "" { - primary.Description = m.Description - } - - if primary.URL == "" { - primary.URL = m.URL - } - - return nil -} - -func mergeInfo(primary *spec.Info, m *spec.Info) []string { - var sk, skipped []string - - primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions) - skipped = append(skipped, sk...) - - if primary.Description == "" { - primary.Description = m.Description - } - - if primary.Title == "" { - primary.Description = m.Description - } - - if primary.TermsOfService == "" { - primary.TermsOfService = m.TermsOfService - } - - if primary.Version == "" { - primary.Version = m.Version - } - - if primary.Contact == nil { - primary.Contact = m.Contact - } else if m.Contact != nil { - var csk []string - primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions) - skipped = append(skipped, csk...) - - if primary.Contact.Name == "" { - primary.Contact.Name = m.Contact.Name - } - - if primary.Contact.URL == "" { - primary.Contact.URL = m.Contact.URL - } - - if primary.Contact.Email == "" { - primary.Contact.Email = m.Contact.Email - } - } - - if primary.License == nil { - primary.License = m.License - } else if m.License != nil { - var lsk []string - primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions) - skipped = append(skipped, lsk...) - - if primary.License.Name == "" { - primary.License.Name = m.License.Name - } - - if primary.License.URL == "" { - primary.License.URL = m.License.URL - } - } - - return skipped -} - -func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) { - if primary == nil { - result = m - - return - } - - if m == nil { - result = primary - - return - } - - result = primary - for k, v := range m { - if _, found := primary[k]; found { - skipped = append(skipped, k) - - continue - } - - primary[k] = v - } - - return -} - -func initPrimary(primary *spec.Swagger) { - if primary.SecurityDefinitions == nil { - primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme) - } - - if primary.Security == nil { - primary.Security = make([]map[string][]string, 0, 10) - } - - if primary.Produces == nil { - primary.Produces = make([]string, 0, 10) - } - - if primary.Consumes == nil { - primary.Consumes = make([]string, 0, 10) - } - - if primary.Tags == nil { - primary.Tags = make([]spec.Tag, 0, 10) - } - - if primary.Schemes == nil { - primary.Schemes = make([]string, 0, 10) - } - - if primary.Paths == nil { - primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)} - } - - if primary.Paths.Paths == nil { - primary.Paths.Paths = make(map[string]spec.PathItem) - } - - if primary.Definitions == nil { - primary.Definitions = make(spec.Definitions) - } - - if primary.Parameters == nil { - primary.Parameters = make(map[string]spec.Parameter) - } - - if primary.Responses == nil { - primary.Responses = make(map[string]spec.Response) - } -} diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go deleted file mode 100644 index fc055095c..000000000 --- a/vendor/github.com/go-openapi/analysis/schema.go +++ /dev/null @@ -1,256 +0,0 @@ -package analysis - -import ( - "fmt" - - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" -) - -// SchemaOpts configures the schema analyzer 
-type SchemaOpts struct { - Schema *spec.Schema - Root interface{} - BasePath string - _ struct{} -} - -// Schema analysis, will classify the schema according to known -// patterns. -func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { - if opts.Schema == nil { - return nil, fmt.Errorf("no schema to analyze") - } - - a := &AnalyzedSchema{ - schema: opts.Schema, - root: opts.Root, - basePath: opts.BasePath, - } - - a.initializeFlags() - a.inferKnownType() - a.inferEnum() - a.inferBaseType() - - if err := a.inferMap(); err != nil { - return nil, err - } - if err := a.inferArray(); err != nil { - return nil, err - } - - a.inferTuple() - - if err := a.inferFromRef(); err != nil { - return nil, err - } - - a.inferSimpleSchema() - - return a, nil -} - -// AnalyzedSchema indicates what the schema represents -type AnalyzedSchema struct { - schema *spec.Schema - root interface{} - basePath string - - hasProps bool - hasAllOf bool - hasItems bool - hasAdditionalProps bool - hasAdditionalItems bool - hasRef bool - - IsKnownType bool - IsSimpleSchema bool - IsArray bool - IsSimpleArray bool - IsMap bool - IsSimpleMap bool - IsExtendedObject bool - IsTuple bool - IsTupleWithExtra bool - IsBaseType bool - IsEnum bool -} - -// Inherits copies value fields from other onto this schema -func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { - if other == nil { - return - } - a.hasProps = other.hasProps - a.hasAllOf = other.hasAllOf - a.hasItems = other.hasItems - a.hasAdditionalItems = other.hasAdditionalItems - a.hasAdditionalProps = other.hasAdditionalProps - a.hasRef = other.hasRef - - a.IsKnownType = other.IsKnownType - a.IsSimpleSchema = other.IsSimpleSchema - a.IsArray = other.IsArray - a.IsSimpleArray = other.IsSimpleArray - a.IsMap = other.IsMap - a.IsSimpleMap = other.IsSimpleMap - a.IsExtendedObject = other.IsExtendedObject - a.IsTuple = other.IsTuple - a.IsTupleWithExtra = other.IsTupleWithExtra - a.IsBaseType = other.IsBaseType - a.IsEnum = other.IsEnum -} - -func (a *AnalyzedSchema) inferFromRef() error { - if a.hasRef { - sch := new(spec.Schema) - sch.Ref = a.schema.Ref - err := spec.ExpandSchema(sch, a.root, nil) - if err != nil { - return err - } - rsch, err := Schema(SchemaOpts{ - Schema: sch, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - // NOTE(fredbi): currently the only cause for errors is - // unresolved ref. Since spec.ExpandSchema() expands the - // schema recursively, there is no chance to get there, - // until we add more causes for error in this schema analysis. 
- return err - } - a.inherits(rsch) - } - - return nil -} - -func (a *AnalyzedSchema) inferSimpleSchema() { - a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap -} - -func (a *AnalyzedSchema) inferKnownType() { - tpe := a.schema.Type - format := a.schema.Format - a.IsKnownType = tpe.Contains("boolean") || - tpe.Contains("integer") || - tpe.Contains("number") || - tpe.Contains("string") || - (format != "" && strfmt.Default.ContainsName(format)) || - (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) -} - -func (a *AnalyzedSchema) inferMap() error { - if !a.isObjectType() { - return nil - } - - hasExtra := a.hasProps || a.hasAllOf - a.IsMap = a.hasAdditionalProps && !hasExtra - a.IsExtendedObject = a.hasAdditionalProps && hasExtra - - if !a.IsMap { - return nil - } - - // maps - if a.schema.AdditionalProperties.Schema != nil { - msch, err := Schema(SchemaOpts{ - Schema: a.schema.AdditionalProperties.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - a.IsSimpleMap = msch.IsSimpleSchema - } else if a.schema.AdditionalProperties.Allows { - a.IsSimpleMap = true - } - - return nil -} - -func (a *AnalyzedSchema) inferArray() error { - // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple - // (yes, even if the Items array contains only one element). - // arrays in JSON schema may be unrestricted (i.e no Items specified). - // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. - // - // NOTE: the spec package misses the distinction between: - // items: [] and items: {}, so we consider both arrays here. - a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) - if a.IsArray && a.hasItems { - if a.schema.Items.Schema != nil { - itsch, err := Schema(SchemaOpts{ - Schema: a.schema.Items.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - - a.IsSimpleArray = itsch.IsSimpleSchema - } - } - - if a.IsArray && !a.hasItems { - a.IsSimpleArray = true - } - - return nil -} - -func (a *AnalyzedSchema) inferTuple() { - tuple := a.hasItems && a.schema.Items.Schemas != nil - a.IsTuple = tuple && !a.hasAdditionalItems - a.IsTupleWithExtra = tuple && a.hasAdditionalItems -} - -func (a *AnalyzedSchema) inferBaseType() { - if a.isObjectType() { - a.IsBaseType = a.schema.Discriminator != "" - } -} - -func (a *AnalyzedSchema) inferEnum() { - a.IsEnum = len(a.schema.Enum) > 0 -} - -func (a *AnalyzedSchema) initializeFlags() { - a.hasProps = len(a.schema.Properties) > 0 - a.hasAllOf = len(a.schema.AllOf) > 0 - a.hasRef = a.schema.Ref.String() != "" - - a.hasItems = a.schema.Items != nil && - (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) - - a.hasAdditionalProps = a.schema.AdditionalProperties != nil && - (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows) - - a.hasAdditionalItems = a.schema.AdditionalItems != nil && - (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) -} - -func (a *AnalyzedSchema) isObjectType() bool { - return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) -} - -func (a *AnalyzedSchema) isArrayType() bool { - return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) -} - -// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
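
The Schema entry point deleted above classifies a schema via the infer* passes just shown. A small sketch of how a caller would exercise it, again assuming the vendored go-openapi packages; spec.MapProperty builds an object schema whose additionalProperties is the given schema:

package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// An object with additionalProperties: {type: string}, i.e. a map of strings.
	sch := spec.MapProperty(spec.StringProperty())

	an, err := analysis.Schema(analysis.SchemaOpts{Schema: sch})
	if err != nil {
		panic(err)
	}
	// inferMap should classify this as a simple map, since the
	// value schema is a known primitive type.
	fmt.Println(an.IsMap, an.IsSimpleMap) // expected: true true
}
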
-// -// Complex means the schema is any of: -// - a simple type (primitive) -// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) -// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will -// generate a definition) -func (a *AnalyzedSchema) isAnalyzedAsComplex() bool { - return !a.IsSimpleSchema && !a.IsArray && !a.IsMap -} diff --git a/vendor/github.com/go-openapi/errors/.gitattributes b/vendor/github.com/go-openapi/errors/.gitattributes deleted file mode 100644 index a0717e4b3..000000000 --- a/vendor/github.com/go-openapi/errors/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/errors/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml deleted file mode 100644 index 449a43c2b..000000000 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ /dev/null @@ -1,46 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - - testpackage - - gci - - gofumpt - - goerr113 - - gomnd - - tparallel - - nestif - - godot - - errorlint - - paralleltest - - tparallel - - cyclop - - errname - - varnamelen diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/errors/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md deleted file mode 100644 index 4aac049e6..000000000 --- a/vendor/github.com/go-openapi/errors/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# OpenAPI errors - -[![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) -[![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) - -Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go deleted file mode 100644 index 854d6eec1..000000000 --- a/vendor/github.com/go-openapi/errors/api.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" - "reflect" - "strings" -) - -// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
-var DefaultHTTPCode = http.StatusUnprocessableEntity - -// Error represents a error interface all swagger framework errors implement -type Error interface { - error - Code() int32 -} - -type apiError struct { - code int32 - message string -} - -func (a *apiError) Error() string { - return a.message -} - -func (a *apiError) Code() int32 { - return a.code -} - -// MarshalJSON implements the JSON encoding interface -func (a apiError) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "code": a.code, - "message": a.message, - }) -} - -// New creates a new API error with a code and a message -func New(code int32, message string, args ...interface{}) Error { - if len(args) > 0 { - return &apiError{code, fmt.Sprintf(message, args...)} - } - return &apiError{code, message} -} - -// NotFound creates a new not found error -func NotFound(message string, args ...interface{}) Error { - if message == "" { - message = "Not found" - } - return New(http.StatusNotFound, fmt.Sprintf(message, args...)) -} - -// NotImplemented creates a new not implemented error -func NotImplemented(message string) Error { - return New(http.StatusNotImplemented, message) -} - -// MethodNotAllowedError represents an error for when the path matches but the method doesn't -type MethodNotAllowedError struct { - code int32 - Allowed []string - message string -} - -func (m *MethodNotAllowedError) Error() string { - return m.message -} - -// Code the error code -func (m *MethodNotAllowedError) Code() int32 { - return m.code -} - -// MarshalJSON implements the JSON encoding interface -func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "code": m.code, - "message": m.message, - "allowed": m.Allowed, - }) -} - -func errorAsJSON(err Error) []byte { - b, _ := json.Marshal(struct { - Code int32 `json:"code"` - Message string `json:"message"` - }{err.Code(), err.Error()}) - return b -} - -func flattenComposite(errs *CompositeError) *CompositeError { - var res []error - for _, er := range errs.Errors { - switch e := er.(type) { - case *CompositeError: - if len(e.Errors) > 0 { - flat := flattenComposite(e) - if len(flat.Errors) > 0 { - res = append(res, flat.Errors...) - } - } - default: - if e != nil { - res = append(res, e) - } - } - } - return CompositeValidationError(res...) 
-} - -// MethodNotAllowed creates a new method not allowed error -func MethodNotAllowed(requested string, allow []string) Error { - msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) - return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} -} - -// ServeError the error handler interface implementation -func ServeError(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Set("Content-Type", "application/json") - switch e := err.(type) { - case *CompositeError: - er := flattenComposite(e) - // strips composite errors to first element only - if len(er.Errors) > 0 { - ServeError(rw, r, er.Errors[0]) - } else { - // guard against empty CompositeError (invalid construct) - ServeError(rw, r, nil) - } - case *MethodNotAllowedError: - rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ",")) - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case Error: - value := reflect.ValueOf(e) - if value.Kind() == reflect.Ptr && value.IsNil() { - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - return - } - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case nil: - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - default: - rw.WriteHeader(http.StatusInternalServerError) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) - } - } -} - -func asHTTPCode(input int) int { - if input >= 600 { - return DefaultHTTPCode - } - return input -} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go deleted file mode 100644 index 0545b501b..000000000 --- a/vendor/github.com/go-openapi/errors/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import "net/http" - -// Unauthenticated returns an unauthenticated error -func Unauthenticated(scheme string) Error { - return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) -} diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go deleted file mode 100644 index 963d42740..000000000 --- a/vendor/github.com/go-openapi/errors/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
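
api.go above wires the Error interface to HTTP: ServeError picks a status via asHTTPCode and writes the error's JSON form. A short sketch against the vendored go-openapi/errors package:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/go-openapi/errors"
)

func main() {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/things/42", nil)

	// NotFound builds an Error with code 404; ServeError renders it as JSON.
	errors.ServeError(rec, req, errors.NotFound("no thing with id %d", 42))

	fmt.Println(rec.Code, rec.Body.String())
	// Should print: 404 {"code":404,"message":"no thing with id 42"}
}
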
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* - -Package errors provides an Error interface and several concrete types -implementing this interface to manage API errors and JSON-schema validation -errors. - -A middleware handler ServeError() is provided to serve the errors types -it defines. - -It is used throughout the various go-openapi toolkit libraries -(https://github.com/go-openapi). - -*/ -package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go deleted file mode 100644 index dfebe8f95..000000000 --- a/vendor/github.com/go-openapi/errors/headers.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" -) - -// Validation represents a failure of a precondition -type Validation struct { - code int32 - Name string - In string - Value interface{} - message string - Values []interface{} -} - -func (e *Validation) Error() string { - return e.message -} - -// Code the error code -func (e *Validation) Code() int32 { - return e.code -} - -// MarshalJSON implements the JSON encoding interface -func (e Validation) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "code": e.code, - "message": e.message, - "in": e.In, - "name": e.Name, - "value": e.Value, - "values": e.Values, - }) -} - -// ValidateName sets the name for a validation or updates it for a nested property -func (e *Validation) ValidateName(name string) *Validation { - if name != "" { - if e.Name == "" { - e.Name = name - e.message = name + e.message - } else { - e.Name = name + "." + e.Name - e.message = name + "." 
+ e.message - } - } - return e -} - -const ( - contentTypeFail = `unsupported media type %q, only %v are allowed` - responseFormatFail = `unsupported media type requested, only %v are available` -) - -// InvalidContentType error for an invalid content type -func InvalidContentType(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusUnsupportedMediaType, - Name: "Content-Type", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(contentTypeFail, value, allowed), - } -} - -// InvalidResponseFormat error for an unacceptable response format request -func InvalidResponseFormat(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusNotAcceptable, - Name: "Accept", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(responseFormatFail, allowed), - } -} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go deleted file mode 100644 index c26ad484e..000000000 --- a/vendor/github.com/go-openapi/errors/middleware.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "bytes" - "fmt" - "strings" -) - -// APIVerificationFailed is an error that contains all the missing info for a mismatched section -// between the api registrations and the api spec -type APIVerificationFailed struct { - Section string `json:"section,omitempty"` - MissingSpecification []string `json:"missingSpecification,omitempty"` - MissingRegistration []string `json:"missingRegistration,omitempty"` -} - -// -func (v *APIVerificationFailed) Error() string { - buf := bytes.NewBuffer(nil) - - hasRegMissing := len(v.MissingRegistration) > 0 - hasSpecMissing := len(v.MissingSpecification) > 0 - - if hasRegMissing { - buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) - } - - if hasRegMissing && hasSpecMissing { - buf.WriteString("\n") - } - - if hasSpecMissing { - buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) - } - - return buf.String() -} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go deleted file mode 100644 index 5096e1ea7..000000000 --- a/vendor/github.com/go-openapi/errors/parsing.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
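
headers.go above builds Validation values for content-negotiation failures. For instance, a sketch of InvalidContentType:

package main

import (
	"fmt"

	"github.com/go-openapi/errors"
)

func main() {
	v := errors.InvalidContentType("text/plain", []string{"application/json"})
	fmt.Println(v.Code())  // 415
	fmt.Println(v.Error()) // unsupported media type "text/plain", only [application/json] are allowed
}
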
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" -) - -// ParseError represents a parsing error -type ParseError struct { - code int32 - Name string - In string - Value string - Reason error - message string -} - -func (e *ParseError) Error() string { - return e.message -} - -// Code returns the http status code for this error -func (e *ParseError) Code() int32 { - return e.code -} - -// MarshalJSON implements the JSON encoding interface -func (e ParseError) MarshalJSON() ([]byte, error) { - var reason string - if e.Reason != nil { - reason = e.Reason.Error() - } - return json.Marshal(map[string]interface{}{ - "code": e.code, - "message": e.message, - "in": e.In, - "name": e.Name, - "value": e.Value, - "reason": reason, - }) -} - -const ( - parseErrorTemplContent = `parsing %s %s from %q failed, because %s` - parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` -) - -// NewParseError creates a new parse error -func NewParseError(name, in, value string, reason error) *ParseError { - var msg string - if in == "" { - msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) - } else { - msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) - } - return &ParseError{ - code: 400, - Name: name, - In: in, - Value: value, - Reason: reason, - message: msg, - } -} diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go deleted file mode 100644 index da5f6c78c..000000000 --- a/vendor/github.com/go-openapi/errors/schema.go +++ /dev/null @@ -1,611 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
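
NewParseError, removed just above, wraps a lower-level conversion failure with the parameter's name and location. A sketch:

package main

import (
	"fmt"
	"strconv"

	"github.com/go-openapi/errors"
)

func main() {
	_, convErr := strconv.Atoi("abc")

	// Wrap the conversion failure as a 400 ParseError for the "limit" query param.
	e := errors.NewParseError("limit", "query", "abc", convErr)
	fmt.Println(e.Code(), e.Error())
	// Should print:
	// 400 parsing limit query from "abc" failed, because strconv.Atoi: parsing "abc": invalid syntax
}
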
- -package errors - -import ( - "encoding/json" - "fmt" - "strings" -) - -const ( - invalidType = "%s is an invalid type name" - typeFail = "%s in %s must be of type %s" - typeFailWithData = "%s in %s must be of type %s: %q" - typeFailWithError = "%s in %s must be of type %s, because: %s" - requiredFail = "%s in %s is required" - readOnlyFail = "%s in %s is readOnly" - tooLongMessage = "%s in %s should be at most %d chars long" - tooShortMessage = "%s in %s should be at least %d chars long" - patternFail = "%s in %s should match '%s'" - enumFail = "%s in %s should be one of %v" - multipleOfFail = "%s in %s should be a multiple of %v" - maxIncFail = "%s in %s should be less than or equal to %v" - maxExcFail = "%s in %s should be less than %v" - minIncFail = "%s in %s should be greater than or equal to %v" - minExcFail = "%s in %s should be greater than %v" - uniqueFail = "%s in %s shouldn't contain duplicates" - maxItemsFail = "%s in %s should have at most %d items" - minItemsFail = "%s in %s should have at least %d items" - typeFailNoIn = "%s must be of type %s" - typeFailWithDataNoIn = "%s must be of type %s: %q" - typeFailWithErrorNoIn = "%s must be of type %s, because: %s" - requiredFailNoIn = "%s is required" - readOnlyFailNoIn = "%s is readOnly" - tooLongMessageNoIn = "%s should be at most %d chars long" - tooShortMessageNoIn = "%s should be at least %d chars long" - patternFailNoIn = "%s should match '%s'" - enumFailNoIn = "%s should be one of %v" - multipleOfFailNoIn = "%s should be a multiple of %v" - maxIncFailNoIn = "%s should be less than or equal to %v" - maxExcFailNoIn = "%s should be less than %v" - minIncFailNoIn = "%s should be greater than or equal to %v" - minExcFailNoIn = "%s should be greater than %v" - uniqueFailNoIn = "%s shouldn't contain duplicates" - maxItemsFailNoIn = "%s should have at most %d items" - minItemsFailNoIn = "%s should have at least %d items" - noAdditionalItems = "%s in %s can't have additional items" - noAdditionalItemsNoIn = "%s can't have additional items" - tooFewProperties = "%s in %s should have at least %d properties" - tooFewPropertiesNoIn = "%s should have at least %d properties" - tooManyProperties = "%s in %s should have at most %d properties" - tooManyPropertiesNoIn = "%s should have at most %d properties" - unallowedProperty = "%s.%s in %s is a forbidden property" - unallowedPropertyNoIn = "%s.%s is a forbidden property" - failedAllPatternProps = "%s.%s in %s failed all pattern properties" - failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" - multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" -) - -// All code responses can be used to differentiate errors for different handling -// by the consuming program -const ( - // CompositeErrorCode remains 422 for backwards-compatibility - // and to separate it from validation errors with cause - CompositeErrorCode = 422 - // InvalidTypeCode is used for any subclass of invalid types - InvalidTypeCode = 600 + iota - RequiredFailCode - TooLongFailCode - TooShortFailCode - PatternFailCode - EnumFailCode - MultipleOfFailCode - MaxFailCode - MinFailCode - UniqueFailCode - MaxItemsFailCode - MinItemsFailCode - NoAdditionalItemsCode - TooFewPropertiesCode - TooManyPropertiesCode - UnallowedPropertyCode - FailedAllPatternPropsCode - MultipleOfMustBePositiveCode - ReadOnlyFailCode -) - -// CompositeError is an error that groups several errors together -type CompositeError struct { - Errors []error - code int32 - message string -} - -// Code for this error 
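
CompositeError, whose methods follow below, groups the Validation values built from the message templates above; ValidateName then qualifies every nested failure with its parent property name. A sketch using two of the constructors from this file:

package main

import (
	"fmt"

	"github.com/go-openapi/errors"
)

func main() {
	ce := errors.CompositeValidationError(
		errors.PropertyNotAllowed("config", "body", "debug"),
		errors.TooFewProperties("config", "body", 2),
	)

	// ValidateName prefixes each nested validation with the parent name.
	fmt.Println(ce.ValidateName("server").Error())
	// Should print:
	// validation failure list:
	// server.config.debug in body is a forbidden property
	// server.config in body should have at least 2 properties
}
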
-func (c *CompositeError) Code() int32 { - return c.code -} - -func (c *CompositeError) Error() string { - if len(c.Errors) > 0 { - msgs := []string{c.message + ":"} - for _, e := range c.Errors { - msgs = append(msgs, e.Error()) - } - return strings.Join(msgs, "\n") - } - return c.message -} - -// MarshalJSON implements the JSON encoding interface -func (c CompositeError) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "code": c.code, - "message": c.message, - "errors": c.Errors, - }) -} - -// CompositeValidationError an error to wrap a bunch of other errors -func CompositeValidationError(errors ...error) *CompositeError { - return &CompositeError{ - code: CompositeErrorCode, - Errors: append([]error{}, errors...), - message: "validation failure list", - } -} - -// ValidateName recursively sets the name for all validations or updates them for nested properties -func (c *CompositeError) ValidateName(name string) *CompositeError { - for i, e := range c.Errors { - if ve, ok := e.(*Validation); ok { - c.Errors[i] = ve.ValidateName(name) - } else if ce, ok := e.(*CompositeError); ok { - c.Errors[i] = ce.ValidateName(name) - } - } - - return c -} - -// FailedAllPatternProperties an error for when the property doesn't match a pattern -func FailedAllPatternProperties(name, in, key string) *Validation { - msg := fmt.Sprintf(failedAllPatternProps, name, key, in) - if in == "" { - msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key) - } - return &Validation{ - code: FailedAllPatternPropsCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// PropertyNotAllowed an error for when the property doesn't match a pattern -func PropertyNotAllowed(name, in, key string) *Validation { - msg := fmt.Sprintf(unallowedProperty, name, key, in) - if in == "" { - msg = fmt.Sprintf(unallowedPropertyNoIn, name, key) - } - return &Validation{ - code: UnallowedPropertyCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// TooFewProperties an error for an object with too few properties -func TooFewProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooFewProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n) - } - return &Validation{ - code: TooFewPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// TooManyProperties an error for an object with too many properties -func TooManyProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooManyProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n) - } - return &Validation{ - code: TooManyPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// AdditionalItemsNotAllowed an error for invalid additional items -func AdditionalItemsNotAllowed(name, in string) *Validation { - msg := fmt.Sprintf(noAdditionalItems, name, in) - if in == "" { - msg = fmt.Sprintf(noAdditionalItemsNoIn, name) - } - return &Validation{ - code: NoAdditionalItemsCode, - Name: name, - In: in, - message: msg, - } -} - -// InvalidCollectionFormat another flavor of invalid type error -func InvalidCollectionFormat(name, in, format string) *Validation { - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: format, - message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name), - } -} - -// InvalidTypeName an error for when the type is invalid -func InvalidTypeName(typeName string) *Validation { - return &Validation{ - 
code: InvalidTypeCode, - Value: typeName, - message: fmt.Sprintf(invalidType, typeName), - } -} - -// InvalidType creates an error for when the type is invalid -func InvalidType(name, in, typeName string, value interface{}) *Validation { - var message string - - if in != "" { - switch value.(type) { - case string: - message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) - default: - message = fmt.Sprintf(typeFail, name, in, typeName) - } - } else { - switch value.(type) { - case string: - message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) - default: - message = fmt.Sprintf(typeFailNoIn, name, typeName) - } - } - - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: value, - message: message, - } - -} - -// DuplicateItems error for when an array contains duplicates -func DuplicateItems(name, in string) *Validation { - msg := fmt.Sprintf(uniqueFail, name, in) - if in == "" { - msg = fmt.Sprintf(uniqueFailNoIn, name) - } - return &Validation{ - code: UniqueFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooManyItems error for when an array contains too many items -func TooManyItems(name, in string, max int64, value interface{}) *Validation { - msg := fmt.Sprintf(maxItemsFail, name, in, max) - if in == "" { - msg = fmt.Sprintf(maxItemsFailNoIn, name, max) - } - - return &Validation{ - code: MaxItemsFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooFewItems error for when an array contains too few items -func TooFewItems(name, in string, min int64, value interface{}) *Validation { - msg := fmt.Sprintf(minItemsFail, name, in, min) - if in == "" { - msg = fmt.Sprintf(minItemsFailNoIn, name, min) - } - return &Validation{ - code: MinItemsFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// ExceedsMaximumInt error for when maximum validation fails -func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMaximumUint error for when maximum validation fails -func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMaximum error for when maximum validation fails -func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - 
message: message, - } -} - -// ExceedsMinimumInt error for when minimum validation fails -func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimumUint error for when minimum validation fails -func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimum error for when minimum validation fails -func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// NotMultipleOf error for when multiple of validation fails -func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) - } else { - msg = fmt.Sprintf(multipleOfFail, name, in, multiple) - } - return &Validation{ - code: MultipleOfFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// EnumFail error for when an enum validation fails -func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(enumFailNoIn, name, values) - } else { - msg = fmt.Sprintf(enumFail, name, in, values) - } - - return &Validation{ - code: EnumFailCode, - Name: name, - In: in, - Value: value, - Values: values, - message: msg, - } -} - -// Required error for when a value is missing -func Required(name, in string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(requiredFailNoIn, name) - } else { - msg = fmt.Sprintf(requiredFail, name, in) - } - return &Validation{ - code: RequiredFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// ReadOnly error for when a value is present in request -func ReadOnly(name, in string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(readOnlyFailNoIn, name) - } else { - msg = fmt.Sprintf(readOnlyFail, name, in) - } - return &Validation{ - code: ReadOnlyFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooLong error for when a string is too long -func TooLong(name, in string, max int64, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooLongMessageNoIn, name, max) - } else { - msg = fmt.Sprintf(tooLongMessage, name, in, max) - } - return &Validation{ - code: TooLongFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - 
-// TooShort error for when a string is too short -func TooShort(name, in string, min int64, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooShortMessageNoIn, name, min) - } else { - msg = fmt.Sprintf(tooShortMessage, name, in, min) - } - - return &Validation{ - code: TooShortFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// FailedPattern error for when a string fails a regex pattern match -// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. -func FailedPattern(name, in, pattern string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(patternFailNoIn, name, pattern) - } else { - msg = fmt.Sprintf(patternFail, name, in, pattern) - } - - return &Validation{ - code: PatternFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// MultipleOfMustBePositive error for when a -// multipleOf factor is negative -func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { - return &Validation{ - code: MultipleOfMustBePositiveCode, - Name: name, - In: in, - Value: factor, - message: fmt.Sprintf(multipleOfMustBePositive, name, factor), - } -} diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore deleted file mode 100644 index 769c24400..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 03a22fe06..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.15.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- 
-race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md deleted file mode 100644 index 813788aff..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) -An implementation of JSON Pointer - Go language - -## Status -Completed YES - -Tested YES - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go deleted file mode 100644 index 7df9853de..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 - -package jsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/go-openapi/swag" -) - -const ( - emptyPointer = `` - pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator -) - -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() -var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() - -// JSONPointable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONPointable interface { - JSONLookup(string) (interface{}, error) -} - -// JSONSetable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONSetable interface { - JSONSet(string, interface{}) error -} - -// New creates a new json pointer for the given string -func New(jsonPointerString string) (Pointer, error) { - - var p Pointer - err := p.parse(jsonPointerString) - return p, err - -} - -// Pointer the json pointer reprsentation -type Pointer struct { - referenceTokens []string -} - -// "Constructor", parses the given string JSON pointer -func (p *Pointer) parse(jsonPointerString string) error { - - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } - } - } - - return err -} - -// Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - return p.get(document, swag.DefaultJSONNameProvider) -} - -// Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { - return document, p.set(document, value, swag.DefaultJSONNameProvider) -} - -// GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { - return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) -} - -// SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { - return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) -} - -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - } - - switch kind { - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - return fld.Interface(), kind, nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if 
mv.IsValid() { - return mv.Interface(), kind, nil - } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return nil, kind, err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - return elem.Interface(), kind, nil - - default: - return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { - rValue := reflect.Indirect(reflect.ValueOf(node)) - - if ns, ok := node.(JSONSetable); ok { // pointer impl - return ns.JSONSet(decodedToken, data) - } - - if rValue.Type().Implements(jsonSetableType) { - return node.(JSONSetable).JSONSet(decodedToken, data) - } - - switch rValue.Kind() { - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.IsValid() { - fld.Set(reflect.ValueOf(data)) - } - return nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - rValue.SetMapIndex(kv, reflect.ValueOf(data)) - return nil - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if !elem.CanSet() { - return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) - } - elem.Set(reflect.ValueOf(data)) - return nil - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - return node, kind, nil - } - - for _, token := range p.referenceTokens { - - decodedToken := Unescape(token) - - r, knd, err := getSingleImpl(node, decodedToken, nameProvider) - if err != nil { - return nil, knd, err - } - node, kind = r, knd - - } - - rValue := reflect.ValueOf(node) - kind = rValue.Kind() - - return node, kind, nil -} - -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { - knd := reflect.ValueOf(node).Kind() - - if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") - } - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - // Full document when empty - if len(p.referenceTokens) == 0 { - return nil - } - - lastI := len(p.referenceTokens) - 1 - for i, token := range p.referenceTokens { - isLastToken := i == lastI - decodedToken := Unescape(token) - - if isLastToken { - - return setSingleImpl(node, data, decodedToken, nameProvider) - } - - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return err - } - fld := reflect.ValueOf(r) - 
if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = r - continue - } - - switch kind { - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = fld.Interface() - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if !mv.IsValid() { - return fmt.Errorf("object has no key %q", decodedToken) - } - if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { - node = mv.Addr().Interface() - continue - } - node = mv.Interface() - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { - node = elem.Addr().Interface() - continue - } - node = elem.Interface() - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - - } - - return nil -} - -// DecodedTokens returns the decoded tokens -func (p *Pointer) DecodedTokens() []string { - result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) - } - return result -} - -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document -func (p *Pointer) IsEmpty() bool { - return len(p.referenceTokens) == 0 -} - -// Pointer to string representation function -func (p *Pointer) String() string { - - if len(p.referenceTokens) == 0 { - return emptyPointer - } - - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... 
and vice versa - -const ( - encRefTok0 = `~0` - encRefTok1 = `~1` - decRefTok0 = `~` - decRefTok1 = `/` -) - -// Unescape unescapes a json pointer reference token string to the original representation -func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) - return step2 -} - -// Escape escapes a pointer reference token string -func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) - return step2 -} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore deleted file mode 100644 index 769c24400..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml deleted file mode 100644 index f9381aee5..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ /dev/null @@ -1,41 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - - testpackage - - gci - - gofumpt - - goerr113 - - gomnd - - tparallel - - nestif - - godot - - errorlint diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 05482f4b9..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -install: -- go get gotest.tools/gotestsum -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/jsonreference/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md deleted file mode 100644 index b94753aa5..000000000 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) -An implementation of JSON Reference - Go language - -## Status -Feature complete. Stable API - -## Dependencies -https://github.com/go-openapi/jsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go deleted file mode 100644 index 3bc0a6e26..000000000 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. 
-// -// created 26-02-2013 - -package jsonreference - -import ( - "errors" - "net/url" - "strings" - - "github.com/PuerkitoBio/purell" - "github.com/go-openapi/jsonpointer" -) - -const ( - fragmentRune = `#` -) - -// New creates a new reference for the given string -func New(jsonReferenceString string) (Ref, error) { - - var r Ref - err := r.parse(jsonReferenceString) - return r, err - -} - -// MustCreateRef parses the ref string and panics when it's invalid. -// Use the New method for a version that returns an error -func MustCreateRef(ref string) Ref { - r, err := New(ref) - if err != nil { - panic(err) - } - return r -} - -// Ref represents a json reference object -type Ref struct { - referenceURL *url.URL - referencePointer jsonpointer.Pointer - - HasFullURL bool - HasURLPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -// GetURL gets the URL for this reference -func (r *Ref) GetURL() *url.URL { - return r.referenceURL -} - -// GetPointer gets the json pointer for this reference -func (r *Ref) GetPointer() *jsonpointer.Pointer { - return &r.referencePointer -} - -// String returns the best version of the url for this reference -func (r *Ref) String() string { - - if r.referenceURL != nil { - return r.referenceURL.String() - } - - if r.HasFragmentOnly { - return fragmentRune + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -// IsRoot returns true if this reference is a root document -func (r *Ref) IsRoot() bool { - return r.referenceURL != nil && - !r.IsCanonical() && - !r.HasURLPathOnly && - r.referenceURL.Fragment == "" -} - -// IsCanonical returns true when this pointer starts with http(s):// or file:// -func (r *Ref) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) -} - -// "Constructor", parses the given string JSON reference -func (r *Ref) parse(jsonReferenceString string) error { - - parsed, err := url.Parse(jsonReferenceString) - if err != nil { - return err - } - - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) - refURL := r.referenceURL - - if refURL.Scheme != "" && refURL.Host != "" { - r.HasFullURL = true - } else { - if refURL.Path != "" { - r.HasURLPathOnly = true - } else if refURL.RawQuery == "" && refURL.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refURL.Scheme == "file" - r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") - - // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error - r.referencePointer, _ = jsonpointer.New(refURL.Fragment) - - return nil -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - childURL := child.GetURL() - parentURL := r.GetURL() - if childURL == nil { - return nil, errors.New("child url is nil") - } - if parentURL == nil { - return &child, nil - } - - ref, err := New(parentURL.ResolveReference(childURL).String()) - if err != nil { - return nil, err - } - return &ref, nil -} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/loads/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore deleted file mode 100644 index e4f15f17b..000000000 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -secrets.yml -coverage.out -profile.cov -profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml deleted file mode 100644 index d48b4a515..000000000 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ /dev/null @@ -1,44 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits - - godox - - gocognit - - whitespace - - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - - testpackage - - gci - - gofumpt - - goerr113 - - gomnd - - tparallel - - nestif - - godot - - errorlint - - paralleltest diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml deleted file mode 100644 index cd4a7c331..000000000 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.16.x -- 1.x -install: -- go get gotest.tools/gotestsum -language: go -arch: -- amd64 -- ppc64le -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -notifications: - slack: - secure: 
OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/loads/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index df1f62646..000000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![Actions/Go Test Status](https://github.com/go-openapi/loads/workflows/Go%20Test/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test") - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go deleted file mode 100644 index 3046da4ce..000000000 --- a/vendor/github.com/go-openapi/loads/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package loads provides document loading methods for swagger (OAI) specifications. - -It is used by other go-openapi packages to load and run analysis on local or remote spec documents. - -*/ -package loads diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go deleted file mode 100644 index 44bd32b5b..000000000 --- a/vendor/github.com/go-openapi/loads/loaders.go +++ /dev/null @@ -1,134 +0,0 @@ -package loads - -import ( - "encoding/json" - "errors" - "net/url" - - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -var ( - // Default chain of loaders, defined at the package level. - // - // By default this matches json and yaml documents. - // - // May be altered with AddLoader(). 
- loaders *loader -) - -func init() { - jsonLoader := &loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: func(pth string) bool { - return true - }, - Fn: JSONDoc, - }, - } - - loaders = jsonLoader.WithHead(&loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: swag.YAMLMatcher, - Fn: swag.YAMLDoc, - }, - }) - - // sets the global default loader for go-openapi/spec - spec.PathLoader = loaders.Load -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -// DocLoaderWithMatch describes a loading function for a given extension match. -type DocLoaderWithMatch struct { - Fn DocLoader - Match DocMatcher -} - -// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options -func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { - return DocLoaderWithMatch{ - Fn: fn, - Match: matcher, - } -} - -type loader struct { - DocLoaderWithMatch - Next *loader -} - -// WithHead adds a loader at the head of the current stack -func (l *loader) WithHead(head *loader) *loader { - if head == nil { - return l - } - head.Next = l - return head -} - -// WithNext adds a loader at the trail of the current stack -func (l *loader) WithNext(next *loader) *loader { - l.Next = next - return next -} - -// Load the raw document from path -func (l *loader) Load(path string) (json.RawMessage, error) { - _, erp := url.Parse(path) - if erp != nil { - return nil, erp - } - - var lastErr error = errors.New("no loader matched") // default error if no match was found - for ldr := l; ldr != nil; ldr = ldr.Next { - if ldr.Match != nil && !ldr.Match(path) { - continue - } - - // try then move to next one if there is an error - b, err := ldr.Fn(path) - if err == nil { - return b, nil - } - - lastErr = err - } - - return nil, lastErr -} - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// AddLoader for a document, executed before other previously set loaders. -// -// This sets the configuration at the package level. 
-// -// NOTE: -// * this updates the default loader used by github.com/go-openapi/spec -// * since this sets package level globals, you shouln't call this concurrently -// -func AddLoader(predicate DocMatcher, load DocLoader) { - loaders = loaders.WithHead(&loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: predicate, - Fn: load, - }, - }) - - // sets the global default loader for go-openapi/spec - spec.PathLoader = loaders.Load -} diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go deleted file mode 100644 index f8305d560..000000000 --- a/vendor/github.com/go-openapi/loads/options.go +++ /dev/null @@ -1,61 +0,0 @@ -package loads - -type options struct { - loader *loader -} - -func defaultOptions() *options { - return &options{ - loader: loaders, - } -} - -func loaderFromOptions(options []LoaderOption) *loader { - opts := defaultOptions() - for _, apply := range options { - apply(opts) - } - - return opts.loader -} - -// LoaderOption allows to fine-tune the spec loader behavior -type LoaderOption func(*options) - -// WithDocLoader sets a custom loader for loading specs -func WithDocLoader(l DocLoader) LoaderOption { - return func(opt *options) { - if l == nil { - return - } - opt.loader = &loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Fn: l, - }, - } - } -} - -// WithDocLoaderMatches sets a chain of custom loaders for loading specs -// for different extension matches. -// -// Loaders are executed in the order of provided DocLoaderWithMatch'es. -func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { - return func(opt *options) { - var final, prev *loader - for _, ldr := range l { - if ldr.Fn == nil { - continue - } - - if prev == nil { - final = &loader{DocLoaderWithMatch: ldr} - prev = final - continue - } - - prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr}) - } - opt.loader = final - } -} diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index 93c8d4b89..000000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package loads - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -func init() { - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - specFilePath string - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage - pathLoader *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string, options ...LoaderOption) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(data, "", options...) 
-} - -// Embedded returns a Document based on embedded specs. No analysis is required -func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) { - var origSpec, flatSpec spec.Swagger - if err := json.Unmarshal(orig, &origSpec); err != nil { - return nil, err - } - if err := json.Unmarshal(flat, &flatSpec); err != nil { - return nil, err - } - return &Document{ - raw: orig, - origSpec: &origSpec, - spec: &flatSpec, - pathLoader: loaderFromOptions(options), - }, nil -} - -// Spec loads a new spec document from a local or remote path -func Spec(path string, options ...LoaderOption) (*Document, error) { - - ldr := loaderFromOptions(options) - - b, err := ldr.Load(path) - if err != nil { - return nil, err - } - - document, err := Analyzed(b, "", options...) - if err != nil { - return nil, err - } - - if document != nil { - document.specFilePath = path - document.pathLoader = ldr - } - - return document, err -} - -// Analyzed creates a new analyzed spec document for a root json.RawMessage. -func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - raw, err := trimData(data) // trim blanks, then convert yaml docs into json - if err != nil { - return nil, err - } - - swspec := new(spec.Swagger) - if err = json.Unmarshal(raw, swspec); err != nil { - return nil, err - } - - origsqspec, err := cloneSpec(swspec) - if err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: spec.MustLoadSwagger20Schema(), - spec: swspec, - raw: raw, - origSpec: origsqspec, - pathLoader: loaderFromOptions(options), - } - - return d, nil -} - -func trimData(in json.RawMessage) (json.RawMessage, error) { - trimmed := bytes.TrimSpace(in) - if len(trimmed) == 0 { - return in, nil - } - - if trimmed[0] == '{' || trimmed[0] == '[' { - return trimmed, nil - } - - // assume yaml doc: convert it to json - yml, err := swag.BytesToYAMLDoc(trimmed) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - - d, err := swag.YAMLToJSON(yml) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { - - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - - var expandOptions *spec.ExpandOptions - if len(options) > 0 { - expandOptions = options[0] - } else { - expandOptions = &spec.ExpandOptions{ - RelativeBase: d.specFilePath, - } - } - - if expandOptions.PathLoader == nil { - if d.pathLoader != nil { - // use loader from Document options - expandOptions.PathLoader = d.pathLoader.Load - } else { - // use package level loader - expandOptions.PathLoader = loaders.Load - } - } - - if err := spec.ExpandSpec(swspec, expandOptions); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - specFilePath: d.specFilePath, - schema: spec.MustLoadSwagger20Schema(), - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 
2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -// OrigSpec yields the original spec -func (d *Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset to the original spec -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - dd.pathLoader = d.pathLoader - return dd -} - -// SpecFilePath returns the file path of the spec if one is defined -func (d *Document) SpecFilePath() string { - return d.specFilePath -} - -func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { - var b bytes.Buffer - if err := gob.NewEncoder(&b).Encode(src); err != nil { - return nil, err - } - - var dst spec.Swagger - if err := gob.NewDecoder(&b).Decode(&dst); err != nil { - return nil, err - } - return &dst, nil -} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/runtime/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes deleted file mode 100644 index d207b1802..000000000 --- a/vendor/github.com/go-openapi/runtime/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore deleted file mode 100644 index fea8b84ec..000000000 --- a/vendor/github.com/go-openapi/runtime/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -*.cov -*.out -playground diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml deleted file mode 100644 index b1aa7928a..000000000 --- a/vendor/github.com/go-openapi/runtime/.golangci.yml +++ /dev/null @@ -1,44 +0,0 @@ -linters-settings: - govet: - # Using err repeatedly considered as shadowing. 
- check-shadowing: false - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 -linters: - disable: - - maligned - - lll - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - - testpackage - - gci - - gofumpt - - goerr113 - - gomnd - - tparallel - - nestif - - godot - - errorlint - - noctx - - interfacer - - nilerr diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. 
The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/runtime/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md deleted file mode 100644 index 5b1ec6494..000000000 --- a/vendor/github.com/go-openapi/runtime/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) - -# golang Open-API toolkit - runtime - -The runtime component for use in codegeneration or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go deleted file mode 100644 index 6eb6ceb5c..000000000 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -func defaultCloser() error { return nil } - -type byteStreamOpt func(opts *byteStreamOpts) - -// ClosesStream when the bytestream consumer or producer is finished -func ClosesStream(opts *byteStreamOpts) { - opts.Close = true -} - -type byteStreamOpts struct { - Close bool -} - -// ByteStreamConsumer creates a consumer for byte streams, -// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, -// and reads from the provided reader -func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("ByteStreamConsumer requires a reader") // early exit - } - - close := defaultCloser - if vals.Close { - if cl, ok := reader.(io.Closer); ok { - close = cl.Close - } - } - //nolint:errcheck // closing a reader wouldn't fail. - defer close() - - if wrtr, ok := data.(io.Writer); ok { - _, err := io.Copy(wrtr, reader) - return err - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - if bu, ok := data.(encoding.BinaryUnmarshaler); ok { - return bu.UnmarshalBinary(b) - } - - if data != nil { - if str, ok := data.(*string); ok { - *str = string(b) - return nil - } - } - - if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - v.SetBytes(b) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", - data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") - }) -} - -// ByteStreamProducer creates a producer for byte streams, -// takes a Reader/BinaryMarshaler interface or binary slice, -// and writes to a writer (essentially a pipe) -func ByteStreamProducer(opts ...byteStreamOpt) Producer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("ByteStreamProducer requires a writer") // early exit - } - close := defaultCloser - if vals.Close { - if cl, ok := writer.(io.Closer); ok { - close = cl.Close - } - } - //nolint:errcheck // TODO: closing a writer would fail. 
- defer close() - - if rc, ok := data.(io.ReadCloser); ok { - defer rc.Close() - } - - if rdr, ok := data.(io.Reader); ok { - _, err := io.Copy(writer, rdr) - return err - } - - if bm, ok := data.(encoding.BinaryMarshaler); ok { - bytes, err := bm.MarshalBinary() - if err != nil { - return err - } - - _, err = writer.Write(bytes) - return err - } - - if data != nil { - if str, ok := data.(string); ok { - _, err := writer.Write([]byte(str)) - return err - } - - if e, ok := data.(error); ok { - _, err := writer.Write([]byte(e.Error())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - _, err := writer.Write(v.Bytes()) - return err - } - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", - data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") - }) -} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go deleted file mode 100644 index c6c97d9a7..000000000 --- a/vendor/github.com/go-openapi/runtime/client_auth_info.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "github.com/go-openapi/strfmt" - -// A ClientAuthInfoWriterFunc converts a function to a request writer interface -type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error - -// AuthenticateRequest adds authentication data to the request -func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// A ClientAuthInfoWriter implementor knows how to write authentication info to a request -type ClientAuthInfoWriter interface { - AuthenticateRequest(ClientRequest, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go deleted file mode 100644 index fa21eacf3..000000000 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
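ByteStreamConsumer and ByteStreamProducer above are symmetric: the consumer binds a stream onto a caller-supplied sink, the producer serializes a caller-supplied source onto a writer. A minimal usage sketch, assuming the published go-openapi/runtime module; the payload strings are illustrative only:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// The consumer copies the stream into anything implementing io.Writer
	// (or encoding.BinaryUnmarshaler, *string, or a byte-slice pointer).
	var in bytes.Buffer
	if err := runtime.ByteStreamConsumer().Consume(strings.NewReader("payload"), &in); err != nil {
		panic(err)
	}
	fmt.Println(in.String()) // payload

	// The producer accepts io.Reader, encoding.BinaryMarshaler, string,
	// error, or a byte slice and writes it to the supplied writer.
	var out bytes.Buffer
	if err := runtime.ByteStreamProducer().Produce(&out, []byte("response body")); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // response body
}
```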
- -package runtime - -import ( - "context" - "net/http" -) - -// ClientOperation represents the context for a swagger operation to be submitted to the transport -type ClientOperation struct { - ID string - Method string - PathPattern string - ProducesMediaTypes []string - ConsumesMediaTypes []string - Schemes []string - AuthInfo ClientAuthInfoWriter - Params ClientRequestWriter - Reader ClientResponseReader - Context context.Context - Client *http.Client -} - -// A ClientTransport implementor knows how to submit Request objects to some destination -type ClientTransport interface { - //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) - Submit(*ClientOperation) (interface{}, error) -} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go deleted file mode 100644 index 3efda3482..000000000 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/go-openapi/strfmt" -) - -// ClientRequestWriterFunc converts a function to a request writer interface -type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error - -// WriteToRequest adds data to the request -func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// ClientRequestWriter is an interface for things that know how to write to a request -type ClientRequestWriter interface { - WriteToRequest(ClientRequest, strfmt.Registry) error -} - -// ClientRequest is an interface for things that know how to -// add information to a swagger client request -type ClientRequest interface { - SetHeaderParam(string, ...string) error - - GetHeaderParams() http.Header - - SetQueryParam(string, ...string) error - - SetFormParam(string, ...string) error - - SetPathParam(string, string) error - - GetQueryParams() url.Values - - SetFileParam(string, ...NamedReadCloser) error - - SetBodyParam(interface{}) error - - SetTimeout(time.Duration) error - - GetMethod() string - - GetPath() string - - GetBody() []byte - - GetBodyParam() interface{} - - GetFileParam() map[string][]NamedReadCloser -} - -// NamedReadCloser represents a named ReadCloser interface -type NamedReadCloser interface { - io.ReadCloser - Name() string -} - -// NamedReader creates a NamedReadCloser for use as file upload -func NamedReader(name string, rdr io.Reader) NamedReadCloser { - rc, ok := rdr.(io.ReadCloser) - if !ok { - rc = ioutil.NopCloser(rdr) - } - return &namedReadCloser{ - name: name, - cr: rc, - } -} - -type namedReadCloser struct { - name string - cr io.ReadCloser -} - -func (n *namedReadCloser) Close() error { - return n.cr.Close() -} -func (n *namedReadCloser) Read(p []byte) (int, error) { - return n.cr.Read(p) -} -func (n *namedReadCloser) Name() string { - return 
n.name -} - -type TestClientRequest struct { - Headers http.Header - Body interface{} -} - -func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { - if t.Headers == nil { - t.Headers = make(http.Header) - } - t.Headers.Set(name, values[0]) - return nil -} - -func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } - -func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } - -func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } - -func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } - -func (t *TestClientRequest) SetBodyParam(body interface{}) error { - t.Body = body - return nil -} - -func (t *TestClientRequest) SetTimeout(time.Duration) error { - return nil -} - -func (t *TestClientRequest) GetQueryParams() url.Values { return nil } - -func (t *TestClientRequest) GetMethod() string { return "" } - -func (t *TestClientRequest) GetPath() string { return "" } - -func (t *TestClientRequest) GetBody() []byte { return nil } - -func (t *TestClientRequest) GetBodyParam() interface{} { - return t.Body -} - -func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser { - return nil -} - -func (t *TestClientRequest) GetHeaderParams() http.Header { - return t.Headers -} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go deleted file mode 100644 index 0b7e38246..000000000 --- a/vendor/github.com/go-openapi/runtime/client_response.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "fmt" - "io" - - "encoding/json" -) - -// A ClientResponse represents a client response -// This bridges between responses obtained from different transports -type ClientResponse interface { - Code() int - Message() string - GetHeader(string) string - GetHeaders(string) []string - Body() io.ReadCloser -} - -// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation -type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) - -// ReadResponse reads the response -func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { - return read(resp, consumer) -} - -// A ClientResponseReader is an interface for things want to read a response. 
-// An application of this is to create structs from response values -type ClientResponseReader interface { - ReadResponse(ClientResponse, Consumer) (interface{}, error) -} - -// NewAPIError creates a new API error -func NewAPIError(opName string, payload interface{}, code int) *APIError { - return &APIError{ - OperationName: opName, - Response: payload, - Code: code, - } -} - -// APIError wraps an error model and captures the status code -type APIError struct { - OperationName string - Response interface{} - Code int -} - -func (a *APIError) Error() string { - resp, _ := json.Marshal(a.Response) - return fmt.Sprintf("%s (status %d): %s", a.OperationName, a.Code, resp) -} - -func (a *APIError) String() string { - return a.Error() -} - -// IsSuccess returns true when this elapse o k response returns a 2xx status code -func (o *APIError) IsSuccess() bool { - return o.Code/100 == 2 -} - -// IsRedirect returns true when this elapse o k response returns a 3xx status code -func (o *APIError) IsRedirect() bool { - return o.Code/100 == 3 -} - -// IsClientError returns true when this elapse o k response returns a 4xx status code -func (o *APIError) IsClientError() bool { - return o.Code/100 == 4 -} - -// IsServerError returns true when this elapse o k response returns a 5xx status code -func (o *APIError) IsServerError() bool { - return o.Code/100 == 5 -} - -// IsCode returns true when this elapse o k response returns a 4xx status code -func (o *APIError) IsCode(code int) bool { - return o.Code == code -} - -// A ClientResponseStatus is a common interface implemented by all responses on the generated code -// You can use this to treat any client response based on status code -type ClientResponseStatus interface { - IsSuccess() bool - IsRedirect() bool - IsClientError() bool - IsServerError() bool - IsCode(int) bool -} diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go deleted file mode 100644 index a4de897ad..000000000 --- a/vendor/github.com/go-openapi/runtime/constants.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -const ( - // HeaderContentType represents a http content-type header, it's value is supposed to be a mime type - HeaderContentType = "Content-Type" - - // HeaderTransferEncoding represents a http transfer-encoding header. 
- HeaderTransferEncoding = "Transfer-Encoding" - - // HeaderAccept the Accept header - HeaderAccept = "Accept" - - charsetKey = "charset" - - // DefaultMime the default fallback mime type - DefaultMime = "application/octet-stream" - // JSONMime the json mime type - JSONMime = "application/json" - // YAMLMime the yaml mime type - YAMLMime = "application/x-yaml" - // XMLMime the xml mime type - XMLMime = "application/xml" - // TextMime the text mime type - TextMime = "text/plain" - // HTMLMime the html mime type - HTMLMime = "text/html" - // CSVMime the csv mime type - CSVMime = "text/csv" - // MultipartFormMime the multipart form mime type - MultipartFormMime = "multipart/form-data" - // URLencodedFormMime the url encoded form mime type - URLencodedFormMime = "application/x-www-form-urlencoded" -) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go deleted file mode 100644 index d807bd915..000000000 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding/csv" - "errors" - "io" -) - -// CSVConsumer creates a new CSV consumer -func CSVConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("CSVConsumer requires a reader") - } - - csvReader := csv.NewReader(reader) - writer, ok := data.(io.Writer) - if !ok { - return errors.New("data type must be io.Writer") - } - csvWriter := csv.NewWriter(writer) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} - -// CSVProducer creates a new CSV producer -func CSVProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("CSVProducer requires a writer") - } - - dataBytes, ok := data.([]byte) - if !ok { - return errors.New("data type must be byte array") - } - - csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - csvWriter := csv.NewWriter(writer) - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go deleted file mode 100644 index 0d390cfd6..000000000 --- a/vendor/github.com/go-openapi/runtime/discard.go +++ /dev/null @@ -1,9 +0,0 @@ -package runtime - -import "io" - -// DiscardConsumer does absolutely nothing, it's a black hole. -var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) - -// DiscardProducer does absolutely nothing, it's a black hole. 
-var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go deleted file mode 100644 index 397d8a459..000000000 --- a/vendor/github.com/go-openapi/runtime/file.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "github.com/go-openapi/swag" - -type File = swag.File diff --git a/vendor/github.com/go-openapi/runtime/flagext/byte_size.go b/vendor/github.com/go-openapi/runtime/flagext/byte_size.go deleted file mode 100644 index 0f3eeba1f..000000000 --- a/vendor/github.com/go-openapi/runtime/flagext/byte_size.go +++ /dev/null @@ -1,38 +0,0 @@ -package flagext - -import ( - "github.com/docker/go-units" -) - -// ByteSize used to pass byte sizes to a go-flags CLI -type ByteSize int - -// MarshalFlag implements go-flags Marshaller interface -func (b ByteSize) MarshalFlag() (string, error) { - return units.HumanSize(float64(b)), nil -} - -// UnmarshalFlag implements go-flags Unmarshaller interface -func (b *ByteSize) UnmarshalFlag(value string) error { - sz, err := units.FromHumanSize(value) - if err != nil { - return err - } - *b = ByteSize(int(sz)) - return nil -} - -// String method for a bytesize (pflag value and stringer interface) -func (b ByteSize) String() string { - return units.HumanSize(float64(b)) -} - -// Set the value of this bytesize (pflag value interfaces) -func (b *ByteSize) Set(value string) error { - return b.UnmarshalFlag(value) -} - -// Type returns the type of the pflag value (pflag value interface) -func (b *ByteSize) Type() string { - return "byte-size" -} diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go deleted file mode 100644 index 4d111db4f..000000000 --- a/vendor/github.com/go-openapi/runtime/headers.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
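flagext.ByteSize above implements both the go-flags Marshaller/Unmarshaller interfaces and the pflag Value interface, so one type can back either CLI library. A short sketch; the printed values assume docker/go-units decimal (SI) size semantics:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime/flagext"
)

func main() {
	var size flagext.ByteSize

	// Set delegates to UnmarshalFlag, which parses via go-units FromHumanSize.
	if err := size.Set("2MB"); err != nil {
		panic(err)
	}
	fmt.Println(int(size))     // 2000000
	fmt.Println(size.String()) // 2MB
	fmt.Println(size.Type())   // byte-size
}
```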
- -package runtime - -import ( - "mime" - "net/http" - - "github.com/go-openapi/errors" -) - -// ContentType parses a content type header -func ContentType(headers http.Header) (string, string, error) { - ct := headers.Get(HeaderContentType) - orig := ct - if ct == "" { - ct = DefaultMime - } - if ct == "" { - return "", "", nil - } - - mt, opts, err := mime.ParseMediaType(ct) - if err != nil { - return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) - } - - if cs, ok := opts[charsetKey]; ok { - return mt, cs, nil - } - - return mt, "", nil -} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go deleted file mode 100644 index e33412868..000000000 --- a/vendor/github.com/go-openapi/runtime/interfaces.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/go-openapi/strfmt" -) - -// OperationHandlerFunc an adapter for a function to the OperationHandler interface -type OperationHandlerFunc func(interface{}) (interface{}, error) - -// Handle implements the operation handler interface -func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { - return s(data) -} - -// OperationHandler a handler for a swagger operation -type OperationHandler interface { - Handle(interface{}) (interface{}, error) -} - -// ConsumerFunc represents a function that can be used as a consumer -type ConsumerFunc func(io.Reader, interface{}) error - -// Consume consumes the reader into the data parameter -func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { - return fn(reader, data) -} - -// Consumer implementations know how to bind the values on the provided interface to -// data provided by the request body -type Consumer interface { - // Consume performs the binding of request values - Consume(io.Reader, interface{}) error -} - -// ProducerFunc represents a function that can be used as a producer -type ProducerFunc func(io.Writer, interface{}) error - -// Produce produces the response for the provided data -func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { - return f(writer, data) -} - -// Producer implementations know how to turn the provided interface into a valid -// HTTP response -type Producer interface { - // Produce writes to the http response - Produce(io.Writer, interface{}) error -} - -// AuthenticatorFunc turns a function into an authenticator -type AuthenticatorFunc func(interface{}) (bool, interface{}, error) - -// Authenticate authenticates the request with the provided data -func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { - return f(params) -} - -// Authenticator represents an authentication strategy -// implementations of Authenticator know how to authenticate the -// request data and translate that into a valid principal object or an error -type Authenticator 
interface { - Authenticate(interface{}) (bool, interface{}, error) -} - -// AuthorizerFunc turns a function into an authorizer -type AuthorizerFunc func(*http.Request, interface{}) error - -// Authorize authorizes the processing of the request for the principal -func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { - return f(r, principal) -} - -// Authorizer represents an authorization strategy -// implementations of Authorizer know how to authorize the principal object -// using the request data and returns error if unauthorized -type Authorizer interface { - Authorize(*http.Request, interface{}) error -} - -// Validatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. -// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the validations obtained from the spec -type Validatable interface { - Validate(strfmt.Registry) error -} - -// ContextValidatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. -// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the context validations obtained from the spec -type ContextValidatable interface { - ContextValidate(context.Context, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go deleted file mode 100644 index 5a690559c..000000000 --- a/vendor/github.com/go-openapi/runtime/json.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
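Consumer and Producer are single-method interfaces, and ConsumerFunc/ProducerFunc let a bare function satisfy them. A hedged sketch of a custom consumer built from ConsumerFunc; the uppercasing rule is purely illustrative, not anything the package ships:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// ConsumerFunc adapts a plain function to the runtime.Consumer interface.
	upper := runtime.ConsumerFunc(func(r io.Reader, data interface{}) error {
		b, err := io.ReadAll(r)
		if err != nil {
			return err
		}
		s, ok := data.(*string)
		if !ok {
			return fmt.Errorf("expected *string, got %T", data)
		}
		*s = strings.ToUpper(string(b))
		return nil
	})

	var out string
	if err := upper.Consume(strings.NewReader("hello"), &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // HELLO
}
```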
- -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONConsumer creates a new JSON consumer -func JSONConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := json.NewDecoder(reader) - dec.UseNumber() // preserve number formats - return dec.Decode(data) - }) -} - -// JSONProducer creates a new JSON producer -func JSONProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := json.NewEncoder(writer) - enc.SetEscapeHTML(false) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/logger/logger.go b/vendor/github.com/go-openapi/runtime/logger/logger.go deleted file mode 100644 index 6f4debcc1..000000000 --- a/vendor/github.com/go-openapi/runtime/logger/logger.go +++ /dev/null @@ -1,20 +0,0 @@ -package logger - -import "os" - -type Logger interface { - Printf(format string, args ...interface{}) - Debugf(format string, args ...interface{}) -} - -func DebugEnabled() bool { - d := os.Getenv("SWAGGER_DEBUG") - if d != "" && d != "false" && d != "0" { - return true - } - d = os.Getenv("DEBUG") - if d != "" && d != "false" && d != "0" { - return true - } - return false -} diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go deleted file mode 100644 index f7e67ebb9..000000000 --- a/vendor/github.com/go-openapi/runtime/logger/standard.go +++ /dev/null @@ -1,22 +0,0 @@ -package logger - -import ( - "fmt" - "os" -) - -type StandardLogger struct{} - -func (StandardLogger) Printf(format string, args ...interface{}) { - if len(format) == 0 || format[len(format)-1] != '\n' { - format += "\n" - } - fmt.Fprintf(os.Stderr, format, args...) -} - -func (StandardLogger) Debugf(format string, args ...interface{}) { - if len(format) == 0 || format[len(format)-1] != '\n' { - format += "\n" - } - fmt.Fprintf(os.Stderr, format, args...) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go deleted file mode 100644 index 250e35fb0..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/context.go +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - stdContext "context" - "fmt" - "net/http" - "strings" - "sync" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/logger" - "github.com/go-openapi/runtime/middleware/untyped" - "github.com/go-openapi/runtime/security" -) - -// Debug when true turns on verbose logging -var Debug = logger.DebugEnabled() -var Logger logger.Logger = logger.StandardLogger{} - -func debugLog(format string, args ...interface{}) { - if Debug { - Logger.Printf(format, args...) 
- } -} - -// A Builder can create middlewares -type Builder func(http.Handler) http.Handler - -// PassthroughBuilder returns the handler, aka the builder identity function -func PassthroughBuilder(handler http.Handler) http.Handler { return handler } - -// RequestBinder is an interface for types to implement -// when they want to be able to bind from a request -type RequestBinder interface { - BindRequest(*http.Request, *MatchedRoute) error -} - -// Responder is an interface for types to implement -// when they want to be considered for writing HTTP responses -type Responder interface { - WriteResponse(http.ResponseWriter, runtime.Producer) -} - -// ResponderFunc wraps a func as a Responder interface -type ResponderFunc func(http.ResponseWriter, runtime.Producer) - -// WriteResponse writes to the response -func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { - fn(rw, pr) -} - -// Context is a type safe wrapper around an untyped request context -// used throughout to store request context with the standard context attached -// to the http.Request -type Context struct { - spec *loads.Document - analyzer *analysis.Spec - api RoutableAPI - router Router -} - -type routableUntypedAPI struct { - api *untyped.API - hlock *sync.Mutex - handlers map[string]map[string]http.Handler - defaultConsumes string - defaultProduces string -} - -func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI { - var handlers map[string]map[string]http.Handler - if spec == nil || api == nil { - return nil - } - analyzer := analysis.New(spec.Spec()) - for method, hls := range analyzer.Operations() { - um := strings.ToUpper(method) - for path, op := range hls { - schemes := analyzer.SecurityRequirementsFor(op) - - if oh, ok := api.OperationHandlerFor(method, path); ok { - if handlers == nil { - handlers = make(map[string]map[string]http.Handler) - } - if b, ok := handlers[um]; !ok || b == nil { - handlers[um] = make(map[string]http.Handler) - } - - var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // lookup route info in the context - route, rCtx, _ := context.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - - // bind and validate the request using reflection - var bound interface{} - var validation error - bound, r, validation = context.BindAndValidate(r, route) - if validation != nil { - context.Respond(w, r, route.Produces, route, validation) - return - } - - // actually handle the request - result, err := oh.Handle(bound) - if err != nil { - // respond with failure - context.Respond(w, r, route.Produces, route, err) - return - } - - // respond with success - context.Respond(w, r, route.Produces, route, result) - }) - - if len(schemes) > 0 { - handler = newSecureAPI(context, handler) - } - handlers[um][path] = handler - } - } - } - - return &routableUntypedAPI{ - api: api, - hlock: new(sync.Mutex), - handlers: handlers, - defaultProduces: api.DefaultProduces, - defaultConsumes: api.DefaultConsumes, - } -} - -func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) { - r.hlock.Lock() - paths, ok := r.handlers[strings.ToUpper(method)] - if !ok { - r.hlock.Unlock() - return nil, false - } - handler, ok := paths[path] - r.hlock.Unlock() - return handler, ok -} -func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { - return r.api.ServeError -} -func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) 
map[string]runtime.Consumer { - return r.api.ConsumersFor(mediaTypes) -} -func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { - return r.api.ProducersFor(mediaTypes) -} -func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { - return r.api.AuthenticatorsFor(schemes) -} -func (r *routableUntypedAPI) Authorizer() runtime.Authorizer { - return r.api.Authorizer() -} -func (r *routableUntypedAPI) Formats() strfmt.Registry { - return r.api.Formats() -} - -func (r *routableUntypedAPI) DefaultProduces() string { - return r.defaultProduces -} - -func (r *routableUntypedAPI) DefaultConsumes() string { - return r.defaultConsumes -} - -// NewRoutableContext creates a new context for a routable API -func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { - var an *analysis.Spec - if spec != nil { - an = analysis.New(spec.Spec()) - } - ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes} - return ctx -} - -// NewContext creates a new context wrapper -func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { - var an *analysis.Spec - if spec != nil { - an = analysis.New(spec.Spec()) - } - ctx := &Context{spec: spec, analyzer: an} - ctx.api = newRoutableUntypedAPI(spec, api, ctx) - ctx.router = routes - return ctx -} - -// Serve serves the specified spec with the specified api registrations as a http.Handler -func Serve(spec *loads.Document, api *untyped.API) http.Handler { - return ServeWithBuilder(spec, api, PassthroughBuilder) -} - -// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated -// by the Builder -func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { - context := NewContext(spec, api, nil) - return context.APIHandler(builder) -} - -type contextKey int8 - -const ( - _ contextKey = iota - ctxContentType - ctxResponseFormat - ctxMatchedRoute - ctxBoundParams - ctxSecurityPrincipal - ctxSecurityScopes -) - -// MatchedRouteFrom request context value. -func MatchedRouteFrom(req *http.Request) *MatchedRoute { - mr := req.Context().Value(ctxMatchedRoute) - if mr == nil { - return nil - } - if res, ok := mr.(*MatchedRoute); ok { - return res - } - return nil -} - -// SecurityPrincipalFrom request context value. -func SecurityPrincipalFrom(req *http.Request) interface{} { - return req.Context().Value(ctxSecurityPrincipal) -} - -// SecurityScopesFrom request context value. 
-func SecurityScopesFrom(req *http.Request) []string { - rs := req.Context().Value(ctxSecurityScopes) - if res, ok := rs.([]string); ok { - return res - } - return nil -} - -type contentTypeValue struct { - MediaType string - Charset string -} - -// BasePath returns the base path for this API -func (c *Context) BasePath() string { - return c.spec.BasePath() -} - -// RequiredProduces returns the accepted content types for responses -func (c *Context) RequiredProduces() []string { - return c.analyzer.RequiredProduces() -} - -// BindValidRequest binds a params object to a request but only when the request is valid -// if the request is not valid an error will be returned -func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error { - var res []error - var requestContentType string - - // check and validate content type, select consumer - if runtime.HasBody(request) { - ct, _, err := runtime.ContentType(request.Header) - if err != nil { - res = append(res, err) - } else { - if err := validateContentType(route.Consumes, ct); err != nil { - res = append(res, err) - } - if len(res) == 0 { - cons, ok := route.Consumers[ct] - if !ok { - res = append(res, errors.New(500, "no consumer registered for %s", ct)) - } else { - route.Consumer = cons - requestContentType = ct - } - } - } - } - - // check and validate the response format - if len(res) == 0 { - // if the route does not provide Produces and a default contentType could not be identified - // based on a body, typical for GET and DELETE requests, then default contentType to. - if len(route.Produces) == 0 && requestContentType == "" { - requestContentType = "*/*" - } - - if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" { - res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces)) - } - } - - // now bind the request with the provided binder - // it's assumed the binder will also validate the request and return an error if the - // request is invalid - if binder != nil && len(res) == 0 { - if err := binder.BindRequest(request, route); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// ContentType gets the parsed value of a content type -// Returns the media type, its charset and a shallow copy of the request -// when its context doesn't contain the content type value, otherwise it returns -// the same request -// Returns the error that runtime.ContentType may retunrs. 
-func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok { - return v.MediaType, v.Charset, request, nil - } - - mt, cs, err := runtime.ContentType(request.Header) - if err != nil { - return "", "", nil, err - } - rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs}) - return mt, cs, request.WithContext(rCtx), nil -} - -// LookupRoute looks a route up and returns true when it is found -func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { - if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { - return route, ok - } - return nil, false -} - -// RouteInfo tries to match a route for this request -// Returns the matched route, a shallow copy of the request if its context -// contains the matched router, otherwise the same request, and a bool to -// indicate if it the request matches one of the routes, if it doesn't -// then it returns false and nil for the other two return values -func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok { - return v, request, ok - } - - if route, ok := c.LookupRoute(request); ok { - rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route) - return route, request.WithContext(rCtx), ok - } - - return nil, nil, false -} - -// ResponseFormat negotiates the response content type -// Returns the response format and a shallow copy of the request if its context -// doesn't contain the response format, otherwise the same request -func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { - var rCtx = r.Context() - - if v, ok := rCtx.Value(ctxResponseFormat).(string); ok { - debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v) - return v, r - } - - format := NegotiateContentType(r, offers, "") - if format != "" { - debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format) - r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format)) - } - debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format) - return format, r -} - -// AllowedMethods gets the allowed methods for the path of this request -func (c *Context) AllowedMethods(request *http.Request) []string { - return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) -} - -// ResetAuth removes the current principal from the request context -func (c *Context) ResetAuth(request *http.Request) *http.Request { - rctx := request.Context() - rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil) - rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil) - return request.WithContext(rctx) -} - -// Authorize authorizes the request -// Returns the principal object and a shallow copy of the request when its -// context doesn't contain the principal, otherwise the same request or an error -// (the last) if one of the authenticators returns one or an Unauthenticated error -func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) { - if route == nil || !route.HasAuth() { - return nil, nil, nil - } - - var rCtx = request.Context() - if v := rCtx.Value(ctxSecurityPrincipal); v != nil { - return v, request, nil - } - - applies, usr, err := route.Authenticators.Authenticate(request, route) - if !applies || err != 
nil || !route.Authenticators.AllowsAnonymous() && usr == nil { - if err != nil { - return nil, nil, err - } - return nil, nil, errors.Unauthenticated("invalid credentials") - } - if route.Authorizer != nil { - if err := route.Authorizer.Authorize(request, usr); err != nil { - if _, ok := err.(errors.Error); ok { - return nil, nil, err - } - - return nil, nil, errors.New(http.StatusForbidden, err.Error()) - } - } - - rCtx = request.Context() - - rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr) - rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes()) - return usr, request.WithContext(rCtx), nil -} - -// BindAndValidate binds and validates the request -// Returns the validation map and a shallow copy of the request when its context -// doesn't contain the validation, otherwise it returns the same request or an -// CompositeValidationError error -func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok { - debugLog("got cached validation (valid: %t)", len(v.result) == 0) - if len(v.result) > 0 { - return v.bound, request, errors.CompositeValidationError(v.result...) - } - return v.bound, request, nil - } - result := validateRequest(c, request, matched) - rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result) - request = request.WithContext(rCtx) - if len(result.result) > 0 { - return result.bound, request, errors.CompositeValidationError(result.result...) - } - debugLog("no validation errors found") - return result.bound, request, nil -} - -// NotFound the default not found responder for when no route has been matched yet -func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) { - c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found")) -} - -// Respond renders the response after doing some content negotiation -func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) { - debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces) - offers := []string{} - for _, mt := range produces { - if mt != c.api.DefaultProduces() { - offers = append(offers, mt) - } - } - // the default producer is last so more specific producers take precedence - offers = append(offers, c.api.DefaultProduces()) - debugLog("offers: %v", offers) - - var format string - format, r = c.ResponseFormat(r, offers) - rw.Header().Set(runtime.HeaderContentType, format) - - if resp, ok := data.(Responder); ok { - producers := route.Producers - prod, ok := producers[format] - if !ok { - prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) - pr, ok := prods[c.api.DefaultProduces()] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - prod = pr - } - resp.WriteResponse(rw, prod) - return - } - - if err, ok := data.(error); ok { - if format == "" { - rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime) - } - - if realm := security.FailedBasicAuth(r); realm != "" { - rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm)) - } - - if route == nil || route.Operation == nil { - c.api.ServeErrorFor("")(rw, r, err) - return - } - c.api.ServeErrorFor(route.Operation.ID)(rw, r, err) - return - } - - if route == nil || route.Operation == nil { - rw.WriteHeader(200) - if r.Method == "HEAD" { - return - } - producers 
:= c.api.ProducersFor(normalizeOffers(offers)) - prod, ok := producers[format] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - if err := prod.Produce(rw, data); err != nil { - panic(err) // let the recovery middleware deal with this - } - return - } - - if _, code, ok := route.Operation.SuccessResponse(); ok { - rw.WriteHeader(code) - if code == 204 || r.Method == "HEAD" { - return - } - - producers := route.Producers - prod, ok := producers[format] - if !ok { - if !ok { - prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) - pr, ok := prods[c.api.DefaultProduces()] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - prod = pr - } - } - if err := prod.Produce(rw, data); err != nil { - panic(err) // let the recovery middleware deal with this - } - return - } - - c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response")) -} - -func (c *Context) APIHandlerSwaggerUI(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - - var title string - sp := c.spec.Spec() - if sp != nil && sp.Info != nil && sp.Info.Title != "" { - title = sp.Info.Title - } - - swaggerUIOpts := SwaggerUIOpts{ - BasePath: c.BasePath(), - Title: title, - } - - return Spec("", c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b))) -} - -// APIHandler returns a handler to serve the API, this includes a swagger spec, router and the contract defined in the swagger spec -func (c *Context) APIHandler(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - - var title string - sp := c.spec.Spec() - if sp != nil && sp.Info != nil && sp.Info.Title != "" { - title = sp.Info.Title - } - - redocOpts := RedocOpts{ - BasePath: c.BasePath(), - Title: title, - } - - return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b))) -} - -// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec -func (c *Context) RoutesHandler(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - return NewRouter(c, b(NewOperationExecutor(c))) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE deleted file mode 100644 index e65039ad8..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Naoya Inada - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md
deleted file mode 100644
index 30109e17d..000000000
--- a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md
+++ /dev/null
@@ -1,180 +0,0 @@
-# Denco [![Build Status](https://travis-ci.org/naoina/denco.png?branch=master)](https://travis-ci.org/naoina/denco)
-
-The fast and flexible HTTP request router for [Go](http://golang.org).
-
-Denco is based on the Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter).
-However, Denco is optimized and adds several features.
-
-## Features
-
-* Fast (see [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark))
-* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`)
-* Small (but sufficient) URL router API
-* HTTP request multiplexer like `http.ServeMux`
-
-## Installation
-
-    go get -u github.com/go-openapi/runtime/middleware/denco
-
-## Using as an HTTP request multiplexer
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"net/http"
-
-	"github.com/go-openapi/runtime/middleware/denco"
-)
-
-func Index(w http.ResponseWriter, r *http.Request, params denco.Params) {
-	fmt.Fprintf(w, "Welcome to Denco!\n")
-}
-
-func User(w http.ResponseWriter, r *http.Request, params denco.Params) {
-	fmt.Fprintf(w, "Hello %s!\n", params.Get("name"))
-}
-
-func main() {
-	mux := denco.NewMux()
-	handler, err := mux.Build([]denco.Handler{
-		mux.GET("/", Index),
-		mux.GET("/user/:name", User),
-		mux.POST("/user/:name", User),
-	})
-	if err != nil {
-		panic(err)
-	}
-	log.Fatal(http.ListenAndServe(":8080", handler))
-}
-```
-
-## Using as a URL router
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/go-openapi/runtime/middleware/denco"
-)
-
-type route struct {
-	name string
-}
-
-func main() {
-	router := denco.New()
-	router.Build([]denco.Record{
-		{"/", &route{"root"}},
-		{"/user/:id", &route{"user"}},
-		{"/user/:name/:id", &route{"username"}},
-		{"/static/*filepath", &route{"static"}},
-	})
-
-	data, params, found := router.Lookup("/")
-	// prints `&main.route{name:"root"}, denco.Params(nil), true`.
-	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
-
-	data, params, found = router.Lookup("/user/hoge")
-	// prints `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`.
-	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
-
-	data, params, found = router.Lookup("/user/hoge/7")
-	// prints `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`.
-	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
-
-	data, params, found = router.Lookup("/static/path/to/file")
-	// prints `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`.
-	fmt.Printf("%#v, %#v, %#v\n", data, params, found)
-}
-```
-
-See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details.
-
-## Getting the value of a path parameter
-
-You can get the value of a path parameter in two ways:
-
-1. Using the [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method
-2. Iterating over the params in a loop
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/go-openapi/runtime/middleware/denco"
-)
-
-func main() {
-	router := denco.New()
-	if err := router.Build([]denco.Record{
-		{"/user/:name/:id", "route1"},
-	}); err != nil {
-		panic(err)
-	}
-
-	// 1. Using the denco.Params.Get method.
-	_, params, _ := router.Lookup("/user/alice/1")
-	name := params.Get("name")
-	if name != "" {
-		fmt.Printf("Hello %s.\n", name) // prints "Hello alice.".
-	}
-
-	// 2. Iterating over the params in a loop.
-	for _, param := range params {
-		if param.Name == "name" {
-			fmt.Printf("Hello %s.\n", param.Value) // prints "Hello alice.".
-		}
-	}
-}
-```
-
-## URL patterns
-
-Denco's route matching strategy is "most nearly matching".
-
-When the routes `/:name` and `/alice` have been built, the URI `/alice` matches the route `/alice`, not `/:name`,
-because the URI `/alice` is a closer match to the route `/alice` than to `/:name`.
-
-As a larger example, when the routes below have been built:
-
-```
-/user/alice
-/user/:name
-/user/:name/:id
-/user/alice/:id
-/user/:id/bob
-```
-
-the routes match as follows:
-
-```
-/user/alice     => "/user/alice" (no match with "/user/:name")
-/user/bob       => "/user/:name"
-/user/naoina/1  => "/user/:name/1"
-/user/alice/1   => "/user/alice/:id" (no match with "/user/:name/:id")
-/user/1/bob     => "/user/:id/bob" (no match with "/user/:name/:id")
-/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob")
-```
-
-## Limitations
-
-Denco has the following limitations:
-
-* The number of param records (such as `/:name`) must be less than 2^22
-* The number of elements of the internal slice must be less than 2^22
-
-## Benchmarks
-
-    cd $GOPATH/src/github.com/go-openapi/runtime/middleware/denco
-    go test -bench . -benchmem
-
-## License
-
-Denco is licensed under the MIT License.
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
deleted file mode 100644
index 5d2691ec3..000000000
--- a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
+++ /dev/null
@@ -1,460 +0,0 @@
-// Package denco provides a fast URL router.
-package denco
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-)
-
-const (
-	// ParamCharacter is a special character for a path parameter.
-	ParamCharacter = ':'
-
-	// WildcardCharacter is a special character for a wildcard path parameter.
-	WildcardCharacter = '*'
-
-	// TerminationCharacter is a special character for the end of a path.
-	TerminationCharacter = '#'
-
-	// SeparatorCharacter separates path segments.
-	SeparatorCharacter = '/'
-
-	// PathParamCharacter indicates a RESTCONF path param.
-	PathParamCharacter = '='
-
-	// MaxSize is the max size of records and the internal slice.
-	MaxSize = (1 << 22) - 1
-)
-
-// Router represents a URL router.
-type Router struct {
-	// SizeHint is the expected maximum number of path parameters in the records passed to Build.
-	// SizeHint is used to determine the capacity of the memory to allocate.
-	// By default, SizeHint is derived from the records given to Build.
-	SizeHint int
-
-	static map[string]interface{}
-	param  *doubleArray
-}
-
-// New returns a new Router.
-func New() *Router {
-	return &Router{
-		SizeHint: -1,
-		static:   make(map[string]interface{}),
-		param:    newDoubleArray(),
-	}
-}
-
-// Lookup returns the data and path parameters associated with path.
-// params is a slice of Params arranged in the order in which the parameters appeared.
-// e.g. when the built routing path is "/path/to/:id/:name" and the given path is "/path/to/1/alice",
-// the params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
-func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) {
-	if data, found := rt.static[path]; found {
-		return data, nil, true
-	}
-	if len(rt.param.node) == 1 {
-		return nil, nil, false
-	}
-	nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1)
-	if !found {
-		return nil, nil, false
-	}
-	for i := 0; i < len(params); i++ {
-		params[i].Name = nd.paramNames[i]
-	}
-	return nd.data, params, true
-}
-
-// Build builds the URL router from records.
-func (rt *Router) Build(records []Record) error {
-	statics, params := makeRecords(records)
-	if len(params) > MaxSize {
-		return fmt.Errorf("denco: too many records")
-	}
-	if rt.SizeHint < 0 {
-		rt.SizeHint = 0
-		for _, p := range params {
-			size := 0
-			for _, k := range p.Key {
-				if k == ParamCharacter || k == WildcardCharacter {
-					size++
-				}
-			}
-			if size > rt.SizeHint {
-				rt.SizeHint = size
-			}
-		}
-	}
-	for _, r := range statics {
-		rt.static[r.Key] = r.Value
-	}
-	if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Param represents the name and value of a path parameter.
-type Param struct {
-	Name  string
-	Value string
-}
-
-// Params represents a collection of path parameters.
-type Params []Param
-
-// Get gets the first value associated with the given name.
-// If there are no values associated with the key, Get returns "".
-func (ps Params) Get(name string) string {
-	for _, p := range ps {
-		if p.Name == name {
-			return p.Value
-		}
-	}
-	return ""
-}
-
-type doubleArray struct {
-	bc   []baseCheck
-	node []*node
-}
-
-func newDoubleArray() *doubleArray {
-	return &doubleArray{
-		bc:   []baseCheck{0},
-		node: []*node{nil}, // The start index is adjusted to 1 because 0 is used as a marker for a non-existent node.
-	}
-}
-
-// baseCheck contains BASE, CHECK and Extra flags.
-// From the top: 22 bits of BASE, 2 bits of Extra flags and 8 bits of CHECK.
-//
-//  BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
-// |----------------------|--|--------|
-// 32                    10  8        0
-type baseCheck uint32
-
-func (bc baseCheck) Base() int {
-	return int(bc >> 10)
-}
-
-func (bc *baseCheck) SetBase(base int) {
-	*bc |= baseCheck(base) << 10
-}
-
-func (bc baseCheck) Check() byte {
-	return byte(bc)
-}
-
-func (bc *baseCheck) SetCheck(check byte) {
-	*bc |= baseCheck(check)
-}
-
-func (bc baseCheck) IsEmpty() bool {
-	return bc&0xfffffcff == 0
-}
-
-func (bc baseCheck) IsSingleParam() bool {
-	return bc&paramTypeSingle == paramTypeSingle
-}
-
-func (bc baseCheck) IsWildcardParam() bool {
-	return bc&paramTypeWildcard == paramTypeWildcard
-}
-
-func (bc baseCheck) IsAnyParam() bool {
-	return bc&paramTypeAny != 0
-}
-
-func (bc *baseCheck) SetSingleParam() {
-	*bc |= (1 << 8)
-}
-
-func (bc *baseCheck) SetWildcardParam() {
-	*bc |= (1 << 9)
-}
-
-const (
-	paramTypeSingle   = 0x0100
-	paramTypeWildcard = 0x0200
-	paramTypeAny      = 0x0300
-)
-
-func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) {
-	indices := make([]uint64, 0, 1)
-	for i := 0; i < len(path); i++ {
-		if da.bc[idx].IsAnyParam() {
-			indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff))
-		}
-		c := path[i]
-		if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c {
-			goto BACKTRACKING
-		}
-	}
-	if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
-		return da.node[da.bc[next].Base()], params, true
-	}
-BACKTRACKING:
-	for j := len(indices) - 1; j >= 0; j-- {
-		i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
-		if da.bc[idx].IsSingleParam() {
-			idx := nextIndex(da.bc[idx].Base(), ParamCharacter)
-			if idx >= len(da.bc) {
-				break
-			}
-			next := NextSeparator(path, i)
-			params := append(params, Param{Value: path[i:next]})
-			if nd, params, found := da.lookup(path[next:], params, idx); found {
-				return nd, params, true
-			}
-		}
-		if da.bc[idx].IsWildcardParam() {
-			idx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
-			params := append(params, Param{Value: path[i:]})
-			return da.node[da.bc[idx].Base()], params, true
-		}
-	}
-	return nil, nil, false
-}
-
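The `baseCheck` helpers above pack all three fields of a double-array node into a single `uint32`: BASE in the top 22 bits, two param-type flags, and CHECK in the low byte. A self-contained sketch of the same layout (the `packedNode` name is illustrative, not part of the vendored code):

```go
package main

import "fmt"

// packedNode mirrors the removed baseCheck layout: bits 31..10 hold BASE,
// bits 9..8 hold the param-type flags, and bits 7..0 hold CHECK.
type packedNode uint32

func (n *packedNode) setBase(base int) { *n |= packedNode(base) << 10 }
func (n *packedNode) setCheck(c byte)  { *n |= packedNode(c) }
func (n packedNode) base() int         { return int(n >> 10) }
func (n packedNode) check() byte       { return byte(n) }

// isEmpty masks out the two flag bits (0xfffffcff), exactly as
// baseCheck.IsEmpty does above.
func (n packedNode) isEmpty() bool { return n&0xfffffcff == 0 }

func main() {
	var n packedNode
	n.setBase(12345)
	n.setCheck('/')
	fmt.Println(n.base(), string(n.check()), n.isEmpty()) // 12345 / false
}
```

-// build builds double-array from records.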
-func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error { - sort.Stable(recordSlice(srcs)) - base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase) - if err != nil { - return err - } - if leaf != nil { - nd, err := makeNode(leaf) - if err != nil { - return err - } - da.bc[idx].SetBase(len(da.node)) - da.node = append(da.node, nd) - } - for _, sib := range siblings { - da.setCheck(nextIndex(base, sib.c), sib.c) - } - for _, sib := range siblings { - records := srcs[sib.start:sib.end] - switch sib.c { - case ParamCharacter: - for _, r := range records { - next := NextSeparator(r.Key, depth+1) - name := r.Key[depth+1 : next] - r.paramNames = append(r.paramNames, name) - r.Key = r.Key[next:] - } - da.bc[idx].SetSingleParam() - if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { - return err - } - case WildcardCharacter: - r := records[0] - name := r.Key[depth+1 : len(r.Key)-1] - r.paramNames = append(r.paramNames, name) - r.Key = "" - da.bc[idx].SetWildcardParam() - if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { - return err - } - default: - if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil { - return err - } - } - } - return nil -} - -// setBase sets BASE. -func (da *doubleArray) setBase(i, base int) { - da.bc[i].SetBase(base) -} - -// setCheck sets CHECK. -func (da *doubleArray) setCheck(i int, check byte) { - da.bc[i].SetCheck(check) -} - -// findEmptyIndex returns an index of unused BASE/CHECK node. -func (da *doubleArray) findEmptyIndex(start int) int { - i := start - for ; i < len(da.bc); i++ { - if da.bc[i].IsEmpty() { - break - } - } - return i -} - -// findBase returns good BASE. -func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) { - for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) { - base = nextIndex(idx, firstChar) - if _, used := usedBase[base]; used { - continue - } - i := 0 - for ; i < len(siblings); i++ { - next := nextIndex(base, siblings[i].c) - if len(da.bc) <= next { - da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...) - } - if !da.bc[next].IsEmpty() { - break - } - } - if i == len(siblings) { - break - } - } - usedBase[base] = struct{}{} - return base -} - -func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) { - siblings, leaf, err = makeSiblings(records, depth) - if err != nil { - return -1, nil, nil, err - } - if len(siblings) < 1 { - return -1, nil, leaf, nil - } - base = da.findBase(siblings, idx, usedBase) - if base > MaxSize { - return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice") - } - da.setBase(idx, base) - return base, siblings, leaf, err -} - -// node represents a node of Double-Array. -type node struct { - data interface{} - - // Names of path parameters. - paramNames []string -} - -// makeNode returns a new node from record. -func makeNode(r *record) (*node, error) { - dups := make(map[string]bool) - for _, name := range r.paramNames { - if dups[name] { - return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key) - } - dups[name] = true - } - return &node{data: r.Value, paramNames: r.paramNames}, nil -} - -// sibling represents an intermediate data of build for Double-Array. -type sibling struct { - // An index of start of duplicated characters. 
-	start int
-
-	// The index of the end of duplicated characters.
-	end int
-
-	// The character of the sibling.
-	c byte
-}
-
-// nextIndex returns the next index of the BASE/CHECK array.
-func nextIndex(base int, c byte) int {
-	return base ^ int(c)
-}
-
-// makeSiblings returns a slice of siblings.
-func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) {
-	var (
-		pc byte
-		n  int
-	)
-	for i, r := range records {
-		if len(r.Key) <= depth {
-			leaf = r
-			continue
-		}
-		c := r.Key[depth]
-		switch {
-		case pc < c:
-			sib = append(sib, sibling{start: i, c: c})
-		case pc == c:
-			continue
-		default:
-			return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted")
-		}
-		if n > 0 {
-			sib[n-1].end = i
-		}
-		pc = c
-		n++
-	}
-	if n == 0 {
-		return nil, leaf, nil
-	}
-	sib[n-1].end = len(records)
-	return sib, leaf, nil
-}
-
-// Record represents record data for router construction.
-type Record struct {
-	// Key for router construction.
-	Key string
-
-	// Result value for Key.
-	Value interface{}
-}
-
-// NewRecord returns a new Record.
-func NewRecord(key string, value interface{}) Record {
-	return Record{
-		Key:   key,
-		Value: value,
-	}
-}
-
-// record represents a record that is used to build the Double-Array.
-type record struct {
-	Record
-	paramNames []string
-}
-
-// makeRecords returns the records that are used to build the Double-Arrays.
-func makeRecords(srcs []Record) (statics, params []*record) {
-	termChar := string(TerminationCharacter)
-	paramPrefix := string(SeparatorCharacter) + string(ParamCharacter)
-	wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter)
-	restconfPrefix := string(PathParamCharacter) + string(ParamCharacter)
-	for _, r := range srcs {
-		if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) || strings.Contains(r.Key, restconfPrefix) {
-			r.Key += termChar
-			params = append(params, &record{Record: r})
-		} else {
-			statics = append(statics, &record{Record: r})
-		}
-	}
-	return statics, params
-}
-
-// recordSlice represents a slice of records for sorting and implements the sort.Interface.
-type recordSlice []*record
-
-// Len implements the sort.Interface.Len.
-func (rs recordSlice) Len() int {
-	return len(rs)
-}
-
-// Less implements the sort.Interface.Less.
-func (rs recordSlice) Less(i, j int) bool {
-	return rs[i].Key < rs[j].Key
-}
-
-// Swap implements the sort.Interface.Swap.
-func (rs recordSlice) Swap(i, j int) {
-	rs[i], rs[j] = rs[j], rs[i]
-}
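The `Lookup` contract documented above returns parameters in their order of appearance in the matched route, not in alphabetical order. A minimal sketch against the vendored import path (the route and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime/middleware/denco"
)

func main() {
	router := denco.New()
	if err := router.Build([]denco.Record{
		{Key: "/path/to/:id/:name", Value: "data"},
	}); err != nil {
		panic(err)
	}

	// Params arrive in route order: id first, then name.
	_, params, _ := router.Lookup("/path/to/1/alice")
	for _, p := range params {
		fmt.Printf("%s=%s\n", p.Name, p.Value) // id=1, then name=alice
	}
}
```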
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
deleted file mode 100644
index 0886713c1..000000000
--- a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package denco
-
-import (
-	"net/http"
-)
-
-// Mux represents a multiplexer for HTTP requests.
-type Mux struct{}
-
-// NewMux returns a new Mux.
-func NewMux() *Mux {
-	return &Mux{}
-}
-
-// GET is shorthand for Mux.Handler("GET", path, handler).
-func (m *Mux) GET(path string, handler HandlerFunc) Handler {
-	return m.Handler("GET", path, handler)
-}
-
-// POST is shorthand for Mux.Handler("POST", path, handler).
-func (m *Mux) POST(path string, handler HandlerFunc) Handler {
-	return m.Handler("POST", path, handler)
-}
-
-// PUT is shorthand for Mux.Handler("PUT", path, handler).
-func (m *Mux) PUT(path string, handler HandlerFunc) Handler {
-	return m.Handler("PUT", path, handler)
-}
-
-// HEAD is shorthand for Mux.Handler("HEAD", path, handler).
-func (m *Mux) HEAD(path string, handler HandlerFunc) Handler {
-	return m.Handler("HEAD", path, handler)
-}
-
-// Handler returns a handler for the given HTTP method.
-func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler {
-	return Handler{
-		Method: method,
-		Path:   path,
-		Func:   handler,
-	}
-}
-
-// Build builds an http.Handler.
-func (m *Mux) Build(handlers []Handler) (http.Handler, error) {
-	recordMap := make(map[string][]Record)
-	for _, h := range handlers {
-		recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func))
-	}
-	mux := newServeMux()
-	for m, records := range recordMap {
-		router := New()
-		if err := router.Build(records); err != nil {
-			return nil, err
-		}
-		mux.routers[m] = router
-	}
-	return mux, nil
-}
-
-// Handler represents a handler of an HTTP request.
-type Handler struct {
-	// Method is an HTTP method.
-	Method string
-
-	// Path is a routing path for the handler.
-	Path string
-
-	// Func is the function that handles the HTTP request.
-	Func HandlerFunc
-}
-
-// HandlerFunc is the type of a request handler function.
-type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params)
-
-type serveMux struct {
-	routers map[string]*Router
-}
-
-func newServeMux() *serveMux {
-	return &serveMux{
-		routers: make(map[string]*Router),
-	}
-}
-
-// ServeHTTP implements the http.Handler interface.
-func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	handler, params := mux.handler(r.Method, r.URL.Path)
-	handler(w, r, params)
-}
-
-func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) {
-	if router, found := mux.routers[method]; found {
-		if handler, params, found := router.Lookup(path); found {
-			return handler.(HandlerFunc), params
-		}
-	}
-	return NotFound, nil
-}
-
-// NotFound replies to the request with an HTTP 404 not found error.
-// NotFound is called when the HTTP method is unknown or no handler is found.
-// If you want to use your own NotFound handler, overwrite this variable.
-var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) {
-	http.NotFound(w, r)
-}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
deleted file mode 100644
index edc1f6ab8..000000000
--- a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package denco
-
-// NextSeparator returns the index of the next separator in path.
-func NextSeparator(path string, start int) int {
-	for start < len(path) {
-		if c := path[start]; c == '/' || c == TerminationCharacter {
-			break
-		}
-		start++
-	}
-	return start
-}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go
deleted file mode 100644
index eaf90606a..000000000
--- a/vendor/github.com/go-openapi/runtime/middleware/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*Package middleware provides the library with helper functions for serving swagger APIs.
-
-Pseudo middleware handler
-
-	import (
-		"net/http"
-
-		"github.com/go-openapi/errors"
-	)
-
-	func newCompleteMiddleware(ctx *Context) http.Handler {
-		return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
-			// use context to lookup routes
-			if matched, ok := ctx.RouteInfo(r); ok {
-
-				if matched.NeedsAuth() {
-					if _, err := ctx.Authorize(r, matched); err != nil {
-						ctx.Respond(rw, r, matched.Produces, matched, err)
-						return
-					}
-				}
-
-				bound, validation := ctx.BindAndValidate(r, matched)
-				if validation != nil {
-					ctx.Respond(rw, r, matched.Produces, matched, validation)
-					return
-				}
-
-				result, err := matched.Handler.Handle(bound)
-				if err != nil {
-					ctx.Respond(rw, r, matched.Produces, matched, err)
-					return
-				}
-
-				ctx.Respond(rw, r, matched.Produces, matched, result)
-				return
-			}
-
-			// Not found, check if it exists in the other methods first
-			if others := ctx.AllowedMethods(r); len(others) > 0 {
-				ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
-				return
-			}
-			ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
-		})
-	}
-*/
-package middleware
diff --git a/vendor/github.com/go-openapi/runtime/middleware/go18.go b/vendor/github.com/go-openapi/runtime/middleware/go18.go
deleted file mode 100644
index 75c762c09..000000000
--- a/vendor/github.com/go-openapi/runtime/middleware/go18.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.8
-
-package middleware
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
-	return url.PathUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
deleted file mode 100644
index e069743e3..000000000
--- a/vendor/github.com/go-openapi/runtime/middleware/header/header.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// this file was taken from the github.com/golang/gddo repository
-
-// Package header provides functions for parsing HTTP headers.
-package header
-
-import (
-	"net/http"
-	"strings"
-	"time"
-)
-
-// Octet types from RFC 2616.
-var octetTypes [256]octetType
-
-type octetType byte
-
-const (
-	isToken octetType = 1 << iota
-	isSpace
-)
-
-func init() {
-	// OCTET      = <any 8-bit sequence of data>
-	// CHAR       = <any US-ASCII character (octets 0 - 127)>
-	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-	// CR         = <US-ASCII CR, carriage return (13)>
-	// LF         = <US-ASCII LF, linefeed (10)>
-	// SP         = <US-ASCII SP, space (32)>
-	// HT         = <US-ASCII HT, horizontal-tab (9)>
-	// <">        = <US-ASCII double-quote mark (34)>
-	// CRLF       = CR LF
-	// LWS        = [CRLF] 1*( SP | HT )
-	// TEXT       = <any OCTET except CTLs, but including LWS>
-	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-	// token      = 1*<any CHAR except CTLs or separators>
-	// qdtext     = <any TEXT except <">>
-
-	for c := 0; c < 256; c++ {
-		var t octetType
-		isCtl := c <= 31 || c == 127
-		isChar := 0 <= c && c <= 127
-		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
-		if strings.ContainsRune(" \t\r\n", rune(c)) {
-			t |= isSpace
-		}
-		if isChar && !isCtl && !isSeparator {
-			t |= isToken
-		}
-		octetTypes[c] = t
-	}
-}
-
-// Copy returns a shallow copy of the header.
-func Copy(header http.Header) http.Header {
-	h := make(http.Header)
-	for k, vs := range header {
-		h[k] = vs
-	}
-	return h
-}
-
-var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC}
-
-// ParseTime parses the header as time.
The zero value is returned if the -// header is not present or there is an error parsing the -// header. -func ParseTime(header http.Header, key string) time.Time { - if s := header.Get(key); s != "" { - for _, layout := range timeLayouts { - if t, err := time.Parse(layout, s); err == nil { - return t.UTC() - } - } - } - return time.Time{} -} - -// ParseList parses a comma separated list of values. Commas are ignored in -// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is -// trimmed. -func ParseList(header http.Header, key string) []string { - var result []string - for _, s := range header[http.CanonicalHeaderKey(key)] { - begin := 0 - end := 0 - escape := false - quote := false - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - end = i + 1 - case quote: - switch b { - case '\\': - escape = true - case '"': - quote = false - } - end = i + 1 - case b == '"': - quote = true - end = i + 1 - case octetTypes[b]&isSpace != 0: - if begin == end { - begin = i + 1 - end = begin - } - case b == ',': - if begin < end { - result = append(result, s[begin:end]) - } - begin = i + 1 - end = begin - default: - end = i + 1 - } - } - if begin < end { - result = append(result, s[begin:end]) - } - } - return result -} - -// ParseValueAndParams parses a comma separated list of values with optional -// semicolon separated name-value pairs. Content-Type and Content-Disposition -// headers are in this format. -func ParseValueAndParams(header http.Header, key string) (string, map[string]string) { - return parseValueAndParams(header.Get(key)) -} - -func parseValueAndParams(s string) (value string, params map[string]string) { - params = make(map[string]string) - value, s = expectTokenSlash(s) - if value == "" { - return - } - value = strings.ToLower(value) - s = skipSpace(s) - for strings.HasPrefix(s, ";") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -// AcceptSpec ... -type AcceptSpec struct { - Value string - Q float64 -} - -// ParseAccept2 ... -func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) { - for _, en := range ParseList(header, key) { - v, p := parseValueAndParams(en) - var spec AcceptSpec - spec.Value = v - spec.Q = 1.0 - if p != nil { - if q, ok := p["q"]; ok { - spec.Q, _ = expectQuality(q) - } - } - if spec.Q < 0.0 { - continue - } - specs = append(specs, spec) - } - - return -} - -// ParseAccept parses Accept* headers. 
-func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { -loop: - for _, s := range header[key] { - for { - var spec AcceptSpec - spec.Value, s = expectTokenSlash(s) - if spec.Value == "" { - continue loop - } - spec.Q = 1.0 - s = skipSpace(s) - if strings.HasPrefix(s, ";") { - s = skipSpace(s[1:]) - for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") { - s = skipSpace(s[1:]) - } - if strings.HasPrefix(s, "q=") { - spec.Q, s = expectQuality(s[2:]) - if spec.Q < 0.0 { - continue loop - } - } - } - specs = append(specs, spec) - s = skipSpace(s) - if !strings.HasPrefix(s, ",") { - continue loop - } - s = skipSpace(s[1:]) - } - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenSlash(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - b := s[i] - if (octetTypes[b]&isToken == 0) && b != '/' { - break - } - } - return s[:i], s[i:] -} - -func expectQuality(s string) (q float64, rest string) { - switch { - case len(s) == 0: - return -1, "" - case s[0] == '0': - // q is already 0 - s = s[1:] - case s[0] == '1': - s = s[1:] - q = 1 - case s[0] == '.': - // q is already 0 - default: - return -1, "" - } - if !strings.HasPrefix(s, ".") { - return q, s - } - s = s[1:] - i := 0 - n := 0 - d := 1 - for ; i < len(s); i++ { - b := s[i] - if b < '0' || b > '9' { - break - } - n = n*10 + int(b) - '0' - d *= 10 - } - return q + float64(n)/float64(d), s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go deleted file mode 100644 index a9b6f27d3..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// this file was taken from the github.com/golang/gddo repository - -package middleware - -import ( - "net/http" - "strings" - - "github.com/go-openapi/runtime/middleware/header" -) - -// NegotiateContentEncoding returns the best offered content encoding for the -// request's Accept-Encoding header. If two offers match with equal weight and -// then the offer earlier in the list is preferred. If no offers are -// acceptable, then "" is returned. 
-func NegotiateContentEncoding(r *http.Request, offers []string) string { - bestOffer := "identity" - bestQ := -1.0 - specs := header.ParseAccept(r.Header, "Accept-Encoding") - for _, offer := range offers { - for _, spec := range specs { - if spec.Q > bestQ && - (spec.Value == "*" || spec.Value == offer) { - bestQ = spec.Q - bestOffer = offer - } - } - } - if bestQ == 0 { - bestOffer = "" - } - return bestOffer -} - -// NegotiateContentType returns the best offered content type for the request's -// Accept header. If two offers match with equal weight, then the more specific -// offer is preferred. For example, text/* trumps */*. If two offers match -// with equal weight and specificity, then the offer earlier in the list is -// preferred. If no offers match, then defaultOffer is returned. -func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string { - bestOffer := defaultOffer - bestQ := -1.0 - bestWild := 3 - specs := header.ParseAccept(r.Header, "Accept") - for _, rawOffer := range offers { - offer := normalizeOffer(rawOffer) - // No Accept header: just return the first offer. - if len(specs) == 0 { - return rawOffer - } - for _, spec := range specs { - switch { - case spec.Q == 0.0: - // ignore - case spec.Q < bestQ: - // better match found - case spec.Value == "*/*": - if spec.Q > bestQ || bestWild > 2 { - bestQ = spec.Q - bestWild = 2 - bestOffer = rawOffer - } - case strings.HasSuffix(spec.Value, "/*"): - if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) && - (spec.Q > bestQ || bestWild > 1) { - bestQ = spec.Q - bestWild = 1 - bestOffer = rawOffer - } - default: - if spec.Value == offer && - (spec.Q > bestQ || bestWild > 0) { - bestQ = spec.Q - bestWild = 0 - bestOffer = rawOffer - } - } - } - } - return bestOffer -} - -func normalizeOffers(orig []string) (norm []string) { - for _, o := range orig { - norm = append(norm, normalizeOffer(o)) - } - return -} - -func normalizeOffer(orig string) string { - return strings.SplitN(orig, ";", 2)[0] -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go deleted file mode 100644 index bc6942a0f..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
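The negotiation helpers above build on the `header` package removed earlier: `header.ParseAccept` supplies the quality-ordered specs that `NegotiateContentType` and `NegotiateContentEncoding` score against the offers. A usage sketch (the header values are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
	"github.com/go-openapi/runtime/middleware/header"
)

func main() {
	r, _ := http.NewRequest("GET", "/things", nil)
	r.Header.Set("Accept", "application/json;q=0.9, text/*;q=0.5")
	r.Header.Set("Accept-Encoding", "gzip;q=1.0, identity;q=0.5")

	// header.ParseList splits on commas while respecting quoted strings.
	fmt.Println(header.ParseList(r.Header, "Accept"))
	// [application/json;q=0.9 text/*;q=0.5]

	// Exact media types beat wildcards of equal or lower quality.
	ct := middleware.NegotiateContentType(r, []string{"text/plain", "application/json"}, "text/plain")
	fmt.Println(ct) // application/json

	// The highest-q acceptable encoding wins; a q of 0 disables an offer.
	fmt.Println(middleware.NegotiateContentEncoding(r, []string{"gzip", "identity"})) // gzip
}
```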
- -package middleware - -import ( - "net/http" - - "github.com/go-openapi/runtime" -) - -type errorResp struct { - code int - response interface{} - headers http.Header -} - -func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { - for k, v := range e.headers { - for _, val := range v { - rw.Header().Add(k, val) - } - } - if e.code > 0 { - rw.WriteHeader(e.code) - } else { - rw.WriteHeader(http.StatusInternalServerError) - } - if err := producer.Produce(rw, e.response); err != nil { - Logger.Printf("failed to write error response: %v", err) - } -} - -// NotImplemented the error response when the response is not implemented -func NotImplemented(message string) Responder { - return Error(http.StatusNotImplemented, message) -} - -// Error creates a generic responder for returning errors, the data will be serialized -// with the matching producer for the request -func Error(code int, data interface{}, headers ...http.Header) Responder { - var hdr http.Header - for _, h := range headers { - for k, v := range h { - if hdr == nil { - hdr = make(http.Header) - } - hdr[k] = v - } - } - return &errorResp{ - code: code, - response: data, - headers: hdr, - } -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go deleted file mode 100644 index 1175a63cf..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/operation.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import "net/http" - -// NewOperationExecutor creates a context aware middleware that handles the operations after routing -func NewOperationExecutor(ctx *Context) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - // use context to lookup routes - route, rCtx, _ := ctx.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - - route.Handler.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go deleted file mode 100644 index 8fa0cf4e4..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/parameter.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package middleware
-
-import (
-	"encoding"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"net/http"
-	"reflect"
-	"strconv"
-
-	"github.com/go-openapi/errors"
-	"github.com/go-openapi/spec"
-	"github.com/go-openapi/strfmt"
-	"github.com/go-openapi/swag"
-	"github.com/go-openapi/validate"
-
-	"github.com/go-openapi/runtime"
-)
-
-const defaultMaxMemory = 32 << 20
-
-var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
-
-func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
-	binder := new(untypedParamBinder)
-	binder.Name = param.Name
-	binder.parameter = &param
-	binder.formats = formats
-	if param.In != "body" {
-		binder.validator = validate.NewParamValidator(&param, formats)
-	} else {
-		binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats)
-	}
-
-	return binder
-}
-
-type untypedParamBinder struct {
-	parameter *spec.Parameter
-	formats   strfmt.Registry
-	Name      string
-	validator validate.EntityValidator
-}
-
-func (p *untypedParamBinder) Type() reflect.Type {
-	return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items)
-}
-
-func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type {
-	switch tpe {
-	case "boolean":
-		return reflect.TypeOf(true)
-
-	case "string":
-		if tt, ok := p.formats.GetType(format); ok {
-			return tt
-		}
-		return reflect.TypeOf("")
-
-	case "integer":
-		switch format {
-		case "int8":
-			return reflect.TypeOf(int8(0))
-		case "int16":
-			return reflect.TypeOf(int16(0))
-		case "int32":
-			return reflect.TypeOf(int32(0))
-		case "int64":
-			return reflect.TypeOf(int64(0))
-		default:
-			return reflect.TypeOf(int64(0))
-		}
-
-	case "number":
-		switch format {
-		case "float":
-			return reflect.TypeOf(float32(0))
-		case "double":
-			return reflect.TypeOf(float64(0))
-		}
-
-	case "array":
-		if items == nil {
-			return nil
-		}
-		itemsType := p.typeForSchema(items.Type, items.Format, items.Items)
-		if itemsType == nil {
-			return nil
-		}
-		return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type()
-
-	case "file":
-		return reflect.TypeOf(&runtime.File{}).Elem()
-
-	case "object":
-		return reflect.TypeOf(map[string]interface{}{})
-	}
-	return nil
-}
-
-func (p *untypedParamBinder) allowsMulti() bool {
-	return p.parameter.In == "query" || p.parameter.In == "formData"
-}
-
-func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
-	name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
-	if tpe == "array" {
-		if cf == "multi" {
-			if !p.allowsMulti() {
-				return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
-			}
-			vv, hasKey, _ := values.GetOK(name)
-			return vv, false, hasKey, nil
-		}
-
-		v, hk, hv := values.GetOK(name)
-		if !hv {
-			return nil, false, hk, nil
-		}
-		d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target)
-		return d, c, hk, e
-	}
-
-	vv, hk, _ := values.GetOK(name)
-	return vv, false, hk, nil
-}
-
-func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error {
-	// fmt.Println("binding", p.name, "as", p.Type())
-	switch p.parameter.In {
-	case "query":
-		data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target)
-		if err != nil {
-			return err
-		}
-		if custom {
-			return nil
-		}
-
-		return p.bindValue(data, hasKey, target)
-
-	case "header":
-		data, custom, hasKey, err :=
p.readValue(runtime.Values(request.Header), target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "path": - data, custom, hasKey, err := p.readValue(routeParams, target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "formData": - var err error - var mt string - - mt, _, e := runtime.ContentType(request.Header) - if e != nil { - // because of the interface conversion go thinks the error is not nil - // so we first check for nil and then set the err var if it's not nil - err = e - } - - if err != nil { - return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"}) - } - - if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" { - return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"}) - } - - if mt == "multipart/form-data" { - if err = request.ParseMultipartForm(defaultMaxMemory); err != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", err) - } - } - - if err = request.ParseForm(); err != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", err) - } - - if p.parameter.Type == "file" { - file, header, ffErr := request.FormFile(p.parameter.Name) - if ffErr != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) - } - target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) - return nil - } - - if request.MultipartForm != nil { - data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target) - if rvErr != nil { - return rvErr - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - } - data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "body": - newValue := reflect.New(target.Type()) - if !runtime.HasBody(request) { - if p.parameter.Default != nil { - target.Set(reflect.ValueOf(p.parameter.Default)) - } - - return nil - } - if err := consumer.Consume(request.Body, newValue.Interface()); err != nil { - if err == io.EOF && p.parameter.Default != nil { - target.Set(reflect.ValueOf(p.parameter.Default)) - return nil - } - tpe := p.parameter.Type - if p.parameter.Format != "" { - tpe = p.parameter.Format - } - return errors.InvalidType(p.Name, p.parameter.In, tpe, nil) - } - target.Set(reflect.Indirect(newValue)) - return nil - default: - return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In)) - } -} - -func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error { - if p.parameter.Type == "array" { - return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey) - } - var d string - if len(data) > 0 { - d = data[len(data)-1] - } - return p.setFieldValue(target, p.parameter.Default, d, hasKey) -} - -func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { - tpe := p.parameter.Type - if p.parameter.Format != "" { - tpe = p.parameter.Format - } - - if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil { - return errors.Required(p.Name, p.parameter.In, data) - } - - ok, err := p.tryUnmarshaler(target, defaultValue, data) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, 
tpe, data) - } - if ok { - return nil - } - - defVal := reflect.Zero(target.Type()) - if defaultValue != nil { - defVal = reflect.ValueOf(defaultValue) - } - - if tpe == "byte" { - if data == "" { - if target.CanSet() { - target.SetBytes(defVal.Bytes()) - } - return nil - } - - b, err := base64.StdEncoding.DecodeString(data) - if err != nil { - b, err = base64.URLEncoding.DecodeString(data) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - } - if target.CanSet() { - target.SetBytes(b) - } - return nil - } - - switch target.Kind() { - case reflect.Bool: - if data == "" { - if target.CanSet() { - target.SetBool(defVal.Bool()) - } - return nil - } - b, err := swag.ConvertBool(data) - if err != nil { - return err - } - if target.CanSet() { - target.SetBool(b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(int64(0))) - target.SetInt(rd.Int()) - } - return nil - } - i, err := strconv.ParseInt(data, 10, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowInt(i) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetInt(i) - } - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(uint64(0))) - target.SetUint(rd.Uint()) - } - return nil - } - u, err := strconv.ParseUint(data, 10, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowUint(u) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetUint(u) - } - - case reflect.Float32, reflect.Float64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(float64(0))) - target.SetFloat(rd.Float()) - } - return nil - } - f, err := strconv.ParseFloat(data, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowFloat(f) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetFloat(f) - } - - case reflect.String: - value := data - if value == "" { - value = defVal.String() - } - // validate string - if target.CanSet() { - target.SetString(value) - } - - case reflect.Ptr: - if data == "" && defVal.Kind() == reflect.Ptr { - if target.CanSet() { - target.Set(defVal) - } - return nil - } - newVal := reflect.New(target.Type().Elem()) - if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil { - return err - } - if target.CanSet() { - target.Set(newVal) - } - - default: - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - return nil -} - -func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) { - if !target.CanSet() { - return false, nil - } - // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more - if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) { - if defaultValue != nil && len(data) == 0 { - target.Set(reflect.ValueOf(defaultValue)) - return true, nil - } - value := reflect.New(target.Type()) - if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil { - return true, err - } - target.Set(reflect.Indirect(value)) - return true, nil - } - return false, nil -} - -func (p 
*untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) { - ok, err := p.tryUnmarshaler(target, p.parameter.Default, data) - if err != nil { - return nil, true, err - } - if ok { - return nil, true, nil - } - - return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil -} - -func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error { - sz := len(data) - if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil { - return errors.Required(p.Name, p.parameter.In, data) - } - - defVal := reflect.Zero(target.Type()) - if defaultValue != nil { - defVal = reflect.ValueOf(defaultValue) - } - - if !target.CanSet() { - return nil - } - if sz == 0 { - target.Set(defVal) - return nil - } - - value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz) - - for i := 0; i < sz; i++ { - if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil { - return err - } - } - - target.Set(value) - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go deleted file mode 100644 index 03385251e..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !go1.8 - -package middleware - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go deleted file mode 100644 index 4be330d6d..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go +++ /dev/null @@ -1,90 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "html/template" - "net/http" - "path" -) - -// RapiDocOpts configures the RapiDoc middlewares -type RapiDocOpts struct { - // BasePath for the UI path, defaults to: / - BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs - Path string - // SpecURL the url to find the spec for - SpecURL string - // RapiDocURL for the js that generates the rapidoc site, defaults to: https://cdn.jsdelivr.net/npm/rapidoc/bundles/rapidoc.standalone.js - RapiDocURL string - // Title for the documentation site, default to: API documentation - Title string -} - -// EnsureDefaults in case some options are missing -func (r *RapiDocOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } - if r.RapiDocURL == "" { - r.RapiDocURL = rapidocLatest - } - if r.Title == "" { - r.Title = "API documentation" - } -} - -// RapiDoc creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. 
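A sketch of wiring up this middleware: with zero-valued options, `EnsureDefaults` serves the UI at `/docs` against `/swagger.json` (the title and listen address below are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	// RapiDoc renders the docs page and delegates everything else to next.
	docs := middleware.RapiDoc(middleware.RapiDocOpts{
		SpecURL: "/swagger.json",
		Title:   "Example API",
	}, http.NotFoundHandler())

	log.Fatal(http.ListenAndServe(":8080", docs))
}
```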
-// -func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("rapidoc").Parse(rapidocTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - rapidocLatest = "https://unpkg.com/rapidoc/dist/rapidoc-min.js" - rapidocTemplate = ` - - - {{ .Title }} - - - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go deleted file mode 100644 index 019c85429..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/redoc.go +++ /dev/null @@ -1,103 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "html/template" - "net/http" - "path" -) - -// RedocOpts configures the Redoc middlewares -type RedocOpts struct { - // BasePath for the UI path, defaults to: / - BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs - Path string - // SpecURL the url to find the spec for - SpecURL string - // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js - RedocURL string - // Title for the documentation site, default to: API documentation - Title string -} - -// EnsureDefaults in case some options are missing -func (r *RedocOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } - if r.RedocURL == "" { - r.RedocURL = redocLatest - } - if r.Title == "" { - r.Title = "API documentation" - } -} - -// Redoc creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. 
-// -func Redoc(opts RedocOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("redoc").Parse(redocTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js" - redocTemplate = ` - - - {{ .Title }} - - - - - - - - - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go deleted file mode 100644 index 760c37861..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/request.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "net/http" - "reflect" - - "github.com/go-openapi/errors" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// UntypedRequestBinder binds and validates the data from a http request -type UntypedRequestBinder struct { - Spec *spec.Swagger - Parameters map[string]spec.Parameter - Formats strfmt.Registry - paramBinders map[string]*untypedParamBinder -} - -// NewUntypedRequestBinder creates a new binder for reading a request. 
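A sketch of driving the untyped binder whose constructor follows. The parameter definition, target map, and request are illustrative, and a nil consumer suffices here because only a query parameter is bound (the body consumer is never touched for `in: query`):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
)

func main() {
	// One untyped query parameter, described by its swagger spec.
	params := map[string]spec.Parameter{
		"Name": {
			ParamProps:   spec.ParamProps{Name: "name", In: "query"},
			SimpleSchema: spec.SimpleSchema{Type: "string"},
		},
	}
	binder := middleware.NewUntypedRequestBinder(params, nil, strfmt.Default)

	r, _ := http.NewRequest("GET", "/greet?name=alice", nil)
	data := make(map[string]interface{})
	if err := binder.Bind(r, middleware.RouteParams(nil), nil, data); err != nil {
		panic(err)
	}
	fmt.Println(data["name"]) // alice
}
```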
-func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder { - binders := make(map[string]*untypedParamBinder) - for fieldName, param := range parameters { - binders[fieldName] = newUntypedParamBinder(param, spec, formats) - } - return &UntypedRequestBinder{ - Parameters: parameters, - paramBinders: binders, - Spec: spec, - Formats: formats, - } -} - -// Bind perform the databinding and validation -func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error { - val := reflect.Indirect(reflect.ValueOf(data)) - isMap := val.Kind() == reflect.Map - var result []error - debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) - for fieldName, param := range o.Parameters { - binder := o.paramBinders[fieldName] - debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) - var target reflect.Value - if !isMap { - binder.Name = fieldName - target = val.FieldByName(fieldName) - } - - if isMap { - tpe := binder.Type() - if tpe == nil { - if param.Schema.Type.Contains("array") { - tpe = reflect.TypeOf([]interface{}{}) - } else { - tpe = reflect.TypeOf(map[string]interface{}{}) - } - } - target = reflect.Indirect(reflect.New(tpe)) - } - - if !target.IsValid() { - result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name)) - continue - } - - if err := binder.Bind(request, routeParams, consumer, target); err != nil { - result = append(result, err) - continue - } - - if binder.validator != nil { - rr := binder.validator.Validate(target.Interface()) - if rr != nil && rr.HasErrors() { - result = append(result, rr.AsError()) - } - } - - if isMap { - val.SetMapIndex(reflect.ValueOf(param.Name), target) - } - } - - if len(result) > 0 { - return errors.CompositeValidationError(result...) - } - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go deleted file mode 100644 index 5052031c8..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/router.go +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "fmt" - "net/http" - fpath "path" - "regexp" - "strings" - - "github.com/go-openapi/runtime/security" - "github.com/go-openapi/swag" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/middleware/denco" -) - -// RouteParam is a object to capture route params in a framework agnostic way. 
-// implementations of the muxer should use these route params to communicate with the -// swagger framework -type RouteParam struct { - Name string - Value string -} - -// RouteParams the collection of route params -type RouteParams []RouteParam - -// Get gets the value for the route param for the specified key -func (r RouteParams) Get(name string) string { - vv, _, _ := r.GetOK(name) - if len(vv) > 0 { - return vv[len(vv)-1] - } - return "" -} - -// GetOK gets the value but also returns booleans to indicate if a key or value -// is present. This aids in validation and satisfies an interface in use there -// -// The returned values are: data, has key, has value -func (r RouteParams) GetOK(name string) ([]string, bool, bool) { - for _, p := range r { - if p.Name == name { - return []string{p.Value}, true, p.Value != "" - } - } - return nil, false, false -} - -// NewRouter creates a new context aware router middleware -func NewRouter(ctx *Context, next http.Handler) http.Handler { - if ctx.router == nil { - ctx.router = DefaultRouter(ctx.spec, ctx.api) - } - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if _, rCtx, ok := ctx.RouteInfo(r); ok { - next.ServeHTTP(rw, rCtx) - return - } - - // Not found, check if it exists in the other methods first - if others := ctx.AllowedMethods(r); len(others) > 0 { - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) - return - } - - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) - }) -} - -// RoutableAPI represents an interface for things that can serve -// as a provider of implementations for the swagger router -type RoutableAPI interface { - HandlerFor(string, string) (http.Handler, bool) - ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) - ConsumersFor([]string) map[string]runtime.Consumer - ProducersFor([]string) map[string]runtime.Producer - AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator - Authorizer() runtime.Authorizer - Formats() strfmt.Registry - DefaultProduces() string - DefaultConsumes() string -} - -// Router represents a swagger aware router -type Router interface { - Lookup(method, path string) (*MatchedRoute, bool) - OtherMethods(method, path string) []string -} - -type defaultRouteBuilder struct { - spec *loads.Document - analyzer *analysis.Spec - api RoutableAPI - records map[string][]denco.Record -} - -type defaultRouter struct { - spec *loads.Document - routers map[string]*denco.Router -} - -func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder { - return &defaultRouteBuilder{ - spec: spec, - analyzer: analysis.New(spec.Spec()), - api: api, - records: make(map[string][]denco.Record), - } -} - -// DefaultRouter creates a default implemenation of the router -func DefaultRouter(spec *loads.Document, api RoutableAPI) Router { - builder := newDefaultRouteBuilder(spec, api) - if spec != nil { - for method, paths := range builder.analyzer.Operations() { - for path, operation := range paths { - fp := fpath.Join(spec.BasePath(), path) - debugLog("adding route %s %s %q", method, fp, operation.ID) - builder.AddRoute(method, fp, operation) - } - } - } - return builder.Build() -} - -// RouteAuthenticator is an authenticator that can compose several authenticators together. -// It also knows when it contains an authenticator that allows for anonymous pass through. 
-// Contains a group of 1 or more authenticators that have a logical AND relationship -type RouteAuthenticator struct { - Authenticator map[string]runtime.Authenticator - Schemes []string - Scopes map[string][]string - allScopes []string - commonScopes []string - allowAnonymous bool -} - -func (ra *RouteAuthenticator) AllowsAnonymous() bool { - return ra.allowAnonymous -} - -// AllScopes returns a list of unique scopes that is the combination -// of all the scopes in the requirements -func (ra *RouteAuthenticator) AllScopes() []string { - return ra.allScopes -} - -// CommonScopes returns a list of unique scopes that are common in all the -// scopes in the requirements -func (ra *RouteAuthenticator) CommonScopes() []string { - return ra.commonScopes -} - -// Authenticate Authenticator interface implementation -func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { - if ra.allowAnonymous { - route.Authenticator = ra - return true, nil, nil - } - // iterate in proper order - var lastResult interface{} - for _, scheme := range ra.Schemes { - if authenticator, ok := ra.Authenticator[scheme]; ok { - applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ - Request: req, - RequiredScopes: ra.Scopes[scheme], - }) - if !applies { - return false, nil, nil - } - if err != nil { - route.Authenticator = ra - return true, nil, err - } - lastResult = princ - } - } - route.Authenticator = ra - return true, lastResult, nil -} - -func stringSliceUnion(slices ...[]string) []string { - unique := make(map[string]struct{}) - var result []string - for _, slice := range slices { - for _, entry := range slice { - if _, ok := unique[entry]; ok { - continue - } - unique[entry] = struct{}{} - result = append(result, entry) - } - } - return result -} - -func stringSliceIntersection(slices ...[]string) []string { - unique := make(map[string]int) - var intersection []string - - total := len(slices) - var emptyCnt int - for _, slice := range slices { - if len(slice) == 0 { - emptyCnt++ - continue - } - - for _, entry := range slice { - unique[entry]++ - if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices - intersection = append(intersection, entry) - } - } - } - - return intersection -} - -// RouteAuthenticators represents a group of authenticators that represent a logical OR -type RouteAuthenticators []RouteAuthenticator - -// AllowsAnonymous returns true when there is an authenticator that means optional auth -func (ras RouteAuthenticators) AllowsAnonymous() bool { - for _, ra := range ras { - if ra.AllowsAnonymous() { - return true - } - } - return false -} - -// Authenticate method implemention so this collection can be used as authenticator -func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { - var lastError error - var allowsAnon bool - var anonAuth RouteAuthenticator - - for _, ra := range ras { - if ra.AllowsAnonymous() { - anonAuth = ra - allowsAnon = true - continue - } - applies, usr, err := ra.Authenticate(req, route) - if !applies || err != nil || usr == nil { - if err != nil { - lastError = err - } - continue - } - return applies, usr, nil - } - - if allowsAnon && lastError == nil { - route.Authenticator = &anonAuth - return true, nil, lastError - } - return lastError != nil, nil, lastError -} - -type routeEntry struct { - PathPattern string - BasePath string - Operation *spec.Operation - Consumes []string - Consumers 
map[string]runtime.Consumer - Produces []string - Producers map[string]runtime.Producer - Parameters map[string]spec.Parameter - Handler http.Handler - Formats strfmt.Registry - Binder *UntypedRequestBinder - Authenticators RouteAuthenticators - Authorizer runtime.Authorizer -} - -// MatchedRoute represents the route that was matched in this request -type MatchedRoute struct { - routeEntry - Params RouteParams - Consumer runtime.Consumer - Producer runtime.Producer - Authenticator *RouteAuthenticator -} - -// HasAuth returns true when the route has a security requirement defined -func (m *MatchedRoute) HasAuth() bool { - return len(m.Authenticators) > 0 -} - -// NeedsAuth returns true when the request still -// needs to perform authentication -func (m *MatchedRoute) NeedsAuth() bool { - return m.HasAuth() && m.Authenticator == nil -} - -func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { - mth := strings.ToUpper(method) - debugLog("looking up route for %s %s", method, path) - if Debug { - if len(d.routers) == 0 { - debugLog("there are no known routers") - } - for meth := range d.routers { - debugLog("got a router for %s", meth) - } - } - if router, ok := d.routers[mth]; ok { - if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { - if entry, ok := m.(*routeEntry); ok { - debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) - var params RouteParams - for _, p := range rp { - v, err := pathUnescape(p.Value) - if err != nil { - debugLog("failed to escape %q: %v", p.Value, err) - v = p.Value - } - // a workaround to handle fragment/composing parameters until they are supported in denco router - // check if this parameter is a fragment within a path segment - if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { - // extract fragment parameters - ep := strings.Split(entry.PathPattern[xpos:], "/")[0] - pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) - for i, pname := range pnames { - params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) - } - } else { - // use the parameter directly - params = append(params, RouteParam{Name: p.Name, Value: v}) - } - } - return &MatchedRoute{routeEntry: *entry, Params: params}, true - } - } else { - debugLog("couldn't find a route by path for %s %s", method, path) - } - } else { - debugLog("couldn't find a route by method for %s %s", method, path) - } - return nil, false -} - -func (d *defaultRouter) OtherMethods(method, path string) []string { - mn := strings.ToUpper(method) - var methods []string - for k, v := range d.routers { - if k != mn { - if _, _, ok := v.Lookup(fpath.Clean(path)); ok { - methods = append(methods, k) - continue - } - } - } - return methods -} - -// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco -var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) - -func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { - pleft := strings.Index(pattern, "{") - names = append(names, name) - if pleft < 0 { - if strings.HasSuffix(value, pattern) { - values = append(values, value[:len(value)-len(pattern)]) - } else { - values = append(values, "") - } - } else { - toskip := pattern[:pleft] - pright := strings.Index(pattern, "}") - vright := strings.Index(value, toskip) - if vright >= 0 { - values = 
append(values, value[:vright]) - } else { - values = append(values, "") - value = "" - } - return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) - } - return names, values -} - -func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { - mn := strings.ToUpper(method) - - bp := fpath.Clean(d.spec.BasePath()) - if len(bp) > 0 && bp[len(bp)-1] == '/' { - bp = bp[:len(bp)-1] - } - - debugLog("operation: %#v", *operation) - if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { - consumes := d.analyzer.ConsumesFor(operation) - produces := d.analyzer.ProducesFor(operation) - parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) - - // add API defaults if not part of the spec - if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !swag.ContainsStringsCI(consumes, defConsumes) { - consumes = append(consumes, defConsumes) - } - - if defProduces := d.api.DefaultProduces(); defProduces != "" && !swag.ContainsStringsCI(produces, defProduces) { - produces = append(produces, defProduces) - } - - record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ - BasePath: bp, - PathPattern: path, - Operation: operation, - Handler: handler, - Consumes: consumes, - Produces: produces, - Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), - Producers: d.api.ProducersFor(normalizeOffers(produces)), - Parameters: parameters, - Formats: d.api.Formats(), - Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), - Authenticators: d.buildAuthenticators(operation), - Authorizer: d.api.Authorizer(), - }) - d.records[mn] = append(d.records[mn], record) - } -} - -func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { - requirements := d.analyzer.SecurityRequirementsFor(operation) - var auths []RouteAuthenticator - for _, reqs := range requirements { - var schemes []string - scopes := make(map[string][]string, len(reqs)) - var scopeSlices [][]string - for _, req := range reqs { - schemes = append(schemes, req.Name) - scopes[req.Name] = req.Scopes - scopeSlices = append(scopeSlices, req.Scopes) - } - - definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) - authenticators := d.api.AuthenticatorsFor(definitions) - auths = append(auths, RouteAuthenticator{ - Authenticator: authenticators, - Schemes: schemes, - Scopes: scopes, - allScopes: stringSliceUnion(scopeSlices...), - commonScopes: stringSliceIntersection(scopeSlices...), - allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", - }) - } - return auths -} - -func (d *defaultRouteBuilder) Build() *defaultRouter { - routers := make(map[string]*denco.Router) - for method, records := range d.records { - router := denco.New() - _ = router.Build(records) - routers[method] = router - } - return &defaultRouter{ - spec: d.spec, - routers: routers, - } -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go deleted file mode 100644 index 2b061caef..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/security.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import "net/http" - -func newSecureAPI(ctx *Context, next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - route, rCtx, _ := ctx.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - if route != nil && !route.NeedsAuth() { - next.ServeHTTP(rw, r) - return - } - - _, rCtx, err := ctx.Authorize(r, route) - if err != nil { - ctx.Respond(rw, r, route.Produces, route, err) - return - } - r = rCtx - - next.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go deleted file mode 100644 index f02914298..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/spec.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "net/http" - "path" -) - -// Spec creates a middleware to serve a swagger spec. -// This allows for altering the spec before starting the http listener. 
-// This can be useful if you want to serve the swagger spec from another path than /swagger.json -// -func Spec(basePath string, b []byte, next http.Handler) http.Handler { - if basePath == "" { - basePath = "/" - } - pth := path.Join(basePath, "swagger.json") - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusOK) - //#nosec - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusNotFound) - return - } - next.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go deleted file mode 100644 index 2c92f5c91..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go +++ /dev/null @@ -1,162 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "html/template" - "net/http" - "path" -) - -// SwaggerUIOpts configures the Swaggerui middlewares -type SwaggerUIOpts struct { - // BasePath for the UI path, defaults to: / - BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs - Path string - // SpecURL the url to find the spec for - SpecURL string - - // The three components needed to embed swagger-ui - SwaggerURL string - SwaggerPresetURL string - SwaggerStylesURL string - - Favicon32 string - Favicon16 string - - // Title for the documentation site, default to: API documentation - Title string -} - -// EnsureDefaults in case some options are missing -func (r *SwaggerUIOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } - if r.SwaggerURL == "" { - r.SwaggerURL = swaggerLatest - } - if r.SwaggerPresetURL == "" { - r.SwaggerPresetURL = swaggerPresetLatest - } - if r.SwaggerStylesURL == "" { - r.SwaggerStylesURL = swaggerStylesLatest - } - if r.Favicon16 == "" { - r.Favicon16 = swaggerFavicon16Latest - } - if r.Favicon32 == "" { - r.Favicon32 = swaggerFavicon32Latest - } - if r.Title == "" { - r.Title = "API documentation" - } -} - -// SwaggerUI creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. 
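For reference, the Spec middleware deleted above intercepts only requests for <basePath>/swagger.json and delegates everything else to next (answering 404 itself when next is nil). A hedged wiring sketch against the upstream github.com/go-openapi/runtime/middleware package, which this patch removes only from the vendor tree; the stub spec document is illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	// In a real service the raw spec would come from the loaded document;
	// a stub is enough to illustrate the wiring.
	rawSpec := []byte(`{"swagger":"2.0","info":{"title":"demo","version":"1.0.0"},"paths":{}}`)

	api := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.Write([]byte("api response\n"))
	})

	// Serves rawSpec at /swagger.json; every other path falls through to api.
	handler := middleware.Spec("/", rawSpec, api)

	log.Fatal(http.ListenAndServe(":8080", handler))
}
```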
-func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, &opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js" - swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js" - swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css" - swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png" - swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png" - swaggeruiTemplate = ` - - - - - {{ .Title }} - - - - - - - - -
- - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go deleted file mode 100644 index 39a85f7d9..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package untyped - -import ( - "fmt" - "net/http" - "sort" - "strings" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// NewAPI creates the default untyped API -func NewAPI(spec *loads.Document) *API { - var an *analysis.Spec - if spec != nil && spec.Spec() != nil { - an = analysis.New(spec.Spec()) - } - api := &API{ - spec: spec, - analyzer: an, - consumers: make(map[string]runtime.Consumer, 10), - producers: make(map[string]runtime.Producer, 10), - authenticators: make(map[string]runtime.Authenticator), - operations: make(map[string]map[string]runtime.OperationHandler), - ServeError: errors.ServeError, - Models: make(map[string]func() interface{}), - formats: strfmt.NewFormats(), - } - return api.WithJSONDefaults() -} - -// API represents an untyped mux for a swagger spec -type API struct { - spec *loads.Document - analyzer *analysis.Spec - DefaultProduces string - DefaultConsumes string - consumers map[string]runtime.Consumer - producers map[string]runtime.Producer - authenticators map[string]runtime.Authenticator - authorizer runtime.Authorizer - operations map[string]map[string]runtime.OperationHandler - ServeError func(http.ResponseWriter, *http.Request, error) - Models map[string]func() interface{} - formats strfmt.Registry -} - -// WithJSONDefaults loads the json defaults for this api -func (d *API) WithJSONDefaults() *API { - d.DefaultConsumes = runtime.JSONMime - d.DefaultProduces = runtime.JSONMime - d.consumers[runtime.JSONMime] = runtime.JSONConsumer() - d.producers[runtime.JSONMime] = runtime.JSONProducer() - return d -} - -// WithoutJSONDefaults clears the json defaults for this api -func (d *API) WithoutJSONDefaults() *API { - d.DefaultConsumes = "" - d.DefaultProduces = "" - delete(d.consumers, runtime.JSONMime) - delete(d.producers, runtime.JSONMime) - return d -} - -// Formats returns the registered string formats -func (d *API) Formats() strfmt.Registry { - if d.formats == nil { - d.formats = strfmt.NewFormats() - } - return d.formats -} - -// RegisterFormat registers a custom format validator -func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { - if d.formats == nil { - d.formats = strfmt.NewFormats() - } - d.formats.Add(name, format, validator) -} - -// RegisterAuth registers an auth handler in this api -func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { - if d.authenticators == nil { - d.authenticators = 
make(map[string]runtime.Authenticator) - } - d.authenticators[scheme] = handler -} - -// RegisterAuthorizer registers an authorizer handler in this api -func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { - d.authorizer = handler -} - -// RegisterConsumer registers a consumer for a media type. -func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { - if d.consumers == nil { - d.consumers = make(map[string]runtime.Consumer, 10) - } - d.consumers[strings.ToLower(mediaType)] = handler -} - -// RegisterProducer registers a producer for a media type -func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { - if d.producers == nil { - d.producers = make(map[string]runtime.Producer, 10) - } - d.producers[strings.ToLower(mediaType)] = handler -} - -// RegisterOperation registers an operation handler for an operation name -func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { - if d.operations == nil { - d.operations = make(map[string]map[string]runtime.OperationHandler, 30) - } - um := strings.ToUpper(method) - if b, ok := d.operations[um]; !ok || b == nil { - d.operations[um] = make(map[string]runtime.OperationHandler) - } - d.operations[um][path] = handler -} - -// OperationHandlerFor returns the operation handler for the specified id if it can be found -func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { - if d.operations == nil { - return nil, false - } - if pi, ok := d.operations[strings.ToUpper(method)]; ok { - h, ok := pi[path] - return h, ok - } - return nil, false -} - -// ConsumersFor gets the consumers for the specified media types -func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { - result := make(map[string]runtime.Consumer) - for _, mt := range mediaTypes { - if consumer, ok := d.consumers[mt]; ok { - result[mt] = consumer - } - } - return result -} - -// ProducersFor gets the producers for the specified media types -func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { - result := make(map[string]runtime.Producer) - for _, mt := range mediaTypes { - if producer, ok := d.producers[mt]; ok { - result[mt] = producer - } - } - return result -} - -// AuthenticatorsFor gets the authenticators for the specified security schemes -func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { - result := make(map[string]runtime.Authenticator) - for k := range schemes { - if a, ok := d.authenticators[k]; ok { - result[k] = a - } - } - return result -} - -// Authorizer returns the registered authorizer -func (d *API) Authorizer() runtime.Authorizer { - return d.authorizer -} - -// Validate validates this API for any missing items -func (d *API) Validate() error { - return d.validate() -} - -// validateWith validates the registrations in this API against the provided spec analyzer -func (d *API) validate() error { - var consumes []string - for k := range d.consumers { - consumes = append(consumes, k) - } - - var produces []string - for k := range d.producers { - produces = append(produces, k) - } - - var authenticators []string - for k := range d.authenticators { - authenticators = append(authenticators, k) - } - - var operations []string - for m, v := range d.operations { - for p := range v { - operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) - } - } - - var definedAuths []string - for k := range d.spec.Spec().SecurityDefinitions { - definedAuths = 
append(definedAuths, k) - } - - if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil { - return err - } - if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { - return err - } - if err := d.verify("operation", operations, d.analyzer.OperationMethodPaths()); err != nil { - return err - } - - requiredAuths := d.analyzer.RequiredSecuritySchemes() - if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { - return err - } - if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { - return err - } - return nil -} - -func (d *API) verify(name string, registrations []string, expectations []string) error { - sort.Strings(registrations) - sort.Strings(expectations) - - expected := map[string]struct{}{} - seen := map[string]struct{}{} - - for _, v := range expectations { - expected[v] = struct{}{} - } - - var unspecified []string - for _, v := range registrations { - seen[v] = struct{}{} - if _, ok := expected[v]; !ok { - unspecified = append(unspecified, v) - } - } - - for k := range seen { - delete(expected, k) - } - - var unregistered []string - for k := range expected { - unregistered = append(unregistered, k) - } - sort.Strings(unspecified) - sort.Strings(unregistered) - - if len(unregistered) > 0 || len(unspecified) > 0 { - return &errors.APIVerificationFailed{ - Section: name, - MissingSpecification: unspecified, - MissingRegistration: unregistered, - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go deleted file mode 100644 index 1f0135b57..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/validation.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
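The verify step above amounts to a two-way set difference: registrations the spec never asks for are reported as MissingSpecification, and spec requirements that were never registered as MissingRegistration. A condensed, standard-library-only sketch of the same comparison:

```go
package main

import (
	"fmt"
	"sort"
)

// verify compares what the API registered against what the spec expects,
// reporting both directions, as the deleted method does.
func verify(registrations, expectations []string) (unspecified, unregistered []string) {
	expected := map[string]struct{}{}
	for _, v := range expectations {
		expected[v] = struct{}{}
	}
	for _, v := range registrations {
		if _, ok := expected[v]; !ok {
			unspecified = append(unspecified, v) // registered but not in the spec
		}
		delete(expected, v)
	}
	for k := range expected {
		unregistered = append(unregistered, k) // required by the spec, never registered
	}
	sort.Strings(unspecified)
	sort.Strings(unregistered)
	return
}

func main() {
	unspec, unreg := verify(
		[]string{"application/json", "text/plain"},
		[]string{"application/json", "application/xml"},
	)
	fmt.Println(unspec) // [text/plain]
	fmt.Println(unreg)  // [application/xml]
}
```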
-
-package middleware
-
-import (
- "mime"
- "net/http"
- "strings"
-
- "github.com/go-openapi/errors"
- "github.com/go-openapi/swag"
-
- "github.com/go-openapi/runtime"
-)
-
-type validation struct {
- context *Context
- result []error
- request *http.Request
- route *MatchedRoute
- bound map[string]interface{}
-}
-
-// validateContentType validates the content type of a request
-func validateContentType(allowed []string, actual string) error {
- debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", "))
- if len(allowed) == 0 {
- return nil
- }
- mt, _, err := mime.ParseMediaType(actual)
- if err != nil {
- return errors.InvalidContentType(actual, allowed)
- }
- if swag.ContainsStringsCI(allowed, mt) {
- return nil
- }
- if swag.ContainsStringsCI(allowed, "*/*") {
- return nil
- }
- parts := strings.Split(actual, "/")
- if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") {
- return nil
- }
- return errors.InvalidContentType(actual, allowed)
-}
-
-func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation {
- debugLog("validating request %s %s", request.Method, request.URL.EscapedPath())
- validate := &validation{
- context: ctx,
- request: request,
- route: route,
- bound: make(map[string]interface{}),
- }
-
- validate.contentType()
- if len(validate.result) == 0 {
- validate.responseFormat()
- }
- if len(validate.result) == 0 {
- validate.parameters()
- }
-
- return validate
-}
-
-func (v *validation) parameters() {
- debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
- if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil {
- if result.Error() == "validation failure list" {
- for _, e := range result.(*errors.Validation).Value.([]interface{}) {
- v.result = append(v.result, e.(error))
- }
- return
- }
- v.result = append(v.result, result)
- }
-}
-
-func (v *validation) contentType() {
- if len(v.result) == 0 && runtime.HasBody(v.request) {
- debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
- ct, _, req, err := v.context.ContentType(v.request)
- if err != nil {
- v.result = append(v.result, err)
- } else {
- v.request = req
- }
-
- if len(v.result) == 0 {
- if err := validateContentType(v.route.Consumes, ct); err != nil {
- v.result = append(v.result, err)
- }
- }
- if ct != "" && v.route.Consumer == nil {
- cons, ok := v.route.Consumers[ct]
- if !ok {
- v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct))
- } else {
- v.route.Consumer = cons
- }
- }
- }
-}
-
-func (v *validation) responseFormat() {
- // if the route provides values for Produces and no format could be identified, return an error.
- // if the route does not specify values for Produces then treat the request as valid, since the API designer
- // chose not to specify the format for responses.
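validateContentType above accepts an exact media type, the */* catch-all, or a type/* wildcard, comparing case-insensitively and ignoring media-type parameters. A condensed standalone rendition of those rules (strings.EqualFold stands in for swag.ContainsStringsCI; the wildcard check runs on the parsed media type rather than the raw header, which is equivalent for well-formed values):

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// allowedContentType reports whether actual matches the allowed list:
// exact match, "*/*", or a "type/*" wildcard.
func allowedContentType(allowed []string, actual string) bool {
	if len(allowed) == 0 {
		return true
	}
	mt, _, err := mime.ParseMediaType(actual) // strips parameters like charset
	if err != nil {
		return false
	}
	contains := func(s string) bool {
		for _, a := range allowed {
			if strings.EqualFold(a, s) {
				return true
			}
		}
		return false
	}
	if contains(mt) || contains("*/*") {
		return true
	}
	if parts := strings.SplitN(mt, "/", 2); len(parts) == 2 && contains(parts[0]+"/*") {
		return true
	}
	return false
}

func main() {
	allowed := []string{"application/json", "text/*"}
	fmt.Println(allowedContentType(allowed, "application/json; charset=utf-8")) // true
	fmt.Println(allowedContentType(allowed, "text/plain"))                      // true
	fmt.Println(allowedContentType(allowed, "image/png"))                       // false
}
```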
- if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 { - v.request = rCtx - v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) - } -} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go deleted file mode 100644 index 078fda173..000000000 --- a/vendor/github.com/go-openapi/runtime/request.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bufio" - "io" - "net/http" - "strings" - - "github.com/go-openapi/swag" -) - -// CanHaveBody returns true if this method can have a body -func CanHaveBody(method string) bool { - mn := strings.ToUpper(method) - return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" -} - -// IsSafe returns true if this is a request with a safe method -func IsSafe(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn == "GET" || mn == "HEAD" -} - -// AllowsBody returns true if the request allows for a body -func AllowsBody(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn != "HEAD" -} - -// HasBody returns true if this method needs a content-type -func HasBody(r *http.Request) bool { - // happy case: we have a content length set - if r.ContentLength > 0 { - return true - } - - if r.Header.Get("content-length") != "" { - // in this case, no Transfer-Encoding should be present - // we have a header set but it was explicitly set to 0, so we assume no body - return false - } - - rdr := newPeekingReader(r.Body) - r.Body = rdr - return rdr.HasContent() -} - -func newPeekingReader(r io.ReadCloser) *peekingReader { - if r == nil { - return nil - } - return &peekingReader{ - underlying: bufio.NewReader(r), - orig: r, - } -} - -type peekingReader struct { - underlying interface { - Buffered() int - Peek(int) ([]byte, error) - Read([]byte) (int, error) - } - orig io.ReadCloser -} - -func (p *peekingReader) HasContent() bool { - if p == nil { - return false - } - if p.underlying.Buffered() > 0 { - return true - } - b, err := p.underlying.Peek(1) - if err != nil { - return false - } - return len(b) > 0 -} - -func (p *peekingReader) Read(d []byte) (int, error) { - if p == nil { - return 0, io.EOF - } - return p.underlying.Read(d) -} - -func (p *peekingReader) Close() error { - p.underlying = nil - if p.orig != nil { - return p.orig.Close() - } - return nil -} - -// JSONRequest creates a new http request with json headers set -func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - req.Header.Add(HeaderContentType, JSONMime) - req.Header.Add(HeaderAccept, JSONMime) - return req, nil -} - -// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) -type Gettable interface { - GetOK(string) 
([]string, bool, bool) -} - -// ReadSingleValue reads a single value from the source -func ReadSingleValue(values Gettable, name string) string { - vv, _, hv := values.GetOK(name) - if hv { - return vv[len(vv)-1] - } - return "" -} - -// ReadCollectionValue reads a collection value from a string data source -func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { - v := ReadSingleValue(values, name) - return swag.SplitByFormat(v, collectionFormat) -} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go deleted file mode 100644 index 476d26c3e..000000000 --- a/vendor/github.com/go-openapi/runtime/security/authenticator.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package security - -import ( - "context" - "net/http" - "strings" - - "github.com/go-openapi/errors" - - "github.com/go-openapi/runtime" -) - -const ( - query = "query" - header = "header" -) - -// HttpAuthenticator is a function that authenticates a HTTP request -func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { - return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { - if request, ok := params.(*http.Request); ok { - return handler(request) - } - if scoped, ok := params.(*ScopedAuthRequest); ok { - return handler(scoped.Request) - } - return false, nil, nil - }) -} - -// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes -func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator { - return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { - if request, ok := params.(*ScopedAuthRequest); ok { - return handler(request) - } - return false, nil, nil - }) -} - -// UserPassAuthentication authentication function -type UserPassAuthentication func(string, string) (interface{}, error) - -// UserPassAuthenticationCtx authentication function with context.Context -type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error) - -// TokenAuthentication authentication function -type TokenAuthentication func(string) (interface{}, error) - -// TokenAuthenticationCtx authentication function with context.Context -type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error) - -// ScopedTokenAuthentication authentication function -type ScopedTokenAuthentication func(string, []string) (interface{}, error) - -// ScopedTokenAuthenticationCtx authentication function with context.Context -type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error) - -var DefaultRealmName = "API" - -type secCtxKey uint8 - -const ( - failedBasicAuth secCtxKey = iota - oauth2SchemeName -) - -func FailedBasicAuth(r *http.Request) 
string { - return FailedBasicAuthCtx(r.Context()) -} - -func FailedBasicAuthCtx(ctx context.Context) string { - v, ok := ctx.Value(failedBasicAuth).(string) - if !ok { - return "" - } - return v -} - -func OAuth2SchemeName(r *http.Request) string { - return OAuth2SchemeNameCtx(r.Context()) -} - -func OAuth2SchemeNameCtx(ctx context.Context) string { - v, ok := ctx.Value(oauth2SchemeName).(string) - if !ok { - return "" - } - return v -} - -// BasicAuth creates a basic auth authenticator with the provided authentication function -func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { - return BasicAuthRealm(DefaultRealmName, authenticate) -} - -// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name -func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { - if realm == "" { - realm = DefaultRealmName - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - if usr, pass, ok := r.BasicAuth(); ok { - p, err := authenticate(usr, pass) - if err != nil { - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - } - return true, p, err - } - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - return false, nil, nil - }) -} - -// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context -func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { - return BasicAuthRealmCtx(DefaultRealmName, authenticate) -} - -// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context -func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { - if realm == "" { - realm = DefaultRealmName - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - if usr, pass, ok := r.BasicAuth(); ok { - ctx, p, err := authenticate(r.Context(), usr, pass) - if err != nil { - ctx = context.WithValue(ctx, failedBasicAuth, realm) - } - *r = *r.WithContext(ctx) - return true, p, err - } - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - return false, nil, nil - }) -} - -// APIKeyAuth creates an authenticator that uses a token for authorization. -// This token can be obtained from either a header or a query string -func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { - inl := strings.ToLower(in) - if inl != query && inl != header { - // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) - } - - var getToken func(*http.Request) string - switch inl { - case header: - getToken = func(r *http.Request) string { return r.Header.Get(name) } - case query: - getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - token := getToken(r) - if token == "" { - return false, nil, nil - } - - p, err := authenticate(token) - return true, p, err - }) -} - -// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. 
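APIKeyAuth above panics on an in value other than "query" or "header" (treated as a programming error) and reports applies=false when no token is present, so that other schemes may still match. A small usage sketch against the upstream github.com/go-openapi/runtime/security package; the X-API-Key header name and the string principal are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime/security"
)

func main() {
	// Token lookup in the X-API-Key header; the principal type is up to
	// the application (a plain string here).
	auth := security.APIKeyAuth("X-API-Key", "header", func(token string) (interface{}, error) {
		if token == "secret" {
			return "service-account", nil
		}
		return nil, fmt.Errorf("invalid api key")
	})

	req, _ := http.NewRequest("GET", "http://example.invalid/", nil)
	req.Header.Set("X-API-Key", "secret")

	applies, principal, err := auth.Authenticate(req)
	fmt.Println(applies, principal, err) // true service-account <nil>
}
```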
-// This token can be obtained from either a header or a query string -func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { - inl := strings.ToLower(in) - if inl != query && inl != header { - // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) - } - - var getToken func(*http.Request) string - switch inl { - case header: - getToken = func(r *http.Request) string { return r.Header.Get(name) } - case query: - getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - token := getToken(r) - if token == "" { - return false, nil, nil - } - - ctx, p, err := authenticate(r.Context(), token) - *r = *r.WithContext(ctx) - return true, p, err - }) -} - -// ScopedAuthRequest contains both a http request and the required scopes for a particular operation -type ScopedAuthRequest struct { - Request *http.Request - RequiredScopes []string -} - -// BearerAuth for use with oauth2 flows -func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { - const prefix = "Bearer " - return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { - var token string - hdr := r.Request.Header.Get("Authorization") - if strings.HasPrefix(hdr, prefix) { - token = strings.TrimPrefix(hdr, prefix) - } - if token == "" { - qs := r.Request.URL.Query() - token = qs.Get("access_token") - } - //#nosec - ct, _, _ := runtime.ContentType(r.Request.Header) - if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") - } - - if token == "" { - return false, nil, nil - } - - rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) - *r.Request = *r.Request.WithContext(rctx) - p, err := authenticate(token, r.RequiredScopes) - return true, p, err - }) -} - -// BearerAuthCtx for use with oauth2 flows with support for context.Context. -func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { - const prefix = "Bearer " - return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { - var token string - hdr := r.Request.Header.Get("Authorization") - if strings.HasPrefix(hdr, prefix) { - token = strings.TrimPrefix(hdr, prefix) - } - if token == "" { - qs := r.Request.URL.Query() - token = qs.Get("access_token") - } - //#nosec - ct, _, _ := runtime.ContentType(r.Request.Header) - if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") - } - - if token == "" { - return false, nil, nil - } - - rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) - ctx, p, err := authenticate(rctx, token, r.RequiredScopes) - *r.Request = *r.Request.WithContext(ctx) - return true, p, err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go deleted file mode 100644 index 00c1a4d6a..000000000 --- a/vendor/github.com/go-openapi/runtime/security/authorizer.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package security - -import ( - "net/http" - - "github.com/go-openapi/runtime" -) - -// Authorized provides a default implementation of the Authorizer interface where all -// requests are authorized (successful) -func Authorized() runtime.Authorizer { - return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil }) -} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go deleted file mode 100644 index 3b011a0bf..000000000 --- a/vendor/github.com/go-openapi/runtime/statuses.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -// Statuses lists the most common HTTP status codes to default message -// taken from https://httpstatuses.com/ -var Statuses = map[int]string{ - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 103: "Checkpoint", - 122: "URI too long", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Request Processed", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request-URI Too Long", - 415: "Unsupported Media Type", - 416: "Request Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 444: "No Response", - 449: "Retry With", - 450: "Blocked by Windows Parental Controls", - 451: "Wrong Exchange Server", - 499: "Client Closed Request", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 509: "Bandwidth Limit Exceeded", - 510: "Not 
Extended", - 511: "Network Authentication Required", - 598: "Network read timeout error", - 599: "Network connect timeout error", -} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go deleted file mode 100644 index f33320b7d..000000000 --- a/vendor/github.com/go-openapi/runtime/text.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -// TextConsumer creates a new text consumer -func TextConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("TextConsumer requires a reader") // early exit - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - // If the buffer is empty, no need to unmarshal it, which causes a panic. - if len(b) == 0 { - return nil - } - - if tu, ok := data.(encoding.TextUnmarshaler); ok { - err := tu.UnmarshalText(b) - if err != nil { - return fmt.Errorf("text consumer: %v", err) - } - - return nil - } - - t := reflect.TypeOf(data) - if data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t.Elem().Kind() == reflect.String { - v.SetString(string(b)) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", - data, data, "can be resolved by supporting TextUnmarshaler interface") - }) -} - -// TextProducer creates a new text producer -func TextProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("TextProducer requires a writer") // early exit - } - - if data == nil { - return errors.New("no data given to produce text from") - } - - if tm, ok := data.(encoding.TextMarshaler); ok { - txt, err := tm.MarshalText() - if err != nil { - return fmt.Errorf("text producer: %v", err) - } - _, err = writer.Write(txt) - return err - } - - if str, ok := data.(error); ok { - _, err := writer.Write([]byte(str.Error())) - return err - } - - if str, ok := data.(fmt.Stringer); ok { - _, err := writer.Write([]byte(str.String())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - if v.Kind() != reflect.String { - return fmt.Errorf("%T is not a supported type by the TextProducer", data) - } - - _, err := writer.Write([]byte(v.String())) - return err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go deleted file mode 100644 index 11f5732af..000000000 --- a/vendor/github.com/go-openapi/runtime/values.go +++ /dev/null @@ -1,19 +0,0 @@ -package runtime - -// Values 
typically represent parameters on a http request. -type Values map[string][]string - -// GetOK returns the values collection for the given key. -// When the key is present in the map it will return true for hasKey. -// When the value is not empty it will return true for hasValue. -func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { - value, hasKey = v[key] - if !hasKey { - return - } - if len(value) == 0 { - return - } - hasValue = true - return -} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go deleted file mode 100644 index 821c7393d..000000000 --- a/vendor/github.com/go-openapi/runtime/xml.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/xml" - "io" -) - -// XMLConsumer creates a new XML consumer -func XMLConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := xml.NewDecoder(reader) - return dec.Decode(data) - }) -} - -// XMLProducer creates a new XML producer -func XMLProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := xml.NewEncoder(writer) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/spec/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml deleted file mode 100644 index 835d55e74..000000000 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ /dev/null @@ -1,42 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 2 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage 
- - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml deleted file mode 100644 index 2281a07b0..000000000 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.16.x -- 1.x -arch: - - amd64 -jobs: - include: - # only run fast tests on ppc64le - - go: 1.x - arch: ppc64le - script: - - gotestsum -f short-verbose -- ./... - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master - -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. 
- -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md
deleted file mode 100644
index 18782c6da..000000000
--- a/vendor/github.com/go-openapi/spec/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# OAI object model
-
-[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec)
-
-[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec)
-[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
-[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE)
-[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec)
-[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec)
-
-The object model for OpenAPI specification documents.
-
-### FAQ
-
-* What does this do?
-
-> 1. This package knows how to marshal and unmarshal Swagger API specifications into a Go object model
-> 2. It knows how to resolve $ref's and expand them to make a single root document
-
-* How does it play with the rest of the go-openapi packages?
-
-> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger)
-> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations
-> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it
-> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents
-
-* Does this library support OpenAPI 3?
-
-> No.
-> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
-> There is no plan to make it evolve toward supporting OpenAPI 3.x.
-> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
->
-> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml
deleted file mode 100644
index 090359391..000000000
--- a/vendor/github.com/go-openapi/spec/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: "0.1.{build}"
-
-clone_folder: C:\go-openapi\spec
-shallow_clone: true # for startup speed
-pull_requests:
-  do_not_increment_build_number: true
-
-#skip_tags: true
-#skip_branch_with_pr: true
-
-# appveyor.yml
-build: off
-
-environment:
-  GOPATH: c:\gopath
-
-stack: go 1.15
-
-test_script:
-  - go test -v -timeout 20m ./...
-
-deploy: off
-
-notifications:
-  - provider: Slack
-    incoming_webhook: https://hooks.slack.com/services/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
-    auth_token:
-      secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4=
-    channel: bots
-    on_build_success: false
-    on_build_failure: true
-    on_build_status_changed: true
diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go
deleted file mode 100644
index afc83850c..000000000
--- a/vendor/github.com/go-openapi/spec/bindata.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Code generated by go-bindata. DO NOT EDIT.
-// sources:
-// schemas/jsonschema-draft-04.json (4.357kB)
-// schemas/v2/schema.json (40.248kB)
-
-package spec
-
-import (
-	"bytes"
-	"compress/gzip"
-	"crypto/sha256"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-	"time"
-)
-
-func bindataRead(data []byte, name string) ([]byte, error) {
-	gz, err := gzip.NewReader(bytes.NewBuffer(data))
-	if err != nil {
-		return nil, fmt.Errorf("read %q: %v", name, err)
-	}
-
-	var buf bytes.Buffer
-	_, err = io.Copy(&buf, gz)
-	clErr := gz.Close()
-
-	if err != nil {
-		return nil, fmt.Errorf("read %q: %v", name, err)
-	}
-	if clErr != nil {
-		return nil, clErr
-	}
-
-	return buf.Bytes(), nil
-}
-
-type asset struct {
-	bytes  []byte
-	info   os.FileInfo
-	digest [sha256.Size]byte
-}
-
-type bindataFileInfo struct {
-	name    string
-	size    int64
-	mode    os.FileMode
-	modTime time.Time
-}
-
-func (fi bindataFileInfo) Name() string {
-	return fi.name
-}
-func (fi bindataFileInfo) Size() int64 {
-	return fi.size
-}
-func (fi bindataFileInfo) Mode() os.FileMode {
-	return fi.mode
-}
-func (fi bindataFileInfo) ModTime() time.Time {
-	return fi.modTime
-}
-func (fi bindataFileInfo) IsDir() bool {
-	return false
-}
-func (fi bindataFileInfo) Sys() interface{} {
-	return nil
-}
-
-var _jsonschemaDraft04Json =
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") - -func jsonschemaDraft04JsonBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04Json, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04Json() (*asset, error) { - bytes, err := jsonschemaDraft04JsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 
0xf1, 0x1b, 0x82, 0xe2}} - return a, nil -} - -var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b
\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\xe3\x08\xb5\x8b\x99\xbd\x82\xbc\x9e\xc2\xe8\x53\x46\x83\x3f\x33\x54\x2b\x5b\xad\x92\x79\xd9\x8f\x5d\x93\x98\xf2\xe6\xc6\x1c\xe6\x9a\x9e\xfc\x43\x82\x31\x66\x8e\x53\x77\xfe\x90\xe7\xf3\xf6\xe9\x62\x23\x3f\x10\x93\x18\xae\x72\x1a\x9d\xf9\x48\xcb\xcc\x5a\x65\xc7\x4a\x04\xf0\xf3\xd5\xd5\x05\x8a\x41\x08\xbc\x86\x86\x43\x51\x6c\xe0\x46\x57\xf6\x44\x40\x0d\xfb\xff\xa2\xc3\x7c\x3d\x39\x84\xdc\x09\x22\x64\x4f\x12\xd9\xba\xaa\xf6\xe3\xbd\x56\xdd\x91\x25\x6a\x14\x9c\x89\x34\x8e\x31\xdf\xee\x15\x7e\x2f\x39\x81\x15\x2a\x28\x95\x66\x51\xf5\xfd\x83\xc5\xfe\x15\x07\xcf\xf7\x08\xee\x1d\x8e\xb6\xc5\x52\xcc\x8c\x5a\x93\x66\xc5\xd8\x79\x38\x46\xd6\xa7\x88\x37\xc9\x2e\xe3\xd2\xa5\x7b\x4b\x3a\xdc\xa1\xdc\x9e\x29\xf1\x8c\x8a\x99\x16\x47\x8d\xd4\x78\x8b\xf6\x1c\xe9\x71\x54\x1b\x69\xa8\x4a\x93\x37\xe5\xb2\x2c\x4f\x0c\x92\xab\xa0\x73\x32\x72\x59\xd3\xf0\x2d\x8d\xed\xca\x37\x16\x19\x9e\xdb\x1c\xab\x17\x49\xc3\x0f\x37\xdc\x88\xb1\xb4\xd4\x42\xcb\x58\x5e\x6a\x52\x0b\x15\x10\x0a\xb0\x04\xe7\xf8\x58\x32\x16\x01\xa6\xcd\x01\xb2\xc2\x69\x24\x35\x38\x6f\x30\x6a\xae\x1b\xb4\x71\xaa\xad\x1d\xa0\xd6\x20\x2d\x8b\x3c\xc6\x82\x62\x27\x34\x6d\x15\x84\x7b\x43\xb1\x35\x78\xa6\x24\x77\x28\xc1\x6e\xfc\xe9\x48\x74\xf4\x15\xe3\xe1\x84\x42\x88\x40\x7a\x26\x49\x3b\x48\xb1\xa4\x19\x8e\x0c\xa7\xb5\x01\x6c\x0c\x97\x61\x8a\xc2\x32\xd8\x8c\x44\x69\x24\xbf\x65\x1d\x74\xd6\xe5\x44\xef\xec\x48\x5e\xb7\x8a\xa3\x29\x8e\x41\x64\xce\x1f\x88\xdc\x00\x47\x4b\x40\x98\x6e\xd1\x0d\x8e\x48\x98\x63\x5c\x21\xb1\x4c\x05\x0a\x58\x98\xc5\x6d\x4f\x0a\x77\x53\x4f\x8b\xc4\x44\x1f\xb2\xdf\x8d\x3b\xea\x9f\xfe\xf6\xf2\xc5\xff\x5d\x7f\xfe\x9f\xfb\x67\x8f\xff\xf3\xe9\x69\xd1\xfe\xb3\xc7\xfd\x3c\xf8\x3f\x71\x94\x82\x23\xd1\x72\x00\xb7\x42\x99\x6c\xc0\x60\x7b\x0f\x79\xea\xa8\x53\x4b\x56\x31\xfa\x0b\x52\x9f\x96\xdb\xcd\x2f\xd7\x67\xcd\x04\x19\x85\xfe\xdb\x02\x9a\x59\x03\xad\x63\x3c\xea\xff\x2e\x18\xfd\x00\xd9\xe2\x56\x60\x59\x93\xb9\xb6\xb2\x3e\x3c\x2c\xab\x0f\xa7\xb2\x89\x43\xc7\xf6\xd5\xce\x2e\xad\xa6\xa9\xed\xa6\xc6\x5a\xb4\xa6\x67\xdf\x8c\x26\x7b\x50\x5a\x91\x08\x2e\x6d\xd4\x3a\xc1\x9d\xf2\xdb\xde\x1e\xb2\x2c\x6c\xa5\x64\xc9\x16\xb4\x90\xaa\x4a\xb7\x0c\xde\x13\xc3\x2a\x9a\x11\x9b\x7a\x1b\x3
d\x95\x97\x37\x31\x6b\x69\x7e\x34\xc0\x67\x1f\x66\x19\x49\xef\xf1\x25\xf5\xac\x0e\xea\x0a\x28\x8d\x4d\x7e\xd9\x57\x4b\x49\xe5\xc6\xb3\x25\xfd\xe6\x57\x42\x25\xac\xcd\xcf\x36\x74\x8e\xca\x24\x47\xe7\x80\xa8\x92\x72\xbd\x3d\x84\x2d\x65\xe2\x82\x1a\x9c\xc4\x44\x92\x1b\x10\x79\x8a\xc4\x4a\x2f\x60\x51\x04\x81\xaa\xf0\xa3\x95\x27\xd7\x12\x7b\xa3\x96\x03\x45\x96\xc1\x8a\x07\xc9\xb2\xb0\x95\x52\x8c\xef\x48\x9c\xc6\x7e\x94\xca\xc2\x0e\x07\x12\x44\xa9\x20\x37\xf0\xae\x0f\x49\xa3\x96\x9d\x4b\x42\x7b\x70\x59\x14\xee\xe0\xb2\x0f\x49\xa3\x96\x4b\x97\xbf\x00\x5d\x4b\x4f\xfc\xbb\x2b\xee\x92\xb9\x17\xb5\xaa\xb8\x0b\x97\x17\x9b\x43\xfd\xd6\xc2\xb2\xc2\x2e\x29\xcf\xfd\x87\x4a\x55\xda\x25\x63\x1f\x5a\x65\x69\x2b\x2d\x3d\x67\xe9\x41\xae\x5e\xc1\x6e\x2b\xd4\xdb\x3e\xa8\xd3\x26\xd2\x48\x92\x24\xca\x61\x86\x8f\x8c\xbb\xf2\x8e\x91\xdf\x1f\x06\x19\x33\xf3\x03\x4d\xba\xcd\xe2\x2d\xfb\x69\xe9\x16\x15\x13\xd5\x56\x85\x4e\x3c\x5b\x8a\xbf\x25\x72\x83\xee\x5e\x20\x22\xf2\xc8\xaa\x7b\xdb\x8e\xe4\x29\x58\xca\x38\xb7\x3f\x2e\x59\xb8\xbd\xa8\x16\x16\xf7\xdb\x79\x51\x9f\x5a\xb4\x8d\x87\x3a\x6e\xbc\x3e\xc5\xb4\xcd\x58\xf9\xf5\x3c\xb9\x6f\x49\xaf\x57\xc1\xfa\x1c\x5d\x6d\x88\x8a\x8b\xd3\x28\xcc\xb7\xef\x10\x8a\x4a\x74\xa9\x4a\xa7\x62\xbf\x0d\x76\x23\x6f\x59\xd9\x31\xee\x40\x11\xfb\x28\xec\x8d\x22\x1c\x13\x5a\x64\x94\x23\x16\x60\xbb\xd2\x7c\xa0\x98\xb2\xe5\x6e\xbc\x54\x33\xe0\x3e\xb9\x52\x17\xdb\xb7\x1b\xc8\x12\x20\x8c\x23\xca\x64\x7e\x78\xa3\x62\x5b\x75\x56\xd9\x9e\x2a\x91\x27\xb0\x70\x34\x1f\x90\x89\xb5\x86\x73\x7e\x71\xda\x1e\xfb\x3a\x72\xdc\x5e\x79\x88\xcb\x74\x79\xd9\x64\xe4\xd4\xc2\x9e\xce\xb1\xfe\x85\x5a\xc0\xe9\x0c\x34\x3d\xd0\x43\xce\xa1\x36\x39\xd5\xa1\x4e\xf5\xf8\xb1\xa9\x23\x08\x75\x84\xac\x53\x6c\x3a\xc5\xa6\x53\x6c\x3a\xc5\xa6\x7f\xc5\xd8\xf4\x51\xfd\xff\x25\x4e\xfa\x33\x05\xbe\x9d\x60\xd2\x04\x93\x6a\x5f\x33\x9b\x98\x50\xd2\xe1\x50\x52\xc6\xcc\xdb\x38\x91\xdb\xe6\xaa\xa2\x8f\xa1\x6a\xa6\xd4\xc6\x56\xd6\x8c\x40\x02\x68\x48\xe8\x1a\xe1\x9a\xd9\x2e\xb7\x05\xc3\x34\xda\x2a\xbb\xcd\x12\x36\x98\x22\x50\x4c\xa1\x1b\xc5\xd5\x84\xf0\xbe\x24\x84\xf7\x2f\x22\x37\xef\x94\xd7\x9f\xa0\xde\x04\xf5\x26\xa8\x37\x41\x3d\x64\x40\x3d\xe5\xf2\xde\x60\x89\x27\xb4\x37\xa1\xbd\xda\xd7\xd2\x2c\x26\xc0\x37\x01\x3e\x1b\xef\x5f\x06\xe0\x6b\x7c\x5c\x91\x08\x26\x10\x38\x81\xc0\x09\x04\x76\x4a\x3d\x81\xc0\xbf\x12\x08\x4c\xb0\xdc\x7c\x99\x00\xd0\x75\x70\xb4\xf8\x5a\x7c\xea\xde\x3e\x39\x08\x30\x5a\x27\x35\xed\xb4\x65\xad\x69\x74\x10\x88\x79\xe2\x30\x52\x19\xd6\x04\x21\xa7\x95\xd5\x0e\x03\xf8\xda\x20\xd7\x84\xb4\x26\xa4\x35\x21\xad\x09\x69\x21\x03\x69\x51\x46\xff\xff\x18\x9b\x54\xed\x87\x47\x06\x9d\x4e\x73\x6e\x9a\xb3\xa9\xce\x83\x5e\x4b\xc6\x71\x20\x45\xd7\x72\xf5\x40\x72\x0e\x34\x6c\xf4\x6c\xf3\xba\x5e\x4b\x97\x0e\x52\xb8\xbe\x8b\x79\xa0\x10\x86\xa1\x75\xb0\x6f\xec\xc8\xf4\x3d\x4d\x7b\x86\xc2\x02\x31\x12\x51\xbf\x07\x94\xad\x10\xd6\x2e\x79\xcf\xe9\x1c\xf5\x1e\x31\x23\x5c\x18\xfb\x9c\xfb\x70\xe0\x62\xbd\xf7\xb5\x94\xcf\xf3\xf6\xfa\xc5\x4e\x9c\x85\x76\x1d\xae\x37\xbc\xde\xa3\x41\xcb\x29\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x67\x0f\x68\xb1\xeb\x38\x47\x07\x10\x1b\xd2\xe2\x18\x68\x6d\x40\xbb\xa3\x40\xba\x21\xf2\x8e\x81\xfb\xf6\x92\x77\x2f\x70\xe8\xdb\xb2\x36\xbf\x30\x91\xc5\x21\xe7\x45\xcc\x34\x0c\x48\x8e\xd0\xf2\x9b\x7c\x3c\xbd\x1c\x04\x3e\x07\xe8\x7c\x2f\x84\x7a\x48\x4d\x1f\xba\xe1\x76\x45\x7b\x60\xe0\x01\xca\xee\x04\xca\x31\xbe\x73\x5f\xa3\x70\x0c\xad\x1f\xa5\xf5\x76\xd5\xbb\xd2\x7e\xfb\x30\x90\xcf\xfa\x67\x7a\xe6\xc3\x37\x42\x19\xe2\xc9\x9c\x61\x4c\xe7\xd1\x77\x55\x86\x6e\x8f\x7b\x85\x42\x33\xa3\xaa\x57\xae\xfd\xd5\xcc\x9c\x56\x68\xe2\xde\x0e\x
a8\x2c\xa9\xb0\x7d\xf0\x54\x2d\x80\xf2\x48\x39\x3d\x98\x1a\x6d\x0b\x9d\xba\x53\xfb\xce\xf8\xd1\x7e\xbb\x60\x4f\x06\xf5\xce\xda\xab\xeb\xca\xcb\xd5\xac\x20\xda\x72\x3b\xa2\x4b\x38\xd7\xb5\x89\xbe\x42\xd9\xb9\x73\xc4\x0c\x6d\xb7\xd9\xf8\x8d\xbd\x3e\x9c\xf5\x53\x68\x48\x14\x36\x8f\x09\xc5\x92\xf1\x21\xd1\x09\x07\x1c\xbe\xa7\x91\xf3\x6a\xc8\xc1\x57\xb0\xdd\xc5\xc6\x1d\xad\x76\x1d\xa8\x82\x0e\x4c\x38\xfe\xa5\x8c\xc5\x0a\x40\x5d\xa1\xbb\x98\xd1\xfb\x74\x61\xed\x1a\x98\xaf\x3c\x8c\x1e\xe3\xc2\x92\x29\x74\x3e\x99\xd0\xf9\x41\x50\xd0\x38\x4b\x57\x7e\x5b\x7a\x0e\xe6\xce\x4e\xd7\x19\x35\x57\xbb\x3c\x3c\xd2\x5e\x4f\x4b\x4c\xf7\x0f\x4d\x2b\x91\x5d\x94\xa6\x95\xc8\x69\x25\x72\x5a\x89\x7c\xb8\x95\xc8\x07\x80\x8c\xda\x9c\x64\x7b\xb7\x71\xdf\x57\x12\x4b\x9a\x1f\x72\x0c\x13\x03\xad\x3c\xd5\x4e\xde\x8e\x57\x13\x6d\x34\x86\xcf\x97\xe6\xa4\x68\xc4\xb0\xf6\xc9\xc2\xeb\x8d\x0b\xd7\xcd\xfe\xba\xa6\xf5\x30\xeb\x30\x33\xbe\xc7\x56\x27\xab\x08\xd9\x6d\xbb\x09\xee\x7c\x2d\xcf\xee\x87\x38\xac\xc8\xdd\x90\x9a\x58\x4a\x4e\x96\xa9\x79\x79\xf3\xde\x20\xf0\x96\xe3\x24\x19\xeb\xba\xf2\x53\x19\xab\x12\xaf\x47\xb3\xa0\x3e\xef\x9b\x8d\x6d\x6d\x7b\xde\x3b\x3b\x1a\xc0\x3f\x95\x7e\xed\x78\xfb\x76\xb8\xaf\xb3\xdd\xc5\xeb\x95\xed\x5a\x62\x41\x82\xb3\x54\x6e\x80\x4a\x92\x6f\x36\xbd\x34\xae\xde\x6f\xa4\xc0\xbc\x08\xe3\x84\xfc\x1d\xb6\xe3\xd0\x62\x38\x95\x9b\x57\xe7\x71\x12\x91\x80\xc8\x31\x69\x5e\x60\x21\x6e\x19\x0f\xc7\xa4\x79\x96\x28\x3e\x47\x54\x65\x41\x36\x08\x40\x88\x1f\x58\x08\x56\xaa\xd5\xbf\xaf\xad\x96\xd7\xd6\xcf\x87\xf5\x34\x0f\x71\x93\x6e\x26\xed\x98\x5b\x9f\x4f\xcf\x95\x34\xc6\xd7\x11\xfa\xb0\x81\x22\x1a\xdb\xdf\x8e\xdc\xc3\xb9\xf8\xdd\x5d\x3c\x74\xe6\xea\xb7\x8b\xbf\xf5\x6e\xb3\x46\x2e\x64\xf4\xab\x3c\x4e\xcf\x36\x1d\xfe\xfa\xb8\x36\xba\x8a\xd8\xad\xf6\xc6\x41\x2a\x37\x8c\x17\x0f\xda\xfe\xda\xe7\x65\xbc\x71\x2c\x36\x57\x8a\x47\x12\x4c\xf1\xbd\x77\x6b\xa4\x50\x7e\x77\x7b\x22\x60\x89\xef\xcd\xf5\xb9\x0c\x97\x79\x0d\x2b\x35\x43\xcb\x3d\x24\xf1\x78\xfc\xf8\xcb\x1f\x15\x06\xe2\x78\xd8\x51\x21\xd9\x1f\xf0\xf5\x8f\x86\xa4\x50\xfa\xb1\x47\x43\xa5\xdd\x69\x14\xe8\xa3\xc0\x86\x91\xa7\x81\x50\xb4\x7c\xc0\x81\x80\x77\x7a\x9f\xc6\xc2\xa9\x8c\x05\x33\xb0\x3b\x31\xa4\xf4\xd7\x1b\x26\x55\x97\x7c\x65\xf8\x69\x1a\x84\x8e\x41\x78\xd9\xec\xc5\x11\x16\x1e\x74\x91\xf5\x56\xf5\x57\x49\x47\x5c\x92\xa9\x1e\x99\x36\xf4\xdb\xb1\x0e\xd3\x78\x02\xb0\x9b\x25\xcb\xe9\xe9\x1d\x0d\x44\x01\x42\x08\x91\x64\xd9\xdd\x37\x08\x17\xef\xf9\xe5\x0f\xbd\x46\x91\xf5\xf9\x89\x92\x37\xdd\x89\x59\x44\x1f\x9c\xee\x34\x1e\xbe\x47\x83\x32\x72\x8e\x37\xdf\xac\x69\x38\xef\x75\xb0\xda\xdb\xac\x83\x94\x2f\x39\xa6\x62\x05\x1c\x25\x9c\x49\x16\xb0\xa8\x3c\xc7\x7e\x76\x71\x3e\x6f\xb5\x24\xe7\xe8\xb7\xb9\xc7\x6c\x43\x92\xee\x21\xd4\x17\xa1\x7f\xba\x35\xfe\xae\x39\xbc\xde\xba\x69\xd9\x8e\xe1\x62\xde\x64\x7d\x16\x88\x1b\xed\x29\x11\xfd\x4f\xa9\xff\x99\x90\xc4\xf6\xf4\xf9\x6e\xe9\x28\x23\xd7\xca\xe5\xee\xee\x9f\x63\xb1\x5b\xfb\x10\xd7\x2f\x1d\xf2\xe3\xbf\xb9\xb5\x6f\xa4\x6d\x7d\x25\x79\xfb\x24\x31\xea\x56\xbe\x5d\x53\xcd\x2d\x36\xa3\x6d\xdf\xab\x1c\xb8\x6d\x6f\xc0\x98\xa7\xdd\xaa\x86\x8c\x1d\x39\xa3\x9d\x70\x2b\x9b\x68\xd9\xfd\x33\xfe\xa9\xb6\x4a\x2e\x63\x0f\xcf\x68\x27\xd9\x4c\xb9\x46\x6d\xcb\xbe\xa1\xa8\xd6\x5f\xc6\xd6\x9f\xf1\x4f\xf4\xd4\xb4\x78\xd0\xd6\xf4\x13\x3c\x3b\xac\xd0\xdc\x90\x34\xda\xc9\xb4\x9a\x1a\x8d\xbd\x93\x87\xd4\xe2\x21\x1b\xb3\x2b\xd1\xbe\xe7\x69\xd4\x53\x67\xd5\x40\xa0\xe3\x19\x3f\x6d\x1a\xbc\x0e\x86\x3c\x10\xb4\x3d\x2a\xcd\x78\x32\xe6\xab\xbd\x36\xc9\xf4\x3a\x58\xae\xc3\xf4\x47\xea\xbf\xfb\x47\xff\x0d\x00\x00\xff\xff\xd2\x32\x5a\x28\x38\x9d\x00\x00") - -func 
v2SchemaJsonBytes() ([]byte, error) {
-	return bindataRead(
-		_v2SchemaJson,
-		"v2/schema.json",
-	)
-}
-
-func v2SchemaJson() (*asset, error) {
-	bytes, err := v2SchemaJsonBytes()
-	if err != nil {
-		return nil, err
-	}
-
-	info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}}
-	return a, nil
-}
-
-// Asset loads and returns the asset for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func Asset(name string) ([]byte, error) {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[canonicalName]; ok {
-		a, err := f()
-		if err != nil {
-			return nil, fmt.Errorf("Asset %s cannot be read: %v", name, err)
-		}
-		return a.bytes, nil
-	}
-	return nil, fmt.Errorf("Asset %s not found", name)
-}
-
-// AssetString returns the asset contents as a string (instead of a []byte).
-func AssetString(name string) (string, error) {
-	data, err := Asset(name)
-	return string(data), err
-}
-
-// MustAsset is like Asset but panics when Asset would return an error.
-// It simplifies safe initialization of global variables.
-func MustAsset(name string) []byte {
-	a, err := Asset(name)
-	if err != nil {
-		panic("asset: Asset(" + name + "): " + err.Error())
-	}
-
-	return a
-}
-
-// MustAssetString is like AssetString but panics when Asset would return an
-// error. It simplifies safe initialization of global variables.
-func MustAssetString(name string) string {
-	return string(MustAsset(name))
-}
-
-// AssetInfo loads and returns the asset info for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func AssetInfo(name string) (os.FileInfo, error) {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[canonicalName]; ok {
-		a, err := f()
-		if err != nil {
-			return nil, fmt.Errorf("AssetInfo %s cannot be read: %v", name, err)
-		}
-		return a.info, nil
-	}
-	return nil, fmt.Errorf("AssetInfo %s not found", name)
-}
-
-// AssetDigest returns the digest of the file with the given name. It returns an
-// error if the asset could not be found or the digest could not be loaded.
-func AssetDigest(name string) ([sha256.Size]byte, error) {
-	canonicalName := strings.Replace(name, "\\", "/", -1)
-	if f, ok := _bindata[canonicalName]; ok {
-		a, err := f()
-		if err != nil {
-			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s cannot be read: %v", name, err)
-		}
-		return a.digest, nil
-	}
-	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
-}
-
-// Digests returns a map of all known files and their checksums.
-func Digests() (map[string][sha256.Size]byte, error) {
-	mp := make(map[string][sha256.Size]byte, len(_bindata))
-	for name := range _bindata {
-		a, err := _bindata[name]()
-		if err != nil {
-			return nil, err
-		}
-		mp[name] = a.digest
-	}
-	return mp, nil
-}
-
-// AssetNames returns the names of the assets.
-func AssetNames() []string {
-	names := make([]string, 0, len(_bindata))
-	for name := range _bindata {
-		names = append(names, name)
-	}
-	return names
-}
-
-// _bindata is a table, holding each asset generator, mapped to its name.
-var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04Json, - - "v2/schema.json": v2SchemaJson, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"}, -// AssetDir("data/img") would return []string{"a.png", "b.png"}, -// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - canonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(canonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}}, - "v2": {nil, map[string]*bintree{ - "schema.json": {v2SchemaJson, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory. -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) -} - -// RestoreAssets restores an asset under the given directory recursively. -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - canonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) -} diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go deleted file mode 100644 index 122993b44..000000000 --- a/vendor/github.com/go-openapi/spec/cache.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
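For reference, a minimal sketch of how the generated accessors in the deleted bindata.go above were typically consumed. This assumes the pre-removal vendored version of github.com/go-openapi/spec, which still exported Asset, MustAsset, AssetNames and AssetDigest as shown in the hunk above:

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Enumerate the JSON schema documents embedded by go-bindata.
	for _, name := range spec.AssetNames() {
		digest, err := spec.AssetDigest(name)
		if err != nil {
			continue // skip assets whose digest cannot be loaded
		}
		fmt.Printf("%s (sha256 %x...)\n", name, digest[:4])
	}

	// MustAsset panics on an unknown name; Asset returns an error instead.
	raw := spec.MustAsset("v2/schema.json")
	fmt.Printf("v2/schema.json: %d bytes\n", len(raw))
}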
- -package spec - -import ( - "sync" -) - -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.RWMutex - store map[string]interface{} -} - -func (s *simpleCache) ShallowClone() ResolutionCache { - store := make(map[string]interface{}, len(s.store)) - s.lock.RLock() - for k, v := range s.store { - store[k] = v - } - s.lock.RUnlock() - - return &simpleCache{ - store: store, - } -} - -// Get retrieves a cached URI -func (s *simpleCache) Get(uri string) (interface{}, bool) { - s.lock.RLock() - v, ok := s.store[uri] - - s.lock.RUnlock() - return v, ok -} - -// Set caches a URI -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - -var ( - // resCache is a package level cache for $ref resolution and expansion. - // It is initialized lazily by methods that have the need for it: no - // memory is allocated unless some expander methods are called. - // - // It is initialized with JSON schema and swagger schema, - // which do not mutate during normal operations. - // - // All subsequent utilizations of this cache are produced from a shallow - // clone of this initial version. - resCache *simpleCache - onceCache sync.Once - - _ ResolutionCache = &simpleCache{} -) - -// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. -func initResolutionCache() { - resCache = defaultResolutionCache() -} - -func defaultResolutionCache() *simpleCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} - -func cacheOrDefault(cache ResolutionCache) ResolutionCache { - onceCache.Do(initResolutionCache) - - if cache != nil { - return cache - } - - // get a shallow clone of the base cache with swagger and json schema - return resCache.ShallowClone() -} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go deleted file mode 100644 index 2f7bb219b..000000000 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// ContactInfo contact information for the exposed API. 
-// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - ContactInfoProps - VendorExtensible -} - -// ContactInfoProps hold the properties of a ContactInfo object -type ContactInfoProps struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} - -// UnmarshalJSON hydrates ContactInfo from json -func (c *ContactInfo) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { - return err - } - return json.Unmarshal(data, &c.VendorExtensible) -} - -// MarshalJSON produces ContactInfo as json -func (c ContactInfo) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(c.ContactInfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(c.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go deleted file mode 100644 index fc889f6d0..000000000 --- a/vendor/github.com/go-openapi/spec/debug.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" - "log" - "os" - "path" - "runtime" -) - -// Debug is true when the SWAGGER_DEBUG env var is not empty. -// -// It enables a more verbose logging of this package. -var Debug = os.Getenv("SWAGGER_DEBUG") != "" - -var ( - // specLogger is a debug logger for this package - specLogger *log.Logger -) - -func init() { - debugOptions() -} - -func debugOptions() { - specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) -} - -func debugLog(msg string, args ...interface{}) { - // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() - if Debug { - _, file1, pos1, _ := runtime.Caller(1) - specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go deleted file mode 100644 index 6992c7ba7..000000000 --- a/vendor/github.com/go-openapi/spec/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package spec - -import "errors" - -// Error codes -var ( - // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type - ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") - - // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer - ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") - - // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. 
- // At the moment, $ref are supported only inside: schemas, parameters, responses, path items - ErrDerefUnsupportedType = errors.New("deref: unsupported type") - - // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type - ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") -) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go deleted file mode 100644 index d4ea889d4..000000000 --- a/vendor/github.com/go-openapi/spec/expander.go +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" -) - -// ExpandOptions provides options for the spec expander. -// -// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. -// -// If left empty, the root document is assumed to be located in the current working directory: -// all relative $ref's will be resolved from there. -// -// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. -// -type ExpandOptions struct { - RelativeBase string // the path to the root document to expand. 
This is a file, not a directory
-	SkipSchemas         bool // do not expand schemas, just paths, parameters and responses
-	ContinueOnError     bool // continue expanding even after an error is found
-	PathLoader          func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document
-	AbsoluteCircularRef bool // circular $refs remaining after expansion remain absolute URLs
-}
-
-func optionsOrDefault(opts *ExpandOptions) *ExpandOptions {
-	if opts != nil {
-		clone := *opts // shallow clone to avoid internal changes to be propagated to the caller
-		if clone.RelativeBase != "" {
-			clone.RelativeBase = normalizeBase(clone.RelativeBase)
-		}
-		// if the relative base is empty, let the schema loader choose a pseudo root document
-		return &clone
-	}
-	return &ExpandOptions{}
-}
-
-// ExpandSpec expands the references in a swagger spec
-func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
-	options = optionsOrDefault(options)
-	resolver := defaultSchemaLoader(spec, options, nil, nil)
-
-	specBasePath := options.RelativeBase
-
-	if !options.SkipSchemas {
-		for key, definition := range spec.Definitions {
-			parentRefs := make([]string, 0, 10)
-			parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key))
-
-			def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
-			if resolver.shouldStopOnError(err) {
-				return err
-			}
-			if def != nil {
-				spec.Definitions[key] = *def
-			}
-		}
-	}
-
-	for key := range spec.Parameters {
-		parameter := spec.Parameters[key]
-		if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) {
-			return err
-		}
-		spec.Parameters[key] = parameter
-	}
-
-	for key := range spec.Responses {
-		response := spec.Responses[key]
-		if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) {
-			return err
-		}
-		spec.Responses[key] = response
-	}
-
-	if spec.Paths != nil {
-		for key := range spec.Paths.Paths {
-			pth := spec.Paths.Paths[key]
-			if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) {
-				return err
-			}
-			spec.Paths.Paths[key] = pth
-		}
-	}
-
-	return nil
-}
-
-const rootBase = ".root"
-
-// baseForRoot loads the root document in the cache and produces a fake ".root" base path entry
-// for further $ref resolution
-//
-// Setting the cache is optional and this parameter may safely be left to nil.
-func baseForRoot(root interface{}, cache ResolutionCache) string {
-	if root == nil {
-		return ""
-	}
-
-	// cache the root document to resolve $ref's
-	normalizedBase := normalizeBase(rootBase)
-	cache.Set(normalizedBase, root)
-
-	return normalizedBase
-}
-
-// ExpandSchema expands the refs in the schema object with reference to the root object.
-//
-// go-openapi/validate uses this function.
-//
-// Notice that it is impossible to reference a json schema in a different document other than root
-// (use ExpandSchemaWithBasePath to resolve external references).
-//
-// Setting the cache is optional and this parameter may safely be left to nil.
-func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - if root == nil { - root = schema - } - - opts := &ExpandOptions{ - // when a root is specified, cache the root as an in-memory document for $ref retrieval - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - } - - return ExpandSchemaWithBasePath(schema, cache, opts) -} - -// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. -// -// Setting the cache is optional and this parameter may safely be left to nil. -func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { - if schema == nil { - return nil - } - - cache = cacheOrDefault(cache) - - opts = optionsOrDefault(opts) - - resolver := defaultSchemaLoader(nil, opts, cache, nil) - - parentRefs := make([]string, 0, 10) - s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) - if err != nil { - return err - } - if s != nil { - // guard for when continuing on error - *schema = *s - } - - return nil -} - -func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Items == nil { - return &target, nil - } - - // array - if target.Items.Schema != nil { - t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - *target.Items.Schema = *t - } - - // tuple - for i := range target.Items.Schemas { - t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - target.Items.Schemas[i] = *t - } - - return &target, nil -} - -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Ref.String() == "" && target.Ref.IsRoot() { - newRef := normalizeRef(&target.Ref, basePath) - target.Ref = *newRef - return &target, nil - } - - // change the base path of resolution when an ID is encountered - // otherwise the basePath should inherit the parent's - if target.ID != "" { - basePath, _ = resolver.setSchemaID(target, target.ID, basePath) - } - - if target.Ref.String() != "" { - return expandSchemaRef(target, parentRefs, resolver, basePath) - } - - for k := range target.Definitions { - tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if tt != nil { - target.Definitions[k] = *tt - } - } - - t, err := expandItems(target, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target = *t - } - - for i := range target.AllOf { - t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.AllOf[i] = *t - } - } - - for i := range target.AnyOf { - t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.AnyOf[i] = *t - } - } - - for i := range target.OneOf { - t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.OneOf[i] = *t - } - } - - if target.Not != nil { - t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - 
} - if t != nil { - *target.Not = *t - } - } - - for k := range target.Properties { - t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.Properties[k] = *t - } - } - - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalProperties.Schema = *t - } - } - - for k := range target.PatternProperties { - t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.PatternProperties[k] = *t - } - } - - for k := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Dependencies[k].Schema = *t - } - } - } - - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalItems.Schema = *t - } - } - return &target, nil -} - -func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - // if a Ref is found, all sibling fields are skipped - // Ref also changes the resolution scope of children expandSchema - - // here the resolution scope is changed because a $ref was encountered - normalizedRef := normalizeRef(&target.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) { - // this means there is a cycle in the recursion tree: return the Ref - // - circular refs cannot be expanded. We leave them as ref. 
- // - denormalization means that a new local file ref is set relative to the original basePath - debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", - basePath, normalizedBasePath, normalizedRef.String()) - if !resolver.options.AbsoluteCircularRef { - target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) - } else { - target.Ref = *normalizedRef - } - return &target, nil - } - - var t *Schema - err := resolver.Resolve(&target.Ref, &t, basePath) - if resolver.shouldStopOnError(err) { - return nil, err - } - - if t == nil { - // guard for when continuing on error - return &target, nil - } - - parentRefs = append(parentRefs, normalizedRef.String()) - transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) - - basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) - - return expandSchema(*t, parentRefs, transitiveResolver, basePath) -} - -func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { - if pathItem == nil { - return nil - } - - parentRefs := make([]string, 0, 10) - if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - - if pathItem.Ref.String() != "" { - transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) - basePath = transitiveResolver.updateBasePath(resolver, basePath) - resolver = transitiveResolver - } - - pathItem.Ref = Ref{} - for i := range pathItem.Parameters { - if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - - ops := []*Operation{ - pathItem.Get, - pathItem.Head, - pathItem.Options, - pathItem.Put, - pathItem.Post, - pathItem.Patch, - pathItem.Delete, - } - for _, op := range ops { - if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - - return nil -} - -func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { - if op == nil { - return nil - } - - for i := range op.Parameters { - param := op.Parameters[i] - if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - op.Parameters[i] = param - } - - if op.Responses == nil { - return nil - } - - responses := op.Responses - if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - - for code := range responses.StatusCodeResponses { - response := responses.StatusCodeResponses[code] - if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - responses.StatusCodeResponses[code] = response - } - - return nil -} - -// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandResponse to resolve external references). -// -// Setting the cache is optional and this parameter may safely be left to nil. 
-func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - } - resolver := defaultSchemaLoader(root, opts, cache, nil) - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandResponse expands a response based on a basepath -// -// All refs inside response will be resolved relative to basePath -func ExpandResponse(response *Response, basePath string) error { - opts := optionsOrDefault(&ExpandOptions{ - RelativeBase: basePath, - }) - resolver := defaultSchemaLoader(nil, opts, nil, nil) - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandParameter to resolve external references). -func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - } - resolver := defaultSchemaLoader(root, opts, cache, nil) - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -// ExpandParameter expands a parameter based on a basepath. -// This is the exported version of expandParameter -// all refs inside parameter will be resolved relative to basePath -func ExpandParameter(parameter *Parameter, basePath string) error { - opts := optionsOrDefault(&ExpandOptions{ - RelativeBase: basePath, - }) - resolver := defaultSchemaLoader(nil, opts, nil, nil) - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { - var ( - ref *Ref - sch *Schema - ) - - switch refable := input.(type) { - case *Parameter: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - case *Response: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - default: - return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) - } - - return ref, sch, nil -} - -func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, _, err := getRefAndSchema(input) - if err != nil { - return err - } - - if ref == nil { - return nil - } - - parentRefs := make([]string, 0, 10) - if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - - ref, sch, _ := getRefAndSchema(input) - if ref.String() != "" { - transitiveResolver := resolver.transitiveResolver(basePath, *ref) - basePath = resolver.updateBasePath(transitiveResolver, basePath) - resolver = transitiveResolver - } - - if sch == nil { - // nothing to be expanded - if ref != nil { - *ref = Ref{} - } - return nil - } - - if sch.Ref.String() != "" { - rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) - if ern != nil { - return ern - } - - switch { - case resolver.isCircular(&rebasedRef, basePath, parentRefs...): - // this is a circular $ref: stop expansion - if !resolver.options.AbsoluteCircularRef { - sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) - } else { - sch.Ref = rebasedRef - } - case !resolver.options.SkipSchemas: - // schema expanded to a $ref in another root - sch.Ref = 
rebasedRef - debugLog("rebased to: %s", sch.Ref.String()) - default: - // skip schema expansion but rebase $ref to schema - sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) - } - } - - if ref != nil { - *ref = Ref{} - } - - // expand schema - if !resolver.options.SkipSchemas { - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - if s == nil { - // guard for when continuing on error - return nil - } - *sch = *s - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go deleted file mode 100644 index 88add91b2..000000000 --- a/vendor/github.com/go-openapi/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. -// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go deleted file mode 100644 index 9dfd17b18..000000000 --- a/vendor/github.com/go-openapi/spec/header.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
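For reference, the exported expansion helpers above are driven as follows. A minimal sketch: the `./specs/` base path and `items.json` file are illustrative, not taken from this repository.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// Expand a single response in isolation; $refs inside it are resolved
	// relative to the base path handed to ExpandResponse.
	resp := &spec.Response{}
	resp.Schema = spec.RefSchema("items.json#/definitions/Item") // hypothetical file and pointer
	if err := spec.ExpandResponse(resp, "./specs/"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Schema.Ref.String()) // expected empty once the $ref is inlined
}
```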
-
-package spec
-
-import (
-	"encoding/json"
-	"strings"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-const (
-	jsonArray = "array"
-)
-
-// HeaderProps describes a response header
-type HeaderProps struct {
-	Description string `json:"description,omitempty"`
-}
-
-// Header describes a header for a response of the API
-//
-// For more information: http://goo.gl/8us55a#headerObject
-type Header struct {
-	CommonValidations
-	SimpleSchema
-	VendorExtensible
-	HeaderProps
-}
-
-// ResponseHeader creates a new header instance for use in a response
-func ResponseHeader() *Header {
-	return new(Header)
-}
-
-// WithDescription sets the description on this response, allows for chaining
-func (h *Header) WithDescription(description string) *Header {
-	h.Description = description
-	return h
-}
-
-// Typed a fluent builder method for the type of parameter
-func (h *Header) Typed(tpe, format string) *Header {
-	h.Type = tpe
-	h.Format = format
-	return h
-}
-
-// CollectionOf a fluent builder method for an array item
-func (h *Header) CollectionOf(items *Items, format string) *Header {
-	h.Type = jsonArray
-	h.Items = items
-	h.CollectionFormat = format
-	return h
-}
-
-// WithDefault sets the default value on this item
-func (h *Header) WithDefault(defaultValue interface{}) *Header {
-	h.Default = defaultValue
-	return h
-}
-
-// WithMaxLength sets a max length value
-func (h *Header) WithMaxLength(max int64) *Header {
-	h.MaxLength = &max
-	return h
-}
-
-// WithMinLength sets a min length value
-func (h *Header) WithMinLength(min int64) *Header {
-	h.MinLength = &min
-	return h
-}
-
-// WithPattern sets a pattern value
-func (h *Header) WithPattern(pattern string) *Header {
-	h.Pattern = pattern
-	return h
-}
-
-// WithMultipleOf sets a multiple of value
-func (h *Header) WithMultipleOf(number float64) *Header {
-	h.MultipleOf = &number
-	return h
-}
-
-// WithMaximum sets a maximum number value
-func (h *Header) WithMaximum(max float64, exclusive bool) *Header {
-	h.Maximum = &max
-	h.ExclusiveMaximum = exclusive
-	return h
-}
-
-// WithMinimum sets a minimum number value
-func (h *Header) WithMinimum(min float64, exclusive bool) *Header {
-	h.Minimum = &min
-	h.ExclusiveMinimum = exclusive
-	return h
-}
-
-// WithEnum sets the enum values (replace)
-func (h *Header) WithEnum(values ...interface{}) *Header {
-	h.Enum = append([]interface{}{}, values...)
- return h -} - -// WithMaxItems sets the max items -func (h *Header) WithMaxItems(size int64) *Header { - h.MaxItems = &size - return h -} - -// WithMinItems sets the min items -func (h *Header) WithMinItems(size int64) *Header { - h.MinItems = &size - return h -} - -// UniqueValues dictates that this array can only have unique items -func (h *Header) UniqueValues() *Header { - h.UniqueItems = true - return h -} - -// AllowDuplicates this array can have duplicates -func (h *Header) AllowDuplicates() *Header { - h.UniqueItems = false - return h -} - -// WithValidations is a fluent method to set header validations -func (h *Header) WithValidations(val CommonValidations) *Header { - h.SetValidations(SchemaValidations{CommonValidations: val}) - return h -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON unmarshals this header from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &h.HeaderProps) -} - -// JSONLookup look up a value by the json property name -func (h Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := h.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go deleted file mode 100644 index c458b49b2..000000000 --- a/vendor/github.com/go-openapi/spec/info.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
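The fluent Header builders above compose as a single chain. A small sketch; the header semantics and values are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A rate-limit style response header built with the fluent API.
	h := spec.ResponseHeader().
		WithDescription("requests remaining in the current window").
		Typed("integer", "int32").
		WithMinimum(0, false)

	b, _ := json.Marshal(h)
	fmt.Println(string(b))
}
```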
-
-package spec
-
-import (
-	"encoding/json"
-	"strings"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-// Extensions vendor specific extensions
-type Extensions map[string]interface{}
-
-// Add adds a value to these extensions
-func (e Extensions) Add(key string, value interface{}) {
-	realKey := strings.ToLower(key)
-	e[realKey] = value
-}
-
-// GetString gets a string value from the extensions
-func (e Extensions) GetString(key string) (string, bool) {
-	if v, ok := e[strings.ToLower(key)]; ok {
-		str, ok := v.(string)
-		return str, ok
-	}
-	return "", false
-}
-
-// GetBool gets a bool value from the extensions
-func (e Extensions) GetBool(key string) (bool, bool) {
-	if v, ok := e[strings.ToLower(key)]; ok {
-		str, ok := v.(bool)
-		return str, ok
-	}
-	return false, false
-}
-
-// GetStringSlice gets a string slice value from the extensions
-func (e Extensions) GetStringSlice(key string) ([]string, bool) {
-	if v, ok := e[strings.ToLower(key)]; ok {
-		arr, isSlice := v.([]interface{})
-		if !isSlice {
-			return nil, false
-		}
-		var strs []string
-		for _, iface := range arr {
-			str, isString := iface.(string)
-			if !isString {
-				return nil, false
-			}
-			strs = append(strs, str)
-		}
-		return strs, ok
-	}
-	return nil, false
-}
-
-// VendorExtensible composition block.
-type VendorExtensible struct {
-	Extensions Extensions
-}
-
-// AddExtension adds an extension to this extensible object
-func (v *VendorExtensible) AddExtension(key string, value interface{}) {
-	if value == nil {
-		return
-	}
-	if v.Extensions == nil {
-		v.Extensions = make(map[string]interface{})
-	}
-	v.Extensions.Add(key, value)
-}
-
-// MarshalJSON marshals the extensions to json
-func (v VendorExtensible) MarshalJSON() ([]byte, error) {
-	toser := make(map[string]interface{})
-	for k, v := range v.Extensions {
-		lk := strings.ToLower(k)
-		if strings.HasPrefix(lk, "x-") {
-			toser[k] = v
-		}
-	}
-	return json.Marshal(toser)
-}
-
-// UnmarshalJSON for this extensible object
-func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
-	var d map[string]interface{}
-	if err := json.Unmarshal(data, &d); err != nil {
-		return err
-	}
-	for k, vv := range d {
-		lk := strings.ToLower(k)
-		if strings.HasPrefix(lk, "x-") {
-			if v.Extensions == nil {
-				v.Extensions = map[string]interface{}{}
-			}
-			v.Extensions[k] = vv
-		}
-	}
-	return nil
-}
-
-// InfoProps the properties for an info definition
-type InfoProps struct {
-	Description    string       `json:"description,omitempty"`
-	Title          string       `json:"title,omitempty"`
-	TermsOfService string       `json:"termsOfService,omitempty"`
-	Contact        *ContactInfo `json:"contact,omitempty"`
-	License        *License     `json:"license,omitempty"`
-	Version        string       `json:"version,omitempty"`
-}
-
-// Info object provides metadata about the API.
-// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
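Only keys prefixed with "x-" survive VendorExtensible.MarshalJSON, as the filter above shows. A brief sketch; the key names are invented:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	info := spec.Info{InfoProps: spec.InfoProps{Title: "demo", Version: "1.0.0"}}
	info.AddExtension("x-audience", "internal") // kept: carries the x- prefix
	info.AddExtension("audience", "internal")   // stored, but dropped on marshal

	b, _ := json.Marshal(info)
	fmt.Println(string(b)) // contains "x-audience" but not "audience"
}
```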
-//
-// For more information: http://goo.gl/8us55a#infoObject
-type Info struct {
-	VendorExtensible
-	InfoProps
-}
-
-// JSONLookup look up a value by the json property name
-func (i Info) JSONLookup(token string) (interface{}, error) {
-	if ex, ok := i.Extensions[token]; ok {
-		return &ex, nil
-	}
-	r, _, err := jsonpointer.GetForToken(i.InfoProps, token)
-	return r, err
-}
-
-// MarshalJSON marshal this to JSON
-func (i Info) MarshalJSON() ([]byte, error) {
-	b1, err := json.Marshal(i.InfoProps)
-	if err != nil {
-		return nil, err
-	}
-	b2, err := json.Marshal(i.VendorExtensible)
-	if err != nil {
-		return nil, err
-	}
-	return swag.ConcatJSON(b1, b2), nil
-}
-
-// UnmarshalJSON unmarshals this from JSON
-func (i *Info) UnmarshalJSON(data []byte) error {
-	if err := json.Unmarshal(data, &i.InfoProps); err != nil {
-		return err
-	}
-	return json.Unmarshal(data, &i.VendorExtensible)
-}
diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go
deleted file mode 100644
index e2afb2133..000000000
--- a/vendor/github.com/go-openapi/spec/items.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spec
-
-import (
-	"encoding/json"
-	"strings"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-const (
-	jsonRef = "$ref"
-)
-
-// SimpleSchema describes swagger simple schemas for parameters and headers
-type SimpleSchema struct {
-	Type             string      `json:"type,omitempty"`
-	Nullable         bool        `json:"nullable,omitempty"`
-	Format           string      `json:"format,omitempty"`
-	Items            *Items      `json:"items,omitempty"`
-	CollectionFormat string      `json:"collectionFormat,omitempty"`
-	Default          interface{} `json:"default,omitempty"`
-	Example          interface{} `json:"example,omitempty"`
-}
-
-// TypeName return the type (or format) of a simple schema
-func (s *SimpleSchema) TypeName() string {
-	if s.Format != "" {
-		return s.Format
-	}
-	return s.Type
-}
-
-// ItemsTypeName yields the type of items in a simple schema array
-func (s *SimpleSchema) ItemsTypeName() string {
-	if s.Items == nil {
-		return ""
-	}
-	return s.Items.TypeName()
-}
-
-// Items a limited subset of JSON-Schema's items object.
-// It is used by parameter definitions that are not located in "body".
-//
-// For more information: http://goo.gl/8us55a#items-object
-type Items struct {
-	Refable
-	CommonValidations
-	SimpleSchema
-	VendorExtensible
-}
-
-// NewItems creates a new instance of items
-func NewItems() *Items {
-	return &Items{}
-}
-
-// Typed a fluent builder method for the type of item
-func (i *Items) Typed(tpe, format string) *Items {
-	i.Type = tpe
-	i.Format = format
-	return i
-}
-
-// AsNullable flags this schema as nullable.
-func (i *Items) AsNullable() *Items {
-	i.Nullable = true
-	return i
-}
-
-// CollectionOf a fluent builder method for an array item
-func (i *Items) CollectionOf(items *Items, format string) *Items {
-	i.Type = jsonArray
-	i.Items = items
-	i.CollectionFormat = format
-	return i
-}
-
-// WithDefault sets the default value on this item
-func (i *Items) WithDefault(defaultValue interface{}) *Items {
-	i.Default = defaultValue
-	return i
-}
-
-// WithMaxLength sets a max length value
-func (i *Items) WithMaxLength(max int64) *Items {
-	i.MaxLength = &max
-	return i
-}
-
-// WithMinLength sets a min length value
-func (i *Items) WithMinLength(min int64) *Items {
-	i.MinLength = &min
-	return i
-}
-
-// WithPattern sets a pattern value
-func (i *Items) WithPattern(pattern string) *Items {
-	i.Pattern = pattern
-	return i
-}
-
-// WithMultipleOf sets a multiple of value
-func (i *Items) WithMultipleOf(number float64) *Items {
-	i.MultipleOf = &number
-	return i
-}
-
-// WithMaximum sets a maximum number value
-func (i *Items) WithMaximum(max float64, exclusive bool) *Items {
-	i.Maximum = &max
-	i.ExclusiveMaximum = exclusive
-	return i
-}
-
-// WithMinimum sets a minimum number value
-func (i *Items) WithMinimum(min float64, exclusive bool) *Items {
-	i.Minimum = &min
-	i.ExclusiveMinimum = exclusive
-	return i
-}
-
-// WithEnum sets the enum values (replace)
-func (i *Items) WithEnum(values ...interface{}) *Items {
-	i.Enum = append([]interface{}{}, values...)
-	return i
-}
-
-// WithMaxItems sets the max items
-func (i *Items) WithMaxItems(size int64) *Items {
-	i.MaxItems = &size
-	return i
-}
-
-// WithMinItems sets the min items
-func (i *Items) WithMinItems(size int64) *Items {
-	i.MinItems = &size
-	return i
-}
-
-// UniqueValues dictates that this array can only have unique items
-func (i *Items) UniqueValues() *Items {
-	i.UniqueItems = true
-	return i
-}
-
-// AllowDuplicates this array can have duplicates
-func (i *Items) AllowDuplicates() *Items {
-	i.UniqueItems = false
-	return i
-}
-
-// WithValidations is a fluent method to set Items validations
-func (i *Items) WithValidations(val CommonValidations) *Items {
-	i.SetValidations(SchemaValidations{CommonValidations: val})
-	return i
-}
-
-// UnmarshalJSON hydrates this items instance with the data from JSON
-func (i *Items) UnmarshalJSON(data []byte) error {
-	var validations CommonValidations
-	if err := json.Unmarshal(data, &validations); err != nil {
-		return err
-	}
-	var ref Refable
-	if err := json.Unmarshal(data, &ref); err != nil {
-		return err
-	}
-	var simpleSchema SimpleSchema
-	if err := json.Unmarshal(data, &simpleSchema); err != nil {
-		return err
-	}
-	var vendorExtensible VendorExtensible
-	if err := json.Unmarshal(data, &vendorExtensible); err != nil {
-		return err
-	}
-	i.Refable = ref
-	i.CommonValidations = validations
-	i.SimpleSchema = simpleSchema
-	i.VendorExtensible = vendorExtensible
-	return nil
-}
-
-// MarshalJSON converts this items object to JSON
-func (i Items) MarshalJSON() ([]byte, error) {
-	b1, err := json.Marshal(i.CommonValidations)
-	if err != nil {
-		return nil, err
-	}
-	b2, err := json.Marshal(i.SimpleSchema)
-	if err != nil {
-		return nil, err
-	}
-	b3, err := json.Marshal(i.Refable)
-	if err != nil {
-		return nil, err
-	}
-	b4, err := json.Marshal(i.VendorExtensible)
-	if err != nil {
-		return nil, err
-	}
-	return swag.ConcatJSON(b4, b3, b1, b2), nil
-}
-
-// JSONLookup look up a value by the json property name
-func (i Items) JSONLookup(token string) (interface{}, error) {
-	if
token == jsonRef { - return &i.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go deleted file mode 100644 index b42f80368..000000000 --- a/vendor/github.com/go-openapi/spec/license.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// License information for the exposed API. -// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - LicenseProps - VendorExtensible -} - -// LicenseProps holds the properties of a License object -type LicenseProps struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} - -// UnmarshalJSON hydrates License from json -func (l *License) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &l.LicenseProps); err != nil { - return err - } - return json.Unmarshal(data, &l.VendorExtensible) -} - -// MarshalJSON produces License as json -func (l License) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(l.LicenseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(l.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go deleted file mode 100644 index d6c483971..000000000 --- a/vendor/github.com/go-openapi/spec/normalizer.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "path" - "strings" -) - -const fileScheme = "file" - -// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. -// -// NOTE(windows): there is a tolerance over the strict URI format on windows. -// -// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like -// 'C:\Path\file.Yaml'. 
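The Items builders deleted above chain the same way as the Header ones. A brief sketch of a csv-encoded array item; the types and bounds are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An array collected as "csv", whose elements are int32 values >= 0.
	elem := spec.NewItems().Typed("integer", "int32").WithMinimum(0, false)
	arr := spec.NewItems().CollectionOf(elem, "csv").WithMinItems(1)

	b, _ := json.Marshal(arr)
	fmt.Println(string(b))
}
```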
-//
-// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path:
-// 'file:///c:/path/file.yaml'
-//
-// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or
-// 'file:///c:\folder\File.json'.
-//
-// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair"
-// is attempted.
-//
-// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()).
-func normalizeURI(refPath, base string) string {
-	refURL, err := url.Parse(refPath)
-	if err != nil {
-		specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err)
-		refURL, refPath = repairURI(refPath)
-	}
-
-	fixWindowsURI(refURL, refPath) // noop on non-windows OS
-
-	refURL.Path = path.Clean(refURL.Path)
-	if refURL.Path == "." {
-		refURL.Path = ""
-	}
-
-	r := MustCreateRef(refURL.String())
-	if r.IsCanonical() {
-		return refURL.String()
-	}
-
-	baseURL, _ := url.Parse(base)
-	if path.IsAbs(refURL.Path) {
-		baseURL.Path = refURL.Path
-	} else if refURL.Path != "" {
-		baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
-	}
-	// copying fragment from ref to base
-	baseURL.Fragment = refURL.Fragment
-
-	return baseURL.String()
-}
-
-// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document.
-//
-// When calling this, we assume that:
-// * $ref is a canonical URI
-// * originalRelativeBase is a canonical URI
-//
-// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected.
-// In this case, expansion stops and normally renders the internal canonical $ref.
-//
-// This internal $ref is eventually rebased to the original RelativeBase used for the expansion.
-//
-// There is a special case for schemas that are anchored with an "id":
-// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document.
-// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing.
-//
-func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref {
-	debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id)
-
-	if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
-		// short circuit: $ref to current doc
-		return *ref
-	}
-
-	if id != "" {
-		idBaseURL, err := url.Parse(id)
-		if err == nil { // if the schema id is not usable as a URI, ignore it
-			if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "")
-				// $ref relative to the ID of the schema in the root document
-				return ref
-			}
-		}
-	}
-
-	originalRelativeBaseURL, _ := url.Parse(originalRelativeBase)
-
-	r, _ := rebase(ref, originalRelativeBaseURL, false)
-
-	return r
-}
-
-func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) {
-	var newBase url.URL
-
-	u := ref.GetURL()
-
-	if u.Scheme != v.Scheme || u.Host != v.Host {
-		return *ref, false
-	}
-
-	docPath := v.Path
-	v.Path = path.Dir(v.Path)
-
-	if v.Path == "."
{ - v.Path = "" - } else if !strings.HasSuffix(v.Path, "/") { - v.Path += "/" - } - - newBase.Fragment = u.Fragment - - if strings.HasPrefix(u.Path, docPath) { - newBase.Path = strings.TrimPrefix(u.Path, docPath) - } else { - newBase.Path = strings.TrimPrefix(u.Path, v.Path) - } - - if notEqual && newBase.Path == "" && newBase.Fragment == "" { - // do not want rebasing to end up in an empty $ref - return *ref, false - } - - if path.IsAbs(newBase.Path) { - // whenever we end up with an absolute path, specify the scheme and host - newBase.Scheme = v.Scheme - newBase.Host = v.Host - } - - return MustCreateRef(newBase.String()), true -} - -// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor -func normalizeRef(ref *Ref, relativeBase string) *Ref { - r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) - return &r -} - -// normalizeBase performs a normalization of the input base path. -// -// This always yields a canonical URI (absolute), usable for the document cache. -// -// It ensures that all further internal work on basePath may safely assume -// a non-empty, cross-platform, canonical URI (i.e. absolute). -// -// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this -// in a file:// URL with lower cased drive letter and path. -// -// See also: https://en.wikipedia.org/wiki/File_URI_scheme -func normalizeBase(in string) string { - u, err := url.Parse(in) - if err != nil { - specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) - u, in = repairURI(in) - } - - u.Fragment = "" // any fragment in the base is irrelevant - - fixWindowsURI(u, in) // noop on non-windows OS - - u.Path = path.Clean(u.Path) - if u.Path == "." { // empty after Clean() - u.Path = "" - } - - if u.Scheme != "" { - if path.IsAbs(u.Path) || u.Scheme != fileScheme { - // this is absolute or explicitly not a local file: we're good - return u.String() - } - } - - // no scheme or file scheme with relative path: assume file and make it absolute - // enforce scheme file://... with absolute path. - // - // If the input path is relative, we anchor the path to the current working directory. - // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json - - u.Scheme = fileScheme - u.Path = absPath(u.Path) // platform-dependent - u.RawQuery = "" // any query component is irrelevant for a base - return u.String() -} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go deleted file mode 100644 index c8a064534..000000000 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build !windows - -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "path/filepath" -) - -// absPath makes a file path absolute and compatible with a URI path component. -// -// The parameter must be a path, not an URI. 
-func absPath(in string) string {
-	anchored, err := filepath.Abs(in)
-	if err != nil {
-		specLogger.Printf("warning: could not resolve current working directory: %v", err)
-		return in
-	}
-	return anchored
-}
-
-func repairURI(in string) (*url.URL, string) {
-	u, _ := url.Parse("")
-	debugLog("repaired URI: original: %q, repaired: %q", in, "")
-	return u, ""
-}
-
-func fixWindowsURI(u *url.URL, in string) {
-}
diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go
deleted file mode 100644
index fe2d1ecd4..000000000
--- a/vendor/github.com/go-openapi/spec/normalizer_windows.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// +build windows
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spec
-
-import (
-	"net/url"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-)
-
-// absPath makes a file path absolute and compatible with a URI path component
-//
-// The parameter must be a path, not an URI.
-func absPath(in string) string {
-	// NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths.
-	// See https://github.com/golang/go/issues/24441
-	if in == "" {
-		in = "."
-	}
-
-	anchored, err := filepath.Abs(in)
-	if err != nil {
-		specLogger.Printf("warning: could not resolve current working directory: %v", err)
-		return in
-	}
-
-	pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`)
-	if !strings.HasPrefix(pth, "/") {
-		pth = "/" + pth
-	}
-
-	return path.Clean(pth)
-}
-
-// repairURI tolerates invalid file URIs with common typos
-// such as 'file://E:\folder\file', that break the regular URL parser.
-//
-// Adopting the same defaults as for unixes (e.g. return an empty path) would
-// yield a counter-intuitive result for that case (e.g. E:\folder\file is
-// eventually resolved as the current directory). The repair will detect the missing "/".
-//
-// Note that this only works for the file scheme.
-func repairURI(in string) (*url.URL, string) {
-	const prefix = fileScheme + "://"
-	if !strings.HasPrefix(in, prefix) {
-		// giving up: resolve to empty path
-		u, _ := url.Parse("")
-
-		return u, ""
-	}
-
-	// attempt the repair, stripping the scheme should be sufficient
-	u, _ := url.Parse(strings.TrimPrefix(in, prefix))
-	debugLog("repaired URI: original: %q, repaired: %q", in, u.String())
-
-	return u, u.String()
-}
-
-// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml
-// and makes it a canonical URI: file:///c:/base/file.yaml
-//
-// Catch 22 notes for Windows:
-//
-// * There may be a drive letter on windows (it is lower-cased)
-// * There may be a share UNC, e.g. \\server\folder\data.xml
-// * Paths are case insensitive
-// * Paths may already contain slashes
-// * Paths must be slashed
-//
-// NOTE: there is no escaping. "/" may be valid separators just like "\".
-// We don't use ToSlash() (which escapes everything) because windows now also -// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. -func fixWindowsURI(u *url.URL, in string) { - drive := filepath.VolumeName(in) - - if len(drive) > 0 { - if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter - u.Scheme = fileScheme - u.Host = "" - u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) - } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume - // NOTE: the special host@port syntax for UNC is not supported (yet) - u.Scheme = fileScheme - - // this is a modified version of filepath.Dir() to apply on the VolumeName itself - i := len(drive) - 1 - for i >= 0 && !os.IsPathSeparator(drive[i]) { - i-- - } - host := drive[:i] // \\host\share => host - - u.Path = strings.TrimPrefix(u.Path, host) - u.Host = strings.TrimPrefix(host, `\\`) - } - - u.Opaque = "" - u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) - - // ensure we form an absolute path - if !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - - u.Path = path.Clean(u.Path) - - return - } - - if u.Scheme == fileScheme { - // Handle dodgy cases for file://{...} URIs on windows. - // A canonical URI should always be followed by an absolute path. - // - // Examples: - // * file:///folder/file => valid, unchanged - // * file:///c:\folder\file => slashed - // * file:///./folder/file => valid, cleaned to remove the dot - // * file:///.\folder\file => remapped to cwd - // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) - // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) - if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { - // ensure we form an absolute path - u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) - if !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - } - u.Path = strings.ToLower(u.Path) - } - - // NOTE: lower case normalization does not propagate to inner resources, - // generated when rebasing: when joining a relative URI with a file to an absolute base, - // only the base is currently lower-cased. - // - // For now, we assume this is good enough for most use cases - // and try not to generate too many differences - // between the output produced on different platforms. - u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) -} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go deleted file mode 100644 index 995ce6acb..000000000 --- a/vendor/github.com/go-openapi/spec/operation.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package spec
-
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/json"
-	"sort"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-func init() {
-	gob.Register(map[string]interface{}{})
-	gob.Register([]interface{}{})
-}
-
-// OperationProps describes an operation
-//
-// NOTES:
-// - schemes, when present, must be from [http, https, ws, wss]: see validate
-// - Security is handled as a special case: see MarshalJSON function
-type OperationProps struct {
-	Description  string                 `json:"description,omitempty"`
-	Consumes     []string               `json:"consumes,omitempty"`
-	Produces     []string               `json:"produces,omitempty"`
-	Schemes      []string               `json:"schemes,omitempty"`
-	Tags         []string               `json:"tags,omitempty"`
-	Summary      string                 `json:"summary,omitempty"`
-	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
-	ID           string                 `json:"operationId,omitempty"`
-	Deprecated   bool                   `json:"deprecated,omitempty"`
-	Security     []map[string][]string  `json:"security,omitempty"`
-	Parameters   []Parameter            `json:"parameters,omitempty"`
-	Responses    *Responses             `json:"responses,omitempty"`
-}
-
-// MarshalJSON takes care of serializing operation properties to JSON
-//
-// We use a custom marshaller here to handle a special case related to
-// the Security field. We need to preserve zero length slice
-// while omitting the field when the value is nil/unset.
-func (op OperationProps) MarshalJSON() ([]byte, error) {
-	type Alias OperationProps
-	if op.Security == nil {
-		return json.Marshal(&struct {
-			Security []map[string][]string `json:"security,omitempty"`
-			*Alias
-		}{
-			Security: op.Security,
-			Alias:    (*Alias)(&op),
-		})
-	}
-	return json.Marshal(&struct {
-		Security []map[string][]string `json:"security"`
-		*Alias
-	}{
-		Security: op.Security,
-		Alias:    (*Alias)(&op),
-	})
-}
-
-// Operation describes a single API operation on a path.
-//
-// For more information: http://goo.gl/8us55a#operationObject
-type Operation struct {
-	VendorExtensible
-	OperationProps
-}
-
-// SuccessResponse gets a success response model
-func (o *Operation) SuccessResponse() (*Response, int, bool) {
-	if o.Responses == nil {
-		return nil, 0, false
-	}
-
-	responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses))
-	for k := range o.Responses.StatusCodeResponses {
-		if k >= 200 && k < 300 {
-			responseCodes = append(responseCodes, k)
-		}
-	}
-	if len(responseCodes) > 0 {
-		sort.Ints(responseCodes)
-		v := o.Responses.StatusCodeResponses[responseCodes[0]]
-		return &v, responseCodes[0], true
-	}
-
-	return o.Responses.Default, 0, false
-}
-
-// JSONLookup look up a value by the json property name
-func (o Operation) JSONLookup(token string) (interface{}, error) {
-	if ex, ok := o.Extensions[token]; ok {
-		return &ex, nil
-	}
-	r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
-	return r, err
-}
-
-// UnmarshalJSON hydrates this items instance with the data from JSON
-func (o *Operation) UnmarshalJSON(data []byte) error {
-	if err := json.Unmarshal(data, &o.OperationProps); err != nil {
-		return err
-	}
-	return json.Unmarshal(data, &o.VendorExtensible)
-}
-
-// MarshalJSON converts this items object to JSON
-func (o Operation) MarshalJSON() ([]byte, error) {
-	b1, err := json.Marshal(o.OperationProps)
-	if err != nil {
-		return nil, err
-	}
-	b2, err := json.Marshal(o.VendorExtensible)
-	if err != nil {
-		return nil, err
-	}
-	concated := swag.ConcatJSON(b1, b2)
-	return concated, nil
-}
-
-// NewOperation creates a new operation instance.
-// It expects an ID as parameter but not passing an ID is also valid.
-func NewOperation(id string) *Operation {
-	op := new(Operation)
-	op.ID = id
-	return op
-}
-
-// WithID sets the ID property on this operation, allows for chaining.
-func (o *Operation) WithID(id string) *Operation {
-	o.ID = id
-	return o
-}
-
-// WithDescription sets the description on this operation, allows for chaining
-func (o *Operation) WithDescription(description string) *Operation {
-	o.Description = description
-	return o
-}
-
-// WithSummary sets the summary on this operation, allows for chaining
-func (o *Operation) WithSummary(summary string) *Operation {
-	o.Summary = summary
-	return o
-}
-
-// WithExternalDocs sets/removes the external docs for/from this operation.
-// When you pass empty strings as params the external documents will be removed.
-// When you pass non-empty string as one value then those values will be used on the external docs object.
-// So when you pass a non-empty description, you should also pass the url and vice versa.
-func (o *Operation) WithExternalDocs(description, url string) *Operation {
-	if description == "" && url == "" {
-		o.ExternalDocs = nil
-		return o
-	}
-
-	if o.ExternalDocs == nil {
-		o.ExternalDocs = &ExternalDocumentation{}
-	}
-	o.ExternalDocs.Description = description
-	o.ExternalDocs.URL = url
-	return o
-}
-
-// Deprecate marks the operation as deprecated
-func (o *Operation) Deprecate() *Operation {
-	o.Deprecated = true
-	return o
-}
-
-// Undeprecate marks the operation as not deprecated
-func (o *Operation) Undeprecate() *Operation {
-	o.Deprecated = false
-	return o
-}
-
-// WithConsumes adds media types for incoming body values
-func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
-	o.Consumes = append(o.Consumes, mediaTypes...)
-	return o
-}
-
-// WithProduces adds media types for outgoing body values
-func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
-	o.Produces = append(o.Produces, mediaTypes...)
-	return o
-}
-
-// WithTags adds tags for this operation
-func (o *Operation) WithTags(tags ...string) *Operation {
-	o.Tags = append(o.Tags, tags...)
-	return o
-}
-
-// AddParam adds a parameter to this operation, when a parameter for that location
-// and with that name already exists it will be replaced
-func (o *Operation) AddParam(param *Parameter) *Operation {
-	if param == nil {
-		return o
-	}
-
-	for i, p := range o.Parameters {
-		if p.Name == param.Name && p.In == param.In {
-			params := append(o.Parameters[:i], *param)
-			params = append(params, o.Parameters[i+1:]...)
-			o.Parameters = params
-			return o
-		}
-	}
-
-	o.Parameters = append(o.Parameters, *param)
-	return o
-}
-
-// RemoveParam removes a parameter from the operation
-func (o *Operation) RemoveParam(name, in string) *Operation {
-	for i, p := range o.Parameters {
-		if p.Name == name && p.In == in {
-			o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
-			return o
-		}
-	}
-	return o
-}
-
-// SecuredWith adds a security scope to this operation.
-func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
-	o.Security = append(o.Security, map[string][]string{name: scopes})
-	return o
-}
-
-// WithDefaultResponse adds a default response to the operation.
-// Passing a nil value will remove the response
-func (o *Operation) WithDefaultResponse(response *Response) *Operation {
-	return o.RespondsWith(0, response)
-}
-
-// RespondsWith adds a status code response to the operation.
-// When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation -func (o *Operation) RespondsWith(code int, response *Response) *Operation { - if o.Responses == nil { - o.Responses = new(Responses) - } - if code == 0 { - o.Responses.Default = response - return o - } - if response == nil { - delete(o.Responses.StatusCodeResponses, code) - return o - } - if o.Responses.StatusCodeResponses == nil { - o.Responses.StatusCodeResponses = make(map[int]Response) - } - o.Responses.StatusCodeResponses[code] = *response - return o -} - -type opsAlias OperationProps - -type gobAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *opsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (o Operation) GobEncode() ([]byte, error) { - raw := struct { - Ext VendorExtensible - Props OperationProps - }{ - Ext: o.VendorExtensible, - Props: o.OperationProps, - } - var b bytes.Buffer - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (o *Operation) GobDecode(b []byte) error { - var raw struct { - Ext VendorExtensible - Props OperationProps - } - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - o.VendorExtensible = raw.Ext - o.OperationProps = raw.Props - return nil -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (op OperationProps) GobEncode() ([]byte, error) { - raw := gobAlias{ - Alias: (*opsAlias)(&op), - } - - var b bytes.Buffer - if op.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(op.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(op.Security)) - for _, req := range op.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (op *OperationProps) GobDecode(b []byte) error { - var raw gobAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) 
-			}
-			raw.Alias.Security = append(raw.Alias.Security, v)
-		}
-	}
-
-	*op = *(*OperationProps)(raw.Alias)
-	return nil
-}
diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go
deleted file mode 100644
index 2b2b89b67..000000000
--- a/vendor/github.com/go-openapi/spec/parameter.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spec
-
-import (
-	"encoding/json"
-	"strings"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-// QueryParam creates a query parameter
-func QueryParam(name string) *Parameter {
-	return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}}
-}
-
-// HeaderParam creates a header parameter, this is always required by default
-func HeaderParam(name string) *Parameter {
-	return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}}
-}
-
-// PathParam creates a path parameter, this is always required
-func PathParam(name string) *Parameter {
-	return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}}
-}
-
-// BodyParam creates a body parameter
-func BodyParam(name string, schema *Schema) *Parameter {
-	return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}}
-}
-
-// FormDataParam creates a formData parameter
-func FormDataParam(name string) *Parameter {
-	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}}
-}
-
-// FileParam creates a formData file parameter
-func FileParam(name string) *Parameter {
-	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"},
-		SimpleSchema: SimpleSchema{Type: "file"}}
-}
-
-// SimpleArrayParam creates a param for a simple array (string, int, date etc)
-func SimpleArrayParam(name, tpe, fmt string) *Parameter {
-	return &Parameter{ParamProps: ParamProps{Name: name},
-		SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv",
-			Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}}
-}
-
-// ParamRef creates a parameter that's a json reference
-func ParamRef(uri string) *Parameter {
-	p := new(Parameter)
-	p.Ref = MustCreateRef(uri)
-	return p
-}
-
-// ParamProps describes the specific attributes of an operation parameter
-//
-// NOTE:
-// - Schema is defined when "in" == "body": see validate
-// - AllowEmptyValue is allowed where "in" == "query" || "formData"
-type ParamProps struct {
-	Description     string  `json:"description,omitempty"`
-	Name            string  `json:"name,omitempty"`
-	In              string  `json:"in,omitempty"`
-	Required        bool    `json:"required,omitempty"`
-	Schema          *Schema `json:"schema,omitempty"`
-	AllowEmptyValue bool    `json:"allowEmptyValue,omitempty"`
-}
-
-// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
-//
-// There are five possible parameter types.
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. 
-// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// JSONLookup look up a value by the json property name -func (p Parameter) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.ParamProps, token) - return r, err -} - -// WithDescription a fluent builder method for the description of the parameter -func (p *Parameter) WithDescription(description string) *Parameter { - p.Description = description - return p -} - -// Named a fluent builder method to override the name of the parameter -func (p *Parameter) Named(name string) *Parameter { - p.Name = name - return p -} - -// WithLocation a fluent builder method to override the location of the parameter -func (p *Parameter) WithLocation(in string) *Parameter { - p.In = in - return p -} - -// Typed a fluent builder method for the type of the parameter value -func (p *Parameter) Typed(tpe, format string) *Parameter { - p.Type = tpe - p.Format = format - return p -} - -// CollectionOf a fluent builder method for an array parameter -func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = jsonArray - p.Items = items - p.CollectionFormat = format - return p -} - -// WithDefault sets the default value on this parameter -func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { - p.AsOptional() // with default implies optional - p.Default = defaultValue - return p -} - -// AllowsEmptyValues flags this parameter as being ok with empty values -func (p *Parameter) AllowsEmptyValues() *Parameter { - p.AllowEmptyValue = true - return p -} - -// NoEmptyValues flags this parameter as not liking empty values -func (p *Parameter) NoEmptyValues() *Parameter { - p.AllowEmptyValue = false - return p -} - -// AsOptional flags this parameter as optional -func (p *Parameter) AsOptional() *Parameter { - p.Required = false - return p -} - -// AsRequired flags this parameter as required -func (p *Parameter) AsRequired() *Parameter { - if p.Default != nil { // with a default required makes no sense - return p - } - p.Required = true - return p -} - -// WithMaxLength sets a max length value -func (p *Parameter) WithMaxLength(max int64) *Parameter { - p.MaxLength = &max - return p -} - -// WithMinLength sets a min length value -func (p *Parameter) WithMinLength(min int64) *Parameter { - p.MinLength = &min - return p -} - -// WithPattern sets a pattern value -func (p *Parameter) WithPattern(pattern string) *Parameter { - p.Pattern = pattern - return p -} - -// WithMultipleOf sets a multiple of value -func (p *Parameter) WithMultipleOf(number float64) *Parameter { - p.MultipleOf = &number - return p -} - -// WithMaximum sets a maximum number value -func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { - p.Maximum = &max - p.ExclusiveMaximum = exclusive - return p -} - -// WithMinimum sets a minimum number value -func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter {
-	p.Minimum = &min
-	p.ExclusiveMinimum = exclusive
-	return p
-}
-
-// WithEnum sets the enum values (replace)
-func (p *Parameter) WithEnum(values ...interface{}) *Parameter {
-	p.Enum = append([]interface{}{}, values...)
-	return p
-}
-
-// WithMaxItems sets the max items
-func (p *Parameter) WithMaxItems(size int64) *Parameter {
-	p.MaxItems = &size
-	return p
-}
-
-// WithMinItems sets the min items
-func (p *Parameter) WithMinItems(size int64) *Parameter {
-	p.MinItems = &size
-	return p
-}
-
-// UniqueValues dictates that this array can only have unique items
-func (p *Parameter) UniqueValues() *Parameter {
-	p.UniqueItems = true
-	return p
-}
-
-// AllowDuplicates this array can have duplicates
-func (p *Parameter) AllowDuplicates() *Parameter {
-	p.UniqueItems = false
-	return p
-}
-
-// WithValidations is a fluent method to set parameter validations
-func (p *Parameter) WithValidations(val CommonValidations) *Parameter {
-	p.SetValidations(SchemaValidations{CommonValidations: val})
-	return p
-}
-
-// UnmarshalJSON hydrates this items instance with the data from JSON
-func (p *Parameter) UnmarshalJSON(data []byte) error {
-	if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
-		return err
-	}
-	if err := json.Unmarshal(data, &p.Refable); err != nil {
-		return err
-	}
-	if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
-		return err
-	}
-	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
-		return err
-	}
-	return json.Unmarshal(data, &p.ParamProps)
-}
-
-// MarshalJSON converts this items object to JSON
-func (p Parameter) MarshalJSON() ([]byte, error) {
-	b1, err := json.Marshal(p.CommonValidations)
-	if err != nil {
-		return nil, err
-	}
-	b2, err := json.Marshal(p.SimpleSchema)
-	if err != nil {
-		return nil, err
-	}
-	b3, err := json.Marshal(p.Refable)
-	if err != nil {
-		return nil, err
-	}
-	b4, err := json.Marshal(p.VendorExtensible)
-	if err != nil {
-		return nil, err
-	}
-	b5, err := json.Marshal(p.ParamProps)
-	if err != nil {
-		return nil, err
-	}
-	return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
-}
diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go
deleted file mode 100644
index 68fc8e901..000000000
--- a/vendor/github.com/go-openapi/spec/path_item.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
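Putting the Operation and Parameter builders together: a sketch in which the operation ID, path parameter, and response text are invented, and spec.NewResponse is assumed from the package's response builders.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	op := spec.NewOperation("getItem").
		WithSummary("fetch one item").
		WithProduces("application/json")
	op.AddParam(spec.PathParam("itemId").Typed("integer", "int64"))
	op.RespondsWith(200, spec.NewResponse().WithDescription("the item"))

	b, _ := json.Marshal(op)
	fmt.Println(string(b))
}
```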
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// PathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. -// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// JSONLookup look up a value by the json property name -func (p PathItem) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.PathItemProps) -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go deleted file mode 100644 index 9dc82a290..000000000 --- a/vendor/github.com/go-openapi/spec/paths.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// JSONLookup look up a value by the json property name -func (p Paths) JSONLookup(token string) (interface{}, error) { - if pi, ok := p.Paths[token]; ok { - return &pi, nil - } - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go deleted file mode 100644 index 2af13787a..000000000 --- a/vendor/github.com/go-openapi/spec/properties.go +++ /dev/null @@ -1,91 +0,0 @@ -package spec - -import ( - "bytes" - "encoding/json" - "reflect" - "sort" -) - -// OrderSchemaItem holds a named schema (e.g. from a property of an object) -type OrderSchemaItem struct { - Name string - Schema -} - -// OrderSchemaItems is a sortable slice of named schemas. -// The ordering is defined by the x-order schema extension. -type OrderSchemaItems []OrderSchemaItem - -// MarshalJSON produces a json object with keys defined by the name schemas -// of the OrderSchemaItems slice, keeping the original order of the slice. 
-func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { - buf := bytes.NewBuffer(nil) - buf.WriteString("{") - for i := range items { - if i > 0 { - buf.WriteString(",") - } - buf.WriteString("\"") - buf.WriteString(items[i].Name) - buf.WriteString("\":") - bs, err := json.Marshal(&items[i].Schema) - if err != nil { - return nil, err - } - buf.Write(bs) - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -func (items OrderSchemaItems) Len() int { return len(items) } -func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } -func (items OrderSchemaItems) Less(i, j int) (ret bool) { - ii, oki := items[i].Extensions.GetString("x-order") - ij, okj := items[j].Extensions.GetString("x-order") - if oki { - if okj { - defer func() { - if err := recover(); err != nil { - defer func() { - if err = recover(); err != nil { - ret = items[i].Name < items[j].Name - } - }() - ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() - } - }() - return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int() - } - return true - } else if okj { - return false - } - return items[i].Name < items[j].Name -} - -// SchemaProperties is a map representing the properties of a Schema object. -// It knows how to transform its keys into an ordered slice. -type SchemaProperties map[string]Schema - -// ToOrderedSchemaItems transforms the map of properties into a sortable slice -func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { - items := make(OrderSchemaItems, 0, len(properties)) - for k, v := range properties { - items = append(items, OrderSchemaItem{ - Name: k, - Schema: v, - }) - } - sort.Sort(items) - return items -} - -// MarshalJSON produces properties as json, keeping their order. -func (properties SchemaProperties) MarshalJSON() ([]byte, error) { - if properties == nil { - return []byte("null"), nil - } - return json.Marshal(properties.ToOrderedSchemaItems()) -} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go deleted file mode 100644 index b0ef9bd9c..000000000 --- a/vendor/github.com/go-openapi/spec/ref.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
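The x-order extension read by OrderSchemaItems.Less above is what makes SchemaProperties marshal with a deterministic key order. A hedged sketch (property names are invented; the x-order values are strings, since the lookup goes through Extensions.GetString and string comparison is the reflect fallback in Less):

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	first := *spec.StringProperty()
	first.AddExtension("x-order", "1") // sorts before "2" via the fallback in Less
	second := *spec.Int64Property()
	second.AddExtension("x-order", "2")

	props := spec.SchemaProperties{
		"zulu":  first,  // by name alone this key would come last
		"alpha": second, // x-order wins over the name-based fallback
	}

	out, err := props.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "zulu" is emitted before "alpha"
}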
- -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "net/http" - "os" - "path/filepath" - - "github.com/go-openapi/jsonreference" -) - -// Refable is a struct for things that accept a $ref property -type Refable struct { - Ref Ref -} - -// MarshalJSON marshals the ref to json -func (r Refable) MarshalJSON() ([]byte, error) { - return r.Ref.MarshalJSON() -} - -// UnmarshalJSON unmarshalss the ref from json -func (r *Refable) UnmarshalJSON(d []byte) error { - return json.Unmarshal(d, &r.Ref) -} - -// Ref represents a json reference that is potentially resolved -type Ref struct { - jsonreference.Ref -} - -// RemoteURI gets the remote uri part of the ref -func (r *Ref) RemoteURI() string { - if r.String() == "" { - return "" - } - - u := *r.GetURL() - u.Fragment = "" - return u.String() -} - -// IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI(basepaths ...string) bool { - if r.String() == "" { - return true - } - - v := r.RemoteURI() - if v == "" { - return true - } - - if r.HasFullURL { - //nolint:noctx,gosec - rr, err := http.Get(v) - if err != nil { - return false - } - defer rr.Body.Close() - - return rr.StatusCode/100 == 2 - } - - if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { - return false - } - - // check for local file - pth := v - if r.HasURLPathOnly { - base := "." - if len(basepaths) > 0 { - base = filepath.Dir(filepath.Join(basepaths...)) - } - p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) - if e != nil { - return false - } - pth = p - } - - fi, err := os.Stat(filepath.ToSlash(pth)) - if err != nil { - return false - } - - return !fi.IsDir() -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - ref, err := r.Ref.Inherits(child.Ref) - if err != nil { - return nil, err - } - return &Ref{Ref: *ref}, nil -} - -// NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri -func NewRef(refURI string) (Ref, error) { - ref, err := jsonreference.New(refURI) - if err != nil { - return Ref{}, err - } - return Ref{Ref: ref}, nil -} - -// MustCreateRef creates a ref object but panics when refURI is invalid. -// Use the NewRef method for a version that returns an error. 
-func MustCreateRef(refURI string) Ref { - return Ref{Ref: jsonreference.MustCreateRef(refURI)} -} - -// MarshalJSON marshals this ref into a JSON object -func (r Ref) MarshalJSON() ([]byte, error) { - str := r.String() - if str == "" { - if r.IsRoot() { - return []byte(`{"$ref":""}`), nil - } - return []byte("{}"), nil - } - v := map[string]interface{}{"$ref": str} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshals this ref from a JSON object -func (r *Ref) UnmarshalJSON(d []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(d, &v); err != nil { - return err - } - return r.fromMap(v) -} - -// GobEncode provides a safe gob encoder for Ref -func (r Ref) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw, err := r.MarshalJSON() - if err != nil { - return nil, err - } - err = gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Ref -func (r *Ref) GobDecode(b []byte) error { - var raw []byte - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - return json.Unmarshal(raw, r) -} - -func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go deleted file mode 100644 index 47d1ee13f..000000000 --- a/vendor/github.com/go-openapi/spec/resolver.go +++ /dev/null @@ -1,127 +0,0 @@ -package spec - -import ( - "fmt" - - "github.com/go-openapi/swag" -) - -func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { - options = optionsOrDefault(options) - resolver := defaultSchemaLoader(root, options, nil, nil) - - if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { - return err - } - - return nil -} - -// ResolveRefWithBase resolves a reference against a context root with preservation of base path -func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { - result := new(Schema) - - if err := resolveAnyWithBase(root, ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveRef resolves a reference for a schema against a context root -// ref is guaranteed to be in root (no need to go to external files) -// -// ResolveRef is ONLY called from the code generation module -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - res, _, err := ref.GetPointer().Get(root) - if err != nil { - return nil, err - } - - switch sch := res.(type) { - case Schema: - return &sch, nil - case *Schema: - return sch, nil - case map[string]interface{}: - newSch := new(Schema) - if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { - return nil, err - } - return newSch, nil - default: - return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) - } -} - -// ResolveParameterWithBase resolves a parameter reference against a context root and base path -func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { - result := new(Parameter) - - if err := resolveAnyWithBase(root, &ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveParameter resolves a parameter reference against a context root -func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) { - return ResolveParameterWithBase(root, ref, nil) -} - -// ResolveResponseWithBase resolves response a reference against a context root and base path -func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) { - result := new(Response) - - err := resolveAnyWithBase(root, &ref, result, options) - if err != nil { - return nil, err - } - - return result, nil -} - -// ResolveResponse resolves response a reference against a context root -func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - return ResolveResponseWithBase(root, ref, nil) -} - -// ResolvePathItemWithBase resolves response a path item against a context root and base path -func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { - result := new(PathItem) - - if err := resolveAnyWithBase(root, &ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolvePathItem resolves response a path item against a context root and base path -// -// Deprecated: use ResolvePathItemWithBase instead -func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { - return ResolvePathItemWithBase(root, ref, options) -} - -// ResolveItemsWithBase resolves parameter items reference against a context root and base path. -// -// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. -// Similarly, $ref are forbidden in response headers. -func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { - result := new(Items) - - if err := resolveAnyWithBase(root, &ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveItems resolves parameter items reference against a context root and base path. -// -// Deprecated: use ResolveItemsWithBase instead -func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { - return ResolveItemsWithBase(root, ref, options) -} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go deleted file mode 100644 index 0340b60d8..000000000 --- a/vendor/github.com/go-openapi/spec/response.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// ResponseProps properties specific to a response -type ResponseProps struct { - Description string `json:"description"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` -} - -// Response describes a single response from an API Operation. 
-// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps - VendorExtensible -} - -// JSONLookup look up a value by the json property name -func (r Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &r.Ref, nil - } - ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) - return ptr, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return json.Unmarshal(data, &r.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - var ( - b1 []byte - err error - ) - - if r.Ref.String() == "" { - // when there is no $ref, empty description is rendered as an empty string - b1, err = json.Marshal(r.ResponseProps) - } else { - // when there is $ref inside the schema, description should be omitempty-ied - b1, err = json.Marshal(struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` - }{ - Description: r.ResponseProps.Description, - Schema: r.ResponseProps.Schema, - Examples: r.ResponseProps.Examples, - }) - } - if err != nil { - return nil, err - } - - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} - -// WithDescription sets the description on this response, allows for chaining -func (r *Response) WithDescription(description string) *Response { - r.Description = description - return r -} - -// WithSchema sets the schema on this response, allows for chaining. 
-// Passing a nil argument removes the schema from this response -func (r *Response) WithSchema(schema *Schema) *Response { - r.Schema = schema - return r -} - -// AddHeader adds a header to this response -func (r *Response) AddHeader(name string, header *Header) *Response { - if header == nil { - return r.RemoveHeader(name) - } - if r.Headers == nil { - r.Headers = make(map[string]Header) - } - r.Headers[name] = *header - return r -} - -// RemoveHeader removes a header from this response -func (r *Response) RemoveHeader(name string) *Response { - delete(r.Headers, name) - return r -} - -// AddExample adds an example to this response -func (r *Response) AddExample(mediaType string, example interface{}) *Response { - if r.Examples == nil { - r.Examples = make(map[string]interface{}) - } - r.Examples[mediaType] = example - return r -} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go deleted file mode 100644 index 4efb6f868..000000000 --- a/vendor/github.com/go-openapi/spec/responses.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "github.com/go-openapi/swag" -) - -// Responses is a container for the expected responses of an operation. -// The container maps a HTTP response code to the expected response. -// It is not expected from the documentation to necessarily cover all possible HTTP response codes, -// since they may not be known in advance. However, it is expected from the documentation to cover -// a successful operation response and any known errors. -// -// The `default` can be used a default response object for all HTTP codes that are not covered -// individually by the specification. -// -// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response -// for a successful operation call. 
-// -// For more information: http://goo.gl/8us55a#responsesObject -type Responses struct { - VendorExtensible - ResponsesProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (r Responses) JSONLookup(token string) (interface{}, error) { - if token == "default" { - return r.Default, nil - } - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if i, err := strconv.Atoi(token); err == nil { - if scr, ok := r.StatusCodeResponses[i]; ok { - return scr, nil - } - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { - r.ResponsesProps = ResponsesProps{} - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// ResponsesProps describes all responses for an operation. -// It tells what is the default response and maps all responses with a -// HTTP status code. -type ResponsesProps struct { - Default *Response - StatusCodeResponses map[int]Response -} - -// MarshalJSON marshals responses as JSON -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]Response{} - if r.Default != nil { - toser["default"] = *r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -// UnmarshalJSON unmarshals responses from JSON -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response - if err := json.Unmarshal(data, &res); err != nil { - return nil - } - if v, ok := res["default"]; ok { - r.Default = &v - delete(res, "default") - } - for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} - } - r.StatusCodeResponses[nk] = v - } - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go deleted file mode 100644 index a8d0f737a..000000000 --- a/vendor/github.com/go-openapi/spec/schema.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
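Putting the two deleted files together: response.go supplies the builders and responses.go the status-code container. A minimal sketch (the Pet definition is illustrative; spec.RefSchema comes from the schema.go removed just below):

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ok := spec.NewResponse().
		WithDescription("resource found").
		WithSchema(spec.RefSchema("#/definitions/Pet"))

	resps := spec.Responses{
		ResponsesProps: spec.ResponsesProps{
			Default:             spec.NewResponse().WithDescription("unexpected error"),
			StatusCodeResponses: map[int]spec.Response{200: *ok},
		},
	}

	// ResponsesProps.MarshalJSON flattens this to {"200": {...}, "default": {...}}.
	b, err := resps.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}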
- -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format -func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// 
UnmarshalJSON unmarshal this from JSON -func (r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - return r.fromMap(v) -} - -func (r *SchemaURL) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := url.Parse(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// SchemaProps describes a JSON schema (draft 4) -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-"` - Schema SchemaURL `json:"-"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema `json:"oneOf,omitempty"` - AnyOf []Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties SchemaProperties `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties SchemaProperties `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - XML *XMLObject `json:"xml,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
-// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s Schema) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - if ex, ok := s.ExtraProps[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { - return r, err - } - r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) - return r, err -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = format - } - return s -} - -// AsNullable flags this schema as nullable. -func (s *Schema) AsNullable() *Schema { - s.Nullable = true - return s -} - -// CollectionOf a fluent builder method for an array parameter -func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{jsonArray} - s.Items = &SchemaOrArray{Schema: &items} - return s -} - -// WithDefault sets the default value on this parameter -func (s *Schema) WithDefault(defaultValue interface{}) *Schema { - s.Default = defaultValue - return s -} - -// WithRequired flags this parameter as required -func (s *Schema) WithRequired(items ...string) *Schema { - s.Required = items - return s -} - -// AddRequired adds field names to the required properties array -func (s *Schema) AddRequired(items ...string) *Schema { - s.Required = append(s.Required, items...) 
- return s -} - -// WithMaxLength sets a max length value -func (s *Schema) WithMaxLength(max int64) *Schema { - s.MaxLength = &max - return s -} - -// WithMinLength sets a min length value -func (s *Schema) WithMinLength(min int64) *Schema { - s.MinLength = &min - return s -} - -// WithPattern sets a pattern value -func (s *Schema) WithPattern(pattern string) *Schema { - s.Pattern = pattern - return s -} - -// WithMultipleOf sets a multiple of value -func (s *Schema) WithMultipleOf(number float64) *Schema { - s.MultipleOf = &number - return s -} - -// WithMaximum sets a maximum number value -func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { - s.Maximum = &max - s.ExclusiveMaximum = exclusive - return s -} - -// WithMinimum sets a minimum number value -func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { - s.Minimum = &min - s.ExclusiveMinimum = exclusive - return s -} - -// WithEnum sets a the enum values (replace) -func (s *Schema) WithEnum(values ...interface{}) *Schema { - s.Enum = append([]interface{}{}, values...) - return s -} - -// WithMaxItems sets the max items -func (s *Schema) WithMaxItems(size int64) *Schema { - s.MaxItems = &size - return s -} - -// WithMinItems sets the min items -func (s *Schema) WithMinItems(size int64) *Schema { - s.MinItems = &size - return s -} - -// UniqueValues dictates that this array can only have unique items -func (s *Schema) UniqueValues() *Schema { - s.UniqueItems = true - return s -} - -// AllowDuplicates this array can have duplicates -func (s *Schema) AllowDuplicates() *Schema { - s.UniqueItems = false - return s -} - -// AddToAllOf adds a schema to the allOf property -func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { - s.AllOf = append(s.AllOf, schemas...) - return s -} - -// WithDiscriminator sets the name of the discriminator field -func (s *Schema) WithDiscriminator(discriminator string) *Schema { - s.Discriminator = discriminator - return s -} - -// AsReadOnly flags this schema as readonly -func (s *Schema) AsReadOnly() *Schema { - s.ReadOnly = true - return s -} - -// AsWritable flags this schema as writeable (not read-only) -func (s *Schema) AsWritable() *Schema { - s.ReadOnly = false - return s -} - -// WithExample sets the example for this schema -func (s *Schema) WithExample(example interface{}) *Schema { - s.Example = example - return s -} - -// WithExternalDocs sets/removes the external docs for/from this schema. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. 
-func (s *Schema) WithExternalDocs(description, url string) *Schema { - if description == "" && url == "" { - s.ExternalDocs = nil - return s - } - - if s.ExternalDocs == nil { - s.ExternalDocs = &ExternalDocumentation{} - } - s.ExternalDocs.Description = description - s.ExternalDocs.URL = url - return s -} - -// WithXMLName sets the xml name for the object -func (s *Schema) WithXMLName(name string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Name = name - return s -} - -// WithXMLNamespace sets the xml namespace for the object -func (s *Schema) WithXMLNamespace(namespace string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Namespace = namespace - return s -} - -// WithXMLPrefix sets the xml prefix for the object -func (s *Schema) WithXMLPrefix(prefix string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Prefix = prefix - return s -} - -// AsXMLAttribute flags this object as xml attribute -func (s *Schema) AsXMLAttribute() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = true - return s -} - -// AsXMLElement flags this object as an xml node -func (s *Schema) AsXMLElement() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = false - return s -} - -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types -func (s *Schema) AsWrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = true - return s -} - -// AsUnwrappedXML flags this object as an xml node -func (s *Schema) AsUnwrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = false - return s -} - -// SetValidations defines all schema validations. -// -// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered. 
-func (s *Schema) SetValidations(val SchemaValidations) { - s.Maximum = val.Maximum - s.ExclusiveMaximum = val.ExclusiveMaximum - s.Minimum = val.Minimum - s.ExclusiveMinimum = val.ExclusiveMinimum - s.MaxLength = val.MaxLength - s.MinLength = val.MinLength - s.Pattern = val.Pattern - s.MaxItems = val.MaxItems - s.MinItems = val.MinItems - s.UniqueItems = val.UniqueItems - s.MultipleOf = val.MultipleOf - s.Enum = val.Enum - s.MinProperties = val.MinProperties - s.MaxProperties = val.MaxProperties - s.PatternProperties = val.PatternProperties -} - -// WithValidations is a fluent method to set schema validations -func (s *Schema) WithValidations(val SchemaValidations) *Schema { - s.SetValidations(val) - return s -} - -// Validations returns a clone of the validations for this schema -func (s Schema) Validations() SchemaValidations { - return SchemaValidations{ - CommonValidations: CommonValidations{ - Maximum: s.Maximum, - ExclusiveMaximum: s.ExclusiveMaximum, - Minimum: s.Minimum, - ExclusiveMinimum: s.ExclusiveMinimum, - MaxLength: s.MaxLength, - MinLength: s.MinLength, - Pattern: s.Pattern, - MaxItems: s.MaxItems, - MinItems: s.MinItems, - UniqueItems: s.UniqueItems, - MultipleOf: s.MultipleOf, - Enum: s.Enum, - }, - MinProperties: s.MinProperties, - MaxProperties: s.MaxProperties, - PatternProperties: s.PatternProperties, - } -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - props := struct { - SchemaProps - SwaggerSchemaProps - }{} - if err := json.Unmarshal(data, &props); err != nil { - return err - } - - sch := Schema{ - SchemaProps: props.SchemaProps, - SwaggerSchemaProps: props.SwaggerSchemaProps, - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - _ = sch.Ref.fromMap(d) - _ = sch.Schema.fromMap(d) - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go deleted file mode 100644 index b81175afd..000000000 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strings" - - "github.com/go-openapi/swag" -) - -// PathLoader is a function to use when loading remote refs. -// -// This is a package level default. It may be overridden or bypassed by -// specifying the loader in ExpandOptions. -// -// NOTE: if you are using the go-openapi/loads package, it will override -// this value with its own default (a loader to retrieve YAML documents as -// well as JSON ones). -var PathLoader = func(pth string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(pth) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// resolverContext allows to share a context during spec processing. -// At the moment, it just holds the index of circular references found. -type resolverContext struct { - // circulars holds all visited circular references, to shortcircuit $ref resolution. - // - // This structure is privately instantiated and needs not be locked against - // concurrent access, unless we chose to implement a parallel spec walking. - circulars map[string]bool - basePath string - loadDoc func(string) (json.RawMessage, error) - rootID string -} - -func newResolverContext(options *ExpandOptions) *resolverContext { - expandOptions := optionsOrDefault(options) - - // path loader may be overridden by options - var loader func(string) (json.RawMessage, error) - if expandOptions.PathLoader == nil { - loader = PathLoader - } else { - loader = expandOptions.PathLoader - } - - return &resolverContext{ - circulars: make(map[string]bool), - basePath: expandOptions.RelativeBase, // keep the root base path in context - loadDoc: loader, - } -} - -type schemaLoader struct { - root interface{} - options *ExpandOptions - cache ResolutionCache - context *resolverContext -} - -func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { - if ref.IsRoot() || ref.HasFragmentOnly { - return r - } - - baseRef := MustCreateRef(basePath) - currentRef := normalizeRef(&ref, basePath) - if strings.HasPrefix(currentRef.String(), baseRef.String()) { - return r - } - - // set a new root against which to resolve - rootURL := currentRef.GetURL() - rootURL.Fragment = "" - root, _ := r.cache.Get(rootURL.String()) - - // shallow copy of resolver options to set a new RelativeBase when - // traversing multiple documents - newOptions := r.options - newOptions.RelativeBase = rootURL.String() - - return defaultSchemaLoader(root, newOptions, r.cache, r.context) -} - -func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { - if transitive != r { - if transitive.options != nil && transitive.options.RelativeBase != "" { - return normalizeBase(transitive.options.RelativeBase) - } - } - - return basePath -} - -func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return ErrResolveRefNeedsAPointer - } - - if ref.GetURL() == nil 
{ - return nil - } - - var ( - res interface{} - data interface{} - err error - ) - - // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means - // it is pointing somewhere in the root. - root := r.root - if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) - } - } - - if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { - data = root - } else { - baseRef := normalizeRef(ref, basePath) - data, _, _, err = r.load(baseRef.GetURL()) - if err != nil { - return err - } - } - - res = data - if ref.String() != "" { - res, _, err = ref.GetPointer().Get(data) - if err != nil { - return err - } - } - return swag.DynamicJSONToStruct(res, target) -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) - toFetch := *refURL - toFetch.Fragment = "" - - var err error - pth := toFetch.String() - normalized := normalizeBase(pth) - debugLog("loading doc from: %s", normalized) - - unescaped, err := url.PathUnescape(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - u := url.URL{Path: unescaped} - - data, fromCache := r.cache.Get(u.RequestURI()) - if fromCache { - return data, toFetch, fromCache, nil - } - - b, err := r.context.loadDoc(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - var doc interface{} - if err := json.Unmarshal(b, &doc); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(normalized, doc) - - return doc, toFetch, fromCache, nil -} - -// isCircular detects cycles in sequences of $ref. -// -// It relies on a private context (which needs not be locked). -func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { - normalizedRef := normalizeURI(ref.String(), basePath) - if _, ok := r.context.circulars[normalizedRef]; ok { - // circular $ref has been already detected in another explored cycle - foundCycle = true - return - } - foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased - if foundCycle { - r.context.circulars[normalizedRef] = true - } - return -} - -// Resolve resolves a reference against basePath and stores the result in target. -// -// Resolve is not in charge of following references: it only resolves ref by following its URL. -// -// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. -// -// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct -func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { - return r.resolveRef(ref, target, basePath) -} - -func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { - var ref *Ref - switch refable := input.(type) { - case *Schema: - ref = &refable.Ref - case *Parameter: - ref = &refable.Ref - case *Response: - ref = &refable.Ref - case *PathItem: - ref = &refable.Ref - default: - return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) - } - - curRef := ref.String() - if curRef == "" { - return nil - } - - normalizedRef := normalizeRef(ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ - return nil - } - - if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { - return err - } - - if ref.String() == "" || ref.String() == curRef { - // done with rereferencing - return nil - } - - parentRefs = append(parentRefs, normalizedRef.String()) - return r.deref(input, parentRefs, normalizedBasePath) -} - -func (r *schemaLoader) shouldStopOnError(err error) bool { - if err != nil && !r.options.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - -func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) { - debugLog("schema has ID: %s", id) - - // handling the case when id is a folder - // remember that basePath has to point to a file - var refPath string - if strings.HasSuffix(id, "/") { - // ensure this is detected as a file, not a folder - refPath = fmt.Sprintf("%s%s", id, "placeholder.json") - } else { - refPath = id - } - - // updates the current base path - // * important: ID can be a relative path - // * registers target to be fetchable from the new base proposed by this id - newBasePath := normalizeURI(refPath, basePath) - - // store found IDs for possible future reuse in $ref - r.cache.Set(newBasePath, target) - - // the root document has an ID: all $ref relative to that ID may - // be rebased relative to the root document - if basePath == r.context.basePath { - debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) - r.context.rootID = newBasePath - } - - return newBasePath, refPath -} - -func defaultSchemaLoader( - root interface{}, - expandOptions *ExpandOptions, - cache ResolutionCache, - context *resolverContext) *schemaLoader { - - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } - - cache = cacheOrDefault(cache) - - if expandOptions.RelativeBase == "" { - // if no relative base is provided, assume the root document - // contains all $ref, or at least, that the relative documents - // may be resolved from the current working directory. - expandOptions.RelativeBase = baseForRoot(root, cache) - } - debugLog("effective expander options: %#v", expandOptions) - - if context == nil { - context = newResolverContext(expandOptions) - } - - return &schemaLoader{ - root: root, - options: expandOptions, - cache: cache, - context: context, - } -} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go deleted file mode 100644 index 9d0bdae90..000000000 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
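schema_loader.go above closes out the $ref plumbing being removed; its package-level PathLoader hook and the ResolveRef helper from the deleted resolver.go compose as in this sketch (the logging loader and the in-memory root are illustrative assumptions):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/go-openapi/spec"
)

func main() {
	// Override the package-level hook used when a $ref points at a file or URL.
	spec.PathLoader = func(pth string) (json.RawMessage, error) {
		log.Printf("loading %s", pth)
		data, err := os.ReadFile(pth) // local files only in this sketch
		if err != nil {
			return nil, err
		}
		return json.RawMessage(data), nil
	}

	// ResolveRef only follows fragment refs inside an in-memory root document.
	root := map[string]interface{}{
		"definitions": map[string]interface{}{
			"Pet": map[string]interface{}{"type": "object"},
		},
	}
	ref := spec.MustCreateRef("#/definitions/Pet")
	sch, err := spec.ResolveRef(root, &ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(sch.Type) // [object]
}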
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - basic = "basic" - apiKey = "apiKey" - oauth2 = "oauth2" - implicit = "implicit" - password = "password" - application = "application" - accessCode = "accessCode" -) - -// BasicAuth creates a basic auth security scheme -func BasicAuth() *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} -} - -// APIKeyAuth creates an api key auth security scheme -func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} -} - -// OAuth2Implicit creates an implicit flow oauth2 security scheme -func OAuth2Implicit(authorizationURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: implicit, - AuthorizationURL: authorizationURL, - }} -} - -// OAuth2Password creates a password flow oauth2 security scheme -func OAuth2Password(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: password, - TokenURL: tokenURL, - }} -} - -// OAuth2Application creates an application flow oauth2 security scheme -func OAuth2Application(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: application, - TokenURL: tokenURL, - }} -} - -// OAuth2AccessToken creates an access token flow oauth2 security scheme -func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: accessCode, - AuthorizationURL: authorizationURL, - TokenURL: tokenURL, - }} -} - -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// AddScope adds a scope to this security scheme -func (s *SecuritySchemeProps) AddScope(scope, description string) { - if s.Scopes == nil { - s.Scopes = make(map[string]string) - } - s.Scopes[scope] = description -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. -// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). 
-// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - var ( - b1 []byte - err error - ) - - if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { - // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string - b1, err = json.Marshal(s.SecuritySchemeProps) - } else { - // when not oauth2, empty AuthorizationURL should be omitted - b1, err = json.Marshal(struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 - }{ - Description: s.Description, - Type: s.Type, - Name: s.Name, - In: s.In, - Flow: s.Flow, - AuthorizationURL: s.AuthorizationURL, - TokenURL: s.TokenURL, - Scopes: s.Scopes, - }) - } - if err != nil { - return nil, err - } - - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go deleted file mode 100644 index 7d38b6e62..000000000 --- a/vendor/github.com/go-openapi/spec/spec.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" -) - -//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json -//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema -//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
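The security scheme constructors and the asymmetric MarshalJSON removed above combine into a securityDefinitions block like this. A short sketch using only helpers defined in the deleted file; the header name, URL, and scope values are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	defs := spec.SecurityDefinitions{
		"api_key": spec.APIKeyAuth("X-API-Key", "header"),
		"oauth":   spec.OAuth2Implicit("https://example.com/oauth/authorize"),
	}
	defs["oauth"].AddScope("read:pets", "read access to pets")

	// For oauth2 implicit and accessCode flows MarshalJSON keeps an empty
	// authorizationUrl; for every other scheme type the field is omitted.
	b, err := json.MarshalIndent(defs, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
```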
-//go:generate perl -pi -e s,Json,JSON,g bindata.go - -const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs - SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema - JSONSchemaURL = "http://json-schema.org/draft-04/schema#" -) - -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error -func MustLoadJSONSchemaDraft04() *Schema { - d, e := JSONSchemaDraft04() - if e != nil { - panic(e) - } - return d -} - -// JSONSchemaDraft04 loads the json schema document for json shema draft04 -func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} - -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error -func MustLoadSwagger20Schema() *Schema { - d, e := Swagger20Schema() - if e != nil { - panic(e) - } - return d -} - -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets -func Swagger20Schema() (*Schema, error) { - - b, err := Asset("v2/schema.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go deleted file mode 100644 index 44722ffd5..000000000 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "strconv" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) -// together into one document. 
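The loaders deleted here read schemas embedded through go-bindata's Asset, so no network access is needed at runtime. A small usage sketch of the two accessors and one of the Must variants:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Swagger20Schema unmarshals the embedded v2/schema.json asset.
	schema, err := spec.Swagger20Schema()
	if err != nil {
		panic(err)
	}
	fmt.Println(schema.ID) // http://swagger.io/v2/schema.json#

	// The MustLoad* variants panic instead of returning an error.
	draft4 := spec.MustLoadJSONSchemaDraft04()
	fmt.Println(draft4.ID)
}
```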
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// JSONLookup look up a value by the json property name -func (s Swagger) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) - return r, err -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -// GobEncode provides a safe gob encoder for Swagger, including extensions -func (s Swagger) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw := struct { - Props SwaggerProps - Ext VendorExtensible - }{ - Props: s.SwaggerProps, - Ext: s.VendorExtensible, - } - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Swagger, including extensions -func (s *Swagger) GobDecode(b []byte) error { - var raw struct { - Props SwaggerProps - Ext VendorExtensible - } - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - s.SwaggerProps = raw.Props - s.VendorExtensible = raw.Ext - return nil -} - -// SwaggerProps captures the top-level properties of an Api specification -// -// NOTE: validation rules -// - the scheme, when present must be from [http, https, ws, wss] -// - BasePath must start with a leading "/" -// - Paths is required -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Paths *Paths `json:"paths"` - Definitions Definitions `json:"definitions,omitempty"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -type swaggerPropsAlias SwaggerProps - -type gobSwaggerPropsAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *swaggerPropsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements -func (o SwaggerProps) GobEncode() ([]byte, error) { - raw := gobSwaggerPropsAlias{ - Alias: (*swaggerPropsAlias)(&o), - } - - var b bytes.Buffer - if o.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(o.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) 
- return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(o.Security)) - for _, req := range o.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements -func (o *SwaggerProps) GobDecode(b []byte) error { - var raw gobSwaggerPropsAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) - } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *o = *(*SwaggerProps)(raw.Alias) - return nil -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { - if token == "allows" { - return s.Allows, nil - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - 
} - var nw SchemaOrStringArray - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { - if _, err := strconv.Atoi(token); err == nil { - r, _, err := jsonpointer.GetForToken(s.Schemas, token) - return r, err - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch v := single.(type) { - case string: - *s = StringOrArray([]string{v}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
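The union types deleted in this file (SchemaOrBool, SchemaOrStringArray, StringOrArray) all dispatch on the first byte of the raw JSON to decide which variant to fill. A minimal round-trip sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// additionalProperties: false -> Allows=false with no schema attached.
	var ap spec.SchemaOrBool
	if err := json.Unmarshal([]byte(`false`), &ap); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ap.Allows, ap.Schema == nil) // false true

	// "string" and ["string","null"] both decode into one StringOrArray.
	var single, multi spec.StringOrArray
	_ = json.Unmarshal([]byte(`"string"`), &single)
	_ = json.Unmarshal([]byte(`["string","null"]`), &multi)
	fmt.Println(single.Contains("string"), multi.Contains("null")) // true true
}
```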
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} - -// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go deleted file mode 100644 index faa3d3de1..000000000 --- a/vendor/github.com/go-openapi/spec/tag.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// TagProps describe a tag entry in the top level tags section of a swagger spec -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// NewTag creates a new tag -func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} -} - -// Tag allows adding meta data to a single tag that is used by the -// [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (t Tag) JSONLookup(token string) (interface{}, error) { - if ex, ok := t.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(t.TagProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go deleted file mode 100644 index 6360a8ea7..000000000 --- a/vendor/github.com/go-openapi/spec/validations.go +++ /dev/null @@ -1,215 +0,0 @@ -package spec - -// CommonValidations describe common JSON-schema validations -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// SetValidations defines all validations for a simple schema. -// -// NOTE: the input is the larger set of validations available for schemas. -// For simple schemas, MinProperties and MaxProperties are ignored. -func (v *CommonValidations) SetValidations(val SchemaValidations) { - v.Maximum = val.Maximum - v.ExclusiveMaximum = val.ExclusiveMaximum - v.Minimum = val.Minimum - v.ExclusiveMinimum = val.ExclusiveMinimum - v.MaxLength = val.MaxLength - v.MinLength = val.MinLength - v.Pattern = val.Pattern - v.MaxItems = val.MaxItems - v.MinItems = val.MinItems - v.UniqueItems = val.UniqueItems - v.MultipleOf = val.MultipleOf - v.Enum = val.Enum -} - -type clearedValidation struct { - Validation string - Value interface{} -} - -type clearedValidations []clearedValidation - -func (c clearedValidations) apply(cbs []func(string, interface{})) { - for _, cb := range cbs { - for _, cleared := range c { - cb(cleared.Validation, cleared.Value) - } - } -} - -// ClearNumberValidations clears all number validations. -// -// Some callbacks may be set by the caller to capture changed values. 
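Tag marshaling above concatenates TagProps with any vendor extensions via swag.ConcatJSON. A brief sketch; AddExtension comes from the package's VendorExtensible (defined elsewhere, not in this file) and the tag values are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	t := spec.NewTag("pets", "Everything about your pets", nil)
	t.AddExtension("x-display-name", "Pets") // stored on VendorExtensible

	// MarshalJSON merges TagProps and the extensions into a single object.
	b, err := json.Marshal(t)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"description":"Everything about your pets","name":"pets","x-display-name":"Pets"}
}
```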
-func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 5) - defer func() { - done.apply(cbs) - }() - - if v.Minimum != nil { - done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) - v.Minimum = nil - } - if v.Maximum != nil { - done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) - v.Maximum = nil - } - if v.ExclusiveMaximum { - done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) - v.ExclusiveMaximum = false - } - if v.ExclusiveMinimum { - done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) - v.ExclusiveMinimum = false - } - if v.MultipleOf != nil { - done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) - v.MultipleOf = nil - } -} - -// ClearStringValidations clears all string validations. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.Pattern != "" { - done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) - v.Pattern = "" - } - if v.MinLength != nil { - done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) - v.MinLength = nil - } - if v.MaxLength != nil { - done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) - v.MaxLength = nil - } -} - -// ClearArrayValidations clears all array validations. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.MaxItems != nil { - done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) - v.MaxItems = nil - } - if v.MinItems != nil { - done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) - v.MinItems = nil - } - if v.UniqueItems { - done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) - v.UniqueItems = false - } -} - -// Validations returns a clone of the validations for a simple schema. -// -// NOTE: in the context of simple schema objects, MinProperties, MaxProperties -// and PatternProperties remain unset. 
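Each Clear*Validations helper above records what it wipes and replays it to optional callbacks, so callers can log or diff the removed constraints. A compact sketch:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	min, max := 1.0, 10.0
	v := spec.CommonValidations{Minimum: &min, Maximum: &max, ExclusiveMaximum: true}

	// The callback receives the validation name and its previous value
	// (a *float64 for the numeric bounds).
	v.ClearNumberValidations(func(name string, old interface{}) {
		fmt.Println("cleared:", name)
	})

	fmt.Println(v.HasNumberValidations()) // false
}
```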
-func (v CommonValidations) Validations() SchemaValidations { - return SchemaValidations{ - CommonValidations: v, - } -} - -// HasNumberValidations indicates if the validations are for numbers or integers -func (v CommonValidations) HasNumberValidations() bool { - return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil -} - -// HasStringValidations indicates if the validations are for strings -func (v CommonValidations) HasStringValidations() bool { - return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" -} - -// HasArrayValidations indicates if the validations are for arrays -func (v CommonValidations) HasArrayValidations() bool { - return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems -} - -// HasEnum indicates if the validation includes some enum constraint -func (v CommonValidations) HasEnum() bool { - return len(v.Enum) > 0 -} - -// SchemaValidations describes the validation properties of a schema -// -// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change -// in the exported members: all initializers using litterals would fail. -type SchemaValidations struct { - CommonValidations - - PatternProperties SchemaProperties `json:"patternProperties,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` -} - -// HasObjectValidations indicates if the validations are for objects -func (v SchemaValidations) HasObjectValidations() bool { - return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil -} - -// SetValidations for schema validations -func (v *SchemaValidations) SetValidations(val SchemaValidations) { - v.CommonValidations.SetValidations(val) - v.PatternProperties = val.PatternProperties - v.MaxProperties = val.MaxProperties - v.MinProperties = val.MinProperties -} - -// Validations for a schema -func (v SchemaValidations) Validations() SchemaValidations { - val := v.CommonValidations.Validations() - val.PatternProperties = v.PatternProperties - val.MinProperties = v.MinProperties - val.MaxProperties = v.MaxProperties - return val -} - -// ClearObjectValidations returns a clone of the validations with all object validations cleared. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.MaxProperties != nil { - done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties}) - v.MaxProperties = nil - } - if v.MinProperties != nil { - done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties}) - v.MinProperties = nil - } - if v.PatternProperties != nil { - done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties}) - v.PatternProperties = nil - } -} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go deleted file mode 100644 index 945a46703..000000000 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// XMLObject a metadata object that allows for more fine-tuned XML model definitions. -// -// For more information: http://goo.gl/8us55a#xmlObject -type XMLObject struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Prefix string `json:"prefix,omitempty"` - Attribute bool `json:"attribute,omitempty"` - Wrapped bool `json:"wrapped,omitempty"` -} - -// WithName sets the xml name for the object -func (x *XMLObject) WithName(name string) *XMLObject { - x.Name = name - return x -} - -// WithNamespace sets the xml namespace for the object -func (x *XMLObject) WithNamespace(namespace string) *XMLObject { - x.Namespace = namespace - return x -} - -// WithPrefix sets the xml prefix for the object -func (x *XMLObject) WithPrefix(prefix string) *XMLObject { - x.Prefix = prefix - return x -} - -// AsAttribute flags this object as xml attribute -func (x *XMLObject) AsAttribute() *XMLObject { - x.Attribute = true - return x -} - -// AsElement flags this object as an xml node -func (x *XMLObject) AsElement() *XMLObject { - x.Attribute = false - return x -} - -// AsWrapped flags this object as wrapped, this is mostly useful for array types -func (x *XMLObject) AsWrapped() *XMLObject { - x.Wrapped = true - return x -} - -// AsUnwrapped flags this object as an xml node -func (x *XMLObject) AsUnwrapped() *XMLObject { - x.Wrapped = false - return x -} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/strfmt/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes deleted file mode 100644 index d020be8ea..000000000 --- a/vendor/github.com/go-openapi/strfmt/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go text eol=lf - diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/strfmt/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml deleted file mode 100644 index da12d5e3b..000000000 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ /dev/null @@ -1,49 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: 
- min-complexity: 31 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - wrapcheck - - testpackage - - nlreturn - - gofumpt - - goerr113 - - gci - - gomnd - - godot - - exhaustivestruct - - paralleltest - - varnamelen - - ireturn - #- thelper - -issues: - exclude-rules: - - path: bson.go - text: "should be .*ObjectID" - linters: - - golint - - stylecheck - diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. 
The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/strfmt/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md deleted file mode 100644 index 0cf89d776..000000000 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) - -This package exposes a registry of data types to support string formats in the go-openapi toolkit. - -strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. - -## Supported data formats -go-openapi/strfmt follows the swagger 2.0 specification with the following formats -defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). - -It also provides convenient extensions to go-openapi users. - -- [x] JSON-schema draft 4 formats - - date-time - - email - - hostname - - ipv4 - - ipv6 - - uri -- [x] swagger 2.0 format extensions - - binary - - byte (e.g. base64 encoded string) - - date (e.g. "1970-01-01") - - password -- [x] go-openapi custom format extensions - - bsonobjectid (BSON objectID) - - creditcard - - duration (e.g. "3 weeks", "1ms") - - hexcolor (e.g. "#FFFFFF") - - isbn, isbn10, isbn13 - - mac (e.g "01:02:03:04:05:06") - - rgbcolor (e.g. "rgb(100,100,100)") - - ssn - - uuid, uuid3, uuid4, uuid5 - - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") - - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec)) - -> NOTE: as the name stands for, this package is intended to support string formatting only. 
-> It does not provide validation for numerical values with swagger format extension for JSON types "number" or -> "integer" (e.g. float, double, int32...). - -## Type conversion - -All types defined here are stringers and may be converted to strings with `.String()`. -Note that most types defined by this package may be converted directly to string like `string(Email{})`. - -`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`. -Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})` - -## Using pointers - -The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does -with primitive types. - -## Format types -Types defined in strfmt expose marshaling and validation capabilities. - -List of defined types: -- Base64 -- CreditCard -- Date -- DateTime -- Duration -- Email -- HexColor -- Hostname -- IPv4 -- IPv6 -- CIDR -- ISBN -- ISBN10 -- ISBN13 -- MAC -- ObjectId -- Password -- RGBColor -- SSN -- URI -- UUID -- UUID3 -- UUID4 -- UUID5 -- [ULID](https://github.com/ulid/spec) diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go deleted file mode 100644 index 8740b1505..000000000 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
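As the deleted README notes, every strfmt type is a stringer and a plain type conversion away from its standard-library counterpart. A minimal conversion sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"
)

func main() {
	// strfmt types are thin wrappers: conversions go both ways.
	d := strfmt.Date(time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC))
	fmt.Println(d.String()) // 1970-01-01

	t := time.Time(d) // back to the standard-library type
	fmt.Println(t.Year()) // 1970

	// The default registry maps format names to validators.
	fmt.Println(strfmt.IsDate("1970-01-01")) // true
}
```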
- -package strfmt - -import ( - "database/sql/driver" - "fmt" - - "go.mongodb.org/mongo-driver/bson" - - "go.mongodb.org/mongo-driver/bson/bsontype" - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" -) - -func init() { - var id ObjectId - // register this format in the default registry - Default.Add("bsonobjectid", &id, IsBSONObjectID) -} - -// IsBSONObjectID returns true when the string is a valid BSON.ObjectId -func IsBSONObjectID(str string) bool { - _, err := bsonprim.ObjectIDFromHex(str) - return err == nil -} - -// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) -// -// swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive - -// NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { //nolint:revive - oid, err := bsonprim.ObjectIDFromHex(hex) - if err != nil { - panic(err) - } - return ObjectId(oid) -} - -// MarshalText turns this instance into text -func (id ObjectId) MarshalText() ([]byte, error) { - oid := bsonprim.ObjectID(id) - if oid == bsonprim.NilObjectID { - return nil, nil - } - return []byte(oid.Hex()), nil -} - -// UnmarshalText hydrates this instance from text -func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on - if len(data) == 0 { - *id = ObjectId(bsonprim.NilObjectID) - return nil - } - oidstr := string(data) - oid, err := bsonprim.ObjectIDFromHex(oidstr) - if err != nil { - return err - } - *id = ObjectId(oid) - return nil -} - -// Scan read a value from a database driver -func (id *ObjectId) Scan(raw interface{}) error { - var data []byte - switch v := raw.(type) { - case []byte: - data = v - case string: - data = []byte(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) - } - - return id.UnmarshalText(data) -} - -// Value converts a value to a database driver value -func (id ObjectId) Value() (driver.Value, error) { - return driver.Value(bsonprim.ObjectID(id).Hex()), nil -} - -func (id ObjectId) String() string { - return bsonprim.ObjectID(id).Hex() -} - -// MarshalJSON returns the ObjectId as JSON -func (id ObjectId) MarshalJSON() ([]byte, error) { - return bsonprim.ObjectID(id).MarshalJSON() -} - -// UnmarshalJSON sets the ObjectId from JSON -func (id *ObjectId) UnmarshalJSON(data []byte) error { - var obj bsonprim.ObjectID - if err := obj.UnmarshalJSON(data); err != nil { - return err - } - *id = ObjectId(obj) - return nil -} - -// MarshalBSON renders the object id as a BSON document -func (id ObjectId) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) -} - -// UnmarshalBSON reads the objectId from a BSON document -func (id *ObjectId) UnmarshalBSON(data []byte) error { - var obj struct { - Data bsonprim.ObjectID - } - if err := bson.Unmarshal(data, &obj); err != nil { - return err - } - *id = ObjectId(obj.Data) - return nil -} - -// MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { - oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil -} - -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. 
UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - var oid bsonprim.ObjectID - copy(oid[:], data) - *id = ObjectId(oid) - return nil -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (id *ObjectId) DeepCopyInto(out *ObjectId) { - *out = *id -} - -// DeepCopy copies the receiver into a new ObjectId. -func (id *ObjectId) DeepCopy() *ObjectId { - if id == nil { - return nil - } - out := new(ObjectId) - id.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go deleted file mode 100644 index f0b310964..000000000 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "time" - - "go.mongodb.org/mongo-driver/bson" -) - -func init() { - d := Date{} - // register this format in the default registry - Default.Add("date", &d, IsDate) -} - -// IsDate returns true when the string is a valid date -func IsDate(str string) bool { - _, err := time.Parse(RFC3339FullDate, str) - return err == nil -} - -const ( - // RFC3339FullDate represents a full-date as specified by RFC3339 - // See: http://goo.gl/xXOvVd - RFC3339FullDate = "2006-01-02" -) - -// Date represents a date from the API -// -// swagger:strfmt date -type Date time.Time - -// String converts this date into a string -func (d Date) String() string { - return time.Time(d).Format(RFC3339FullDate) -} - -// UnmarshalText parses a text representation into a date type -func (d *Date) UnmarshalText(text []byte) error { - if len(text) == 0 { - return nil - } - dd, err := time.Parse(RFC3339FullDate, string(text)) - if err != nil { - return err - } - *d = Date(dd) - return nil -} - -// MarshalText serializes this date type to string -func (d Date) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// Scan scans a Date value from database driver type. -func (d *Date) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - return d.UnmarshalText(v) - case string: - return d.UnmarshalText([]byte(v)) - case time.Time: - *d = Date(v) - return nil - case nil: - *d = Date{} - return nil - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v) - } -} - -// Value converts Date to a primitive value ready to written to a database. 
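The ObjectId wrapper deleted above validates and (un)marshals BSON object IDs across text, JSON, BSON, and database/sql. A short sketch; the hex value is an illustrative 24-character ID:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	hex := "5e1f2a3b4c5d6e7f80912345" // illustrative 24-char hex id
	if !strfmt.IsBSONObjectID(hex) {
		panic("not a valid BSON ObjectId")
	}

	// NewObjectId panics on malformed input, hence the guard above.
	id := strfmt.NewObjectId(hex)

	b, _ := json.Marshal(id)
	fmt.Println(string(b)) // "5e1f2a3b4c5d6e7f80912345"
	fmt.Println(id.String())
}
```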
-func (d Date) Value() (driver.Value, error) { - return driver.Value(d.String()), nil -} - -// MarshalJSON returns the Date as JSON -func (d Date) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Time(d).Format(RFC3339FullDate)) -} - -// UnmarshalJSON sets the Date from JSON -func (d *Date) UnmarshalJSON(data []byte) error { - if string(data) == jsonNull { - return nil - } - var strdate string - if err := json.Unmarshal(data, &strdate); err != nil { - return err - } - tt, err := time.Parse(RFC3339FullDate, strdate) - if err != nil { - return err - } - *d = Date(tt) - return nil -} - -func (d Date) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) -} - -func (d *Date) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { - return err - } - - if data, ok := m["data"].(string); ok { - rd, err := time.Parse(RFC3339FullDate, data) - if err != nil { - return err - } - *d = Date(rd) - return nil - } - - return errors.New("couldn't unmarshal bson bytes value as Date") -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (d *Date) DeepCopyInto(out *Date) { - *out = *d -} - -// DeepCopy copies the receiver into a new Date. -func (d *Date) DeepCopy() *Date { - if d == nil { - return nil - } - out := new(Date) - d.DeepCopyInto(out) - return out -} - -// GobEncode implements the gob.GobEncoder interface. -func (d Date) GobEncode() ([]byte, error) { - return d.MarshalBinary() -} - -// GobDecode implements the gob.GobDecoder interface. -func (d *Date) GobDecode(data []byte) error { - return d.UnmarshalBinary(data) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d Date) MarshalBinary() ([]byte, error) { - return time.Time(d).MarshalBinary() -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Date) UnmarshalBinary(data []byte) error { - var original time.Time - - err := original.UnmarshalBinary(data) - if err != nil { - return err - } - - *d = Date(original) - - return nil -} - -// Equal checks if two Date instances are equal -func (d Date) Equal(d2 Date) bool { - return time.Time(d).Equal(time.Time(d2)) -} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go deleted file mode 100644 index a89a4de3f..000000000 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ /dev/null @@ -1,2035 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
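Date, likewise, round-trips through JSON, database drivers, and gob, and compares through time.Time. A minimal sketch of the JSON and sql.Scanner paths shown above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/strfmt"
)

func main() {
	var d strfmt.Date
	if err := json.Unmarshal([]byte(`"2024-04-30"`), &d); err != nil {
		log.Fatal(err)
	}

	// Scan accepts []byte, string, time.Time, or nil from a database driver.
	var fromDB strfmt.Date
	if err := fromDB.Scan("2024-04-30"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(d.Equal(fromDB)) // true
	fmt.Println(d.String())      // 2024-04-30
}
```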
- -package strfmt - -import ( - "database/sql/driver" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/mail" - "regexp" - "strings" - - "github.com/asaskevich/govalidator" - "go.mongodb.org/mongo-driver/bson" -) - -const ( - // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114 - // A string instance is valid against this attribute if it is a valid - // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // http://tools.ietf.org/html/rfc1034#section-3.5 - // ::= any one of the ten digits 0 through 9 - // var digit = /[0-9]/; - // ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case - // var letter = /[a-zA-Z]/; - // ::= | - // var letDig = /[0-9a-zA-Z]/; - // ::= | "-" - // var letDigHyp = /[-0-9a-zA-Z]/; - // ::= | - // var ldhStr = /[-0-9a-zA-Z]+/; - //