diff --git a/examples/helloworld_agent/agent.go b/examples/helloworld_agent/agent.go index be75b1fb9..4f80a2f18 100644 --- a/examples/helloworld_agent/agent.go +++ b/examples/helloworld_agent/agent.go @@ -18,11 +18,11 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" cmdfactory "open-cluster-management.io/addon-framework/pkg/cmd/factory" "open-cluster-management.io/addon-framework/pkg/lease" "open-cluster-management.io/addon-framework/pkg/version" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) func NewAgentCommand(addonName string) *cobra.Command { diff --git a/go.mod b/go.mod index 6bab039c6..5c046c636 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 open-cluster-management.io/api v0.13.0 - open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 + open-cluster-management.io/sdk-go v0.13.1-0.20240614070053-a01091a14da7 sigs.k8s.io/controller-runtime v0.17.2 ) @@ -41,8 +41,10 @@ require ( github.com/bwmarrin/snowflake v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 // indirect github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 // indirect - github.com/cloudevents/sdk-go/v2 v2.15.2 // indirect + github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf // indirect + github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect @@ -78,6 +80,8 @@ require ( github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -109,13 +113,13 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index 8235a81be..f405631f6 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,8 @@ cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiV cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 
h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -12,6 +14,10 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= @@ -28,12 +34,20 @@ github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqy github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 h1:3/pjormyqkSjF2GHQehTELZ9oqlER4GrJZiVUIk8Fy8= +github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991/go.mod h1:xiar5+gk13WqyAUQ/cpcxcjD1IhLe/PeilSfCdPcfMU= github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 h1:pXyRKZ0T5WoB6X9QnHS5cEyW0Got39bNQIECxGUKVO4= github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995/go.mod h1:mz9oS2Yhh/S7cvrrsgGMMR+6Shy0ZyL2lDN1sHQO1wE= -github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= -github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= +github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf h1:91HOb+vxZZQ1rJTJtvhJPRl2qyQa5bqh7lrIYhQSDnQ= +github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 h1:icCHutJouWlQREayFwCc7lxDAhws08td+W3/gdqgZts= +github.com/confluentinc/confluent-kafka-go/v2 v2.3.0/go.mod h1:/VTy8iEpe6mD9pkCH5BhijlUl8ulUXymKv1Qig5Rgb8= +github.com/containerd/containerd v1.7.11 
h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= +github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -45,6 +59,14 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eclipse/paho.golang v0.11.0 h1:6Avu5dkkCfcB61/y1vx+XrPQ0oAl4TPYtY0uw3HbQdM= @@ -156,6 +178,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -163,6 +187,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 
h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= @@ -173,6 +199,12 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/mochi-mqtt/server/v2 v2.4.6 h1:3iaQLG4hD/2vSh0Rwu4+h//KUcWR2zAKQIxhJuoJmCg= github.com/mochi-mqtt/server/v2 v2.4.6/go.mod h1:M1lZnLbyowXUyQBIlHYlX1wasxXqv/qFWwQxAzfphwA= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -180,6 +212,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -195,6 +229,12 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/openshift/build-machinery-go v0.0.0-20231128094528-1e9b1b0595c8 h1:cu3YUMVGsKIyFyJGO3F6BZKGYQZpCKxAv9cBPgQAca8= github.com/openshift/build-machinery-go v0.0.0-20231128094528-1e9b1b0595c8/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/library-go v0.0.0-20240116081341-964bcb3f545c h1:gLylEQQryG+A6nqWYIwE1wUzn1eFUmthjADvflMWKnM= @@ -244,6 +284,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO 
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/testcontainers/testcontainers-go v0.14.0 h1:h0D5GaYG9mhOWr2qHdEKDXpkce/VlvaYOCzTRi6UBi8= +github.com/testcontainers/testcontainers-go v0.14.0/go.mod h1:hSRGJ1G8Q5Bw2gXgPulJOLlEBaYJHeBSOkQM5JLG+JQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -305,8 +347,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -321,8 +363,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -347,13 +389,13 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -441,8 +483,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= -open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 h1:zFmHuW+ztdfUUNslqNW+H1WEcfdEUQHoRDbmdajX340= -open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090/go.mod h1:w2OaxtCyegxeyFLU42UQ3oxUz01QdsBQkcHI17T/l48= +open-cluster-management.io/sdk-go v0.13.1-0.20240614070053-a01091a14da7 h1:/Tit/ldsK/+gwYpljBPzOGpFwdN44+yIOiHO+kja5XU= +open-cluster-management.io/sdk-go v0.13.1-0.20240614070053-a01091a14da7/go.mod h1:muWzHWsgK8IsopltwTnsBjf4DN9IcC9rF0G2uEq/Pjw= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= diff --git a/pkg/addonmanager/addontesting/helpers.go b/pkg/addonmanager/addontesting/helpers.go index c0a6c43de..4ff588451 100644 --- a/pkg/addonmanager/addontesting/helpers.go +++ b/pkg/addonmanager/addontesting/helpers.go @@ -17,7 +17,7 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/addon-framework/pkg/basecontroller/events" + "open-cluster-management.io/sdk-go/pkg/basecontroller/events" ) type FakeSyncContext struct { diff --git a/pkg/addonmanager/base_manager.go b/pkg/addonmanager/base_manager.go index 5cc585935..7e2002cc4 100644 --- a/pkg/addonmanager/base_manager.go +++ b/pkg/addonmanager/base_manager.go @@ -22,8 +22,8 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/utils" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) // BaseAddonManagerImpl is the base implementation of BaseAddonManager diff --git a/pkg/addonmanager/cloudevents/manager.go b/pkg/addonmanager/cloudevents/manager.go index 6d2b71414..5bc2915e9 100644 --- a/pkg/addonmanager/cloudevents/manager.go +++ b/pkg/addonmanager/cloudevents/manager.go @@ -2,6 +2,7 @@ package cloudevents import ( "context" + "fmt" "time" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,8 +21,12 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager" "open-cluster-management.io/addon-framework/pkg/index" + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + "open-cluster-management.io/sdk-go/pkg/cloudevents/constants" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" ) // cloudeventsAddonManager is the implementation of AddonManager with @@ -44,11 +49,37 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { // ManifestWork client that implements the ManifestWorkInterface and ManifestWork informer based on different // driver configuration. // Refer to Event Based Manifestwork proposal in enhancements repo to get more details. - _, clientConfig, err := cloudeventswork.NewConfigLoader(a.options.WorkDriver, a.options.WorkDriverConfig). - WithKubeConfig(a.GetConfig()). - LoadConfig() - if err != nil { - return err + var workClient workclientset.Interface + var watcherStore *store.SourceInformerWatcherStore + var err error + switch a.options.WorkDriver { + case "kube": + workClient, err = workclientset.NewForConfig(config) + if err != nil { + return err + } + case constants.ConfigTypeGRPC, constants.ConfigTypeMQTT: + watcherStore = store.NewSourceInformerWatcherStore(ctx) + + _, clientConfig, err := generic.NewConfigLoader(a.options.WorkDriver, a.options.WorkDriverConfig). + LoadConfig() + if err != nil { + return err + } + + clientHolder, err := cloudeventswork.NewClientHolderBuilder(clientConfig). + WithClientID(a.options.CloudEventsClientID). + WithSourceID(a.options.SourceID). + WithCodecs(codec.NewManifestBundleCodec()). + WithWorkClientWatcherStore(watcherStore). + NewSourceClientHolder(ctx) + if err != nil { + return err + } + + workClient = clientHolder.WorkInterface() + default: + return fmt.Errorf("unsupported work driver: %s", a.options.WorkDriver) } // we need a separated filtered manifestwork informers so we only watch the manifestworks that manifestworkreplicaset cares. @@ -68,16 +99,6 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { }, ) - clientHolder, err := cloudeventswork.NewClientHolderBuilder(clientConfig). - WithClientID(a.options.CloudEventsClientID). - WithSourceID(a.options.SourceID). - WithInformerConfig(10*time.Minute, workInformOption). - WithCodecs(codec.NewManifestBundleCodec()). 
- NewSourceClientHolder(ctx) - if err != nil { - return err - } - kubeClient, err := kubernetes.NewForConfig(config) if err != nil { return err @@ -117,8 +138,13 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { }), ) - workClient := clientHolder.WorkInterface() - workInformers := clientHolder.ManifestWorkInformer() + factory := workinformers.NewSharedInformerFactoryWithOptions(workClient, 30*time.Minute, workInformOption) + workInformers := factory.Work().V1().ManifestWorks() + + // For cloudevents work client, we use the informer store as the client store + if watcherStore != nil { + watcherStore.SetStore(workInformers.Informer().GetStore()) + } // addonDeployController err = workInformers.Informer().AddIndexers( diff --git a/pkg/addonmanager/cloudevents/options.go b/pkg/addonmanager/cloudevents/options.go index c6657e136..43cc0d3d9 100644 --- a/pkg/addonmanager/cloudevents/options.go +++ b/pkg/addonmanager/cloudevents/options.go @@ -2,7 +2,6 @@ package cloudevents import ( "github.com/spf13/cobra" - cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work" ) // CloudEventsOptions defines the flags for addon manager @@ -17,7 +16,7 @@ type CloudEventsOptions struct { func NewCloudEventsOptions() *CloudEventsOptions { return &CloudEventsOptions{ // set default work driver to kube - WorkDriver: cloudeventswork.ConfigTypeKube, + WorkDriver: "kube", } } diff --git a/pkg/addonmanager/controllers/addonconfig/controller.go b/pkg/addonmanager/controllers/addonconfig/controller.go index 5172d6b8f..687185801 100644 --- a/pkg/addonmanager/controllers/addonconfig/controller.go +++ b/pkg/addonmanager/controllers/addonconfig/controller.go @@ -13,13 +13,13 @@ import ( "k8s.io/client-go/dynamic/dynamiclister" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" "open-cluster-management.io/addon-framework/pkg/utils" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" "open-cluster-management.io/sdk-go/pkg/patcher" ) diff --git a/pkg/addonmanager/controllers/agentdeploy/controller.go b/pkg/addonmanager/controllers/agentdeploy/controller.go index 2c0ceca91..ffcf30f90 100644 --- a/pkg/addonmanager/controllers/agentdeploy/controller.go +++ b/pkg/addonmanager/controllers/agentdeploy/controller.go @@ -31,8 +31,8 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) const ( diff --git a/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go b/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go index 03e45f9b1..54cf6b45c 100644 --- a/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go +++ b/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go @@ -11,7 +11,7 @@ import ( workapiv1 "open-cluster-management.io/api/work/v1" 
"open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) type defaultHookSyncer struct { diff --git a/pkg/addonmanager/controllers/agentdeploy/default_sync.go b/pkg/addonmanager/controllers/agentdeploy/default_sync.go index 621851726..c376a8653 100644 --- a/pkg/addonmanager/controllers/agentdeploy/default_sync.go +++ b/pkg/addonmanager/controllers/agentdeploy/default_sync.go @@ -9,7 +9,7 @@ import ( workapiv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) type defaultSyncer struct { diff --git a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go index 992646e3f..d01842e72 100644 --- a/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go +++ b/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go @@ -13,8 +13,8 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/utils" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) type healthCheckSyncer struct { diff --git a/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go b/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go index 3ee5eb47b..fae0432b3 100644 --- a/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go +++ b/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go @@ -14,7 +14,7 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) type hostedHookSyncer struct { diff --git a/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go b/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go index 19a9bca4d..50d72e3b9 100644 --- a/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go +++ b/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go @@ -14,7 +14,7 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) type hostedSyncer struct { diff --git a/pkg/addonmanager/controllers/certificate/csrapprove.go b/pkg/addonmanager/controllers/certificate/csrapprove.go index 214eb905f..91d52dcfa 100644 --- a/pkg/addonmanager/controllers/certificate/csrapprove.go +++ b/pkg/addonmanager/controllers/certificate/csrapprove.go @@ -27,7 +27,7 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) var ( diff --git a/pkg/addonmanager/controllers/certificate/csrsign.go b/pkg/addonmanager/controllers/certificate/csrsign.go index 07543ea83..30badd287 100644 --- a/pkg/addonmanager/controllers/certificate/csrsign.go +++ 
b/pkg/addonmanager/controllers/certificate/csrsign.go @@ -22,7 +22,7 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) // csrApprovingController auto approve the renewal CertificateSigningRequests for an accepted spoke cluster on the hub. diff --git a/pkg/addonmanager/controllers/cmaconfig/controller.go b/pkg/addonmanager/controllers/cmaconfig/controller.go index 55b877688..22072fa59 100644 --- a/pkg/addonmanager/controllers/cmaconfig/controller.go +++ b/pkg/addonmanager/controllers/cmaconfig/controller.go @@ -19,9 +19,9 @@ import ( addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" "open-cluster-management.io/sdk-go/pkg/patcher" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" "open-cluster-management.io/addon-framework/pkg/utils" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) const ( diff --git a/pkg/addonmanager/controllers/cmamanagedby/controller.go b/pkg/addonmanager/controllers/cmamanagedby/controller.go index a8c679094..abd459676 100644 --- a/pkg/addonmanager/controllers/cmamanagedby/controller.go +++ b/pkg/addonmanager/controllers/cmamanagedby/controller.go @@ -13,7 +13,7 @@ import ( "open-cluster-management.io/sdk-go/pkg/patcher" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) const ( diff --git a/pkg/addonmanager/controllers/registration/controller.go b/pkg/addonmanager/controllers/registration/controller.go index db9bef351..af60c33ac 100644 --- a/pkg/addonmanager/controllers/registration/controller.go +++ b/pkg/addonmanager/controllers/registration/controller.go @@ -19,8 +19,8 @@ import ( "open-cluster-management.io/sdk-go/pkg/patcher" "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/utils" + "open-cluster-management.io/sdk-go/pkg/basecontroller/factory" ) // addonRegistrationController reconciles instances of ManagedClusterAddon on the hub. diff --git a/pkg/certrotation/cabundle.go b/pkg/certrotation/cabundle.go deleted file mode 100644 index e4b7f92cf..000000000 --- a/pkg/certrotation/cabundle.go +++ /dev/null @@ -1,110 +0,0 @@ -package certrotation - -import ( - "context" - "crypto/x509" - "fmt" - "reflect" - - "github.com/openshift/library-go/pkg/crypto" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - corev1listers "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/util/cert" - - "open-cluster-management.io/addon-framework/pkg/utils" -) - -// CABundleRotation maintains a CA bundle config map, but adding new CA certs and removing expired old ones. -type CABundleRotation struct { - Namespace string - Name string - Lister corev1listers.ConfigMapLister - Client corev1client.ConfigMapsGetter -} - -func (c CABundleRotation) EnsureConfigMapCABundle(signingCertKeyPair *crypto.CA) ([]*x509.Certificate, error) { - // by this point we have current signing cert/key pair. 
We now need to make sure that the - // ca-bundle configmap has this cert and doesn't have any expired certs - originalCABundleConfigMap, err := c.Lister.ConfigMaps(c.Namespace).Get(c.Name) - if err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - caBundleConfigMap := originalCABundleConfigMap.DeepCopy() - if apierrors.IsNotFound(err) { - // create an empty one - caBundleConfigMap = &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}} - } - if _, err = manageCABundleConfigMap(caBundleConfigMap, signingCertKeyPair.Config.Certs[0]); err != nil { - return nil, err - } - if originalCABundleConfigMap == nil || - originalCABundleConfigMap.Data == nil || - !equality.Semantic.DeepEqual(originalCABundleConfigMap.Data, caBundleConfigMap.Data) { - actualCABundleConfigMap, _, err := utils.ApplyConfigMap(context.TODO(), c.Client, caBundleConfigMap) - if err != nil { - return nil, err - } - caBundleConfigMap = actualCABundleConfigMap - } - - caBundle := caBundleConfigMap.Data["ca-bundle.crt"] - if len(caBundle) == 0 { - return nil, fmt.Errorf("configmap/%s -n%s missing ca-bundle.crt", - caBundleConfigMap.Name, caBundleConfigMap.Namespace) - } - certificates, err := cert.ParseCertsPEM([]byte(caBundle)) - if err != nil { - return nil, err - } - - return certificates, nil -} - -// manageCABundleConfigMap adds the new certificate to the list of cabundles, eliminates duplicates, -// and prunes the list of expired certs to trust as signers -func manageCABundleConfigMap( - caBundleConfigMap *corev1.ConfigMap, currentSigner *x509.Certificate) ([]*x509.Certificate, error) { - if caBundleConfigMap.Data == nil { - caBundleConfigMap.Data = map[string]string{} - } - - certificates := []*x509.Certificate{} - caBundle := caBundleConfigMap.Data["ca-bundle.crt"] - if len(caBundle) > 0 { - var err error - certificates, err = cert.ParseCertsPEM([]byte(caBundle)) - if err != nil { - return nil, err - } - } - certificates = append([]*x509.Certificate{currentSigner}, certificates...) - certificates = crypto.FilterExpiredCerts(certificates...) - - finalCertificates := []*x509.Certificate{} - // now check for duplicates. n^2, but super simple - for i := range certificates { - found := false - for j := range finalCertificates { - if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) { - found = true - break - } - } - if !found { - finalCertificates = append(finalCertificates, certificates[i]) - } - } - - caBytes, err := crypto.EncodeCertificates(finalCertificates...) 
- if err != nil { - return nil, err - } - - caBundleConfigMap.Data["ca-bundle.crt"] = string(caBytes) - - return finalCertificates, nil -} diff --git a/pkg/certrotation/cabundle_test.go b/pkg/certrotation/cabundle_test.go deleted file mode 100644 index 11cd23c22..000000000 --- a/pkg/certrotation/cabundle_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package certrotation - -import ( - "crypto/x509" - "reflect" - "testing" - "time" - - "github.com/openshift/library-go/pkg/crypto" - corev1 "k8s.io/api/core/v1" -) - -func TestManageCABundleConfigMap(t *testing.T) { - caCert1, err := newCaCert("signer1", time.Hour*1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - caCert2, err := newCaCert("signer2", time.Hour*24) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - cases := []struct { - name string - caBundle []byte - existingCaCerts []*x509.Certificate - signerCert *x509.Certificate - expectErr bool - expectCaNum int - }{ - { - name: "invalid ca bundle", - caBundle: []byte("invalid data"), - expectErr: true, - }, - { - name: "without existing ca", - signerCert: caCert1, - expectCaNum: 1, - }, - { - name: "with existing ca", - signerCert: caCert1, - existingCaCerts: []*x509.Certificate{caCert2}, - expectCaNum: 2, - }, - { - name: "reduce duplicated", - signerCert: caCert1, - existingCaCerts: []*x509.Certificate{caCert1, caCert2}, - expectCaNum: 2, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - configmap := &corev1.ConfigMap{} - if len(c.caBundle) > 0 { - configmap.Data = map[string]string{ - "ca-bundle.crt": string(c.caBundle), - } - } else if len(c.existingCaCerts) > 0 { - caBytes, err := crypto.EncodeCertificates(c.existingCaCerts...) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - configmap.Data = map[string]string{ - "ca-bundle.crt": string(caBytes), - } - } - caCerts, err := manageCABundleConfigMap(configmap, c.signerCert) - switch { - case err != nil: - if !c.expectErr { - t.Fatalf("Expect no error, but got %v", err) - } - default: - if c.expectErr { - t.Fatalf("Expect an error") - } - - if len(caCerts) != c.expectCaNum { - t.Fatalf("Expect %d ca certs, but got %d", c.expectCaNum, len(caCerts)) - } - - if !reflect.DeepEqual(c.signerCert, caCerts[0]) { - t.Fatalf("Current signer cert should be put at the begining") - } - } - }) - } -} - -func newCaCert(signerName string, validity time.Duration) (*x509.Certificate, error) { - ca, err := crypto.MakeSelfSignedCAConfigForDuration(signerName, validity) - if err != nil { - return nil, err - } - - return ca.Certs[0], nil -} diff --git a/pkg/certrotation/signer.go b/pkg/certrotation/signer.go deleted file mode 100644 index 201b7d56d..000000000 --- a/pkg/certrotation/signer.go +++ /dev/null @@ -1,113 +0,0 @@ -package certrotation - -import ( - "bytes" - "context" - "fmt" - "time" - - "github.com/openshift/library-go/pkg/crypto" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - corev1listers "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/util/cert" - - "open-cluster-management.io/addon-framework/pkg/utils" -) - -// SigningRotation rotates a self-signed signing CA stored in a secret. It creates a new one when 80% -// of the lifetime of the old CA has passed. 
-type SigningRotation struct { - Namespace string - Name string - SignerNamePrefix string - Validity time.Duration - Lister corev1listers.SecretLister - Client corev1client.SecretsGetter -} - -func (c SigningRotation) EnsureSigningCertKeyPair() (*crypto.CA, error) { - originalSigningCertKeyPairSecret, err := c.Lister.Secrets(c.Namespace).Get(c.Name) - if err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - signingCertKeyPairSecret := originalSigningCertKeyPairSecret.DeepCopy() - if apierrors.IsNotFound(err) { - // create an empty one - signingCertKeyPairSecret = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}} - } - signingCertKeyPairSecret.Type = corev1.SecretTypeTLS - - if reason := needNewSigningCertKeyPair(signingCertKeyPairSecret); len(reason) > 0 { - if err := setSigningCertKeyPairSecret(signingCertKeyPairSecret, c.SignerNamePrefix, c.Validity); err != nil { - return nil, err - } - - actualSigningCertKeyPairSecret, _, err := utils.ApplySecret(context.TODO(), c.Client, signingCertKeyPairSecret) - if err != nil { - return nil, err - } - signingCertKeyPairSecret = actualSigningCertKeyPairSecret - } - // at this point, the secret has the correct signer, so we should read that signer to be able to sign - signingCertKeyPair, err := crypto.GetCAFromBytes(signingCertKeyPairSecret.Data["tls.crt"], signingCertKeyPairSecret.Data["tls.key"]) - if err != nil { - return nil, err - } - - return signingCertKeyPair, nil -} - -func needNewSigningCertKeyPair(secret *corev1.Secret) string { - certData := secret.Data["tls.crt"] - if len(certData) == 0 { - return "missing tls.crt" - } - - certificates, err := cert.ParseCertsPEM(certData) - if err != nil { - return "bad certificate" - } - if len(certificates) == 0 { - return "missing certificate" - } - - cert := certificates[0] - if time.Now().After(cert.NotAfter) { - return "already expired" - } - - maxWait := cert.NotAfter.Sub(cert.NotBefore) / 5 - latestTime := cert.NotAfter.Add(-maxWait) - now := time.Now() - if now.After(latestTime) { - return fmt.Sprintf("expired in %6.3f seconds", cert.NotAfter.Sub(now).Seconds()) - } - - return "" -} - -// setSigningCertKeyPairSecret creates a new signing cert/key pair and sets them in the secret -func setSigningCertKeyPairSecret(signingCertKeyPairSecret *corev1.Secret, signerNamePrefix string, validity time.Duration) error { - signerName := fmt.Sprintf("%s@%d", signerNamePrefix, time.Now().Unix()) - ca, err := crypto.MakeSelfSignedCAConfigForDuration(signerName, validity) - if err != nil { - return err - } - - certBytes := &bytes.Buffer{} - keyBytes := &bytes.Buffer{} - if err := ca.WriteCertConfig(certBytes, keyBytes); err != nil { - return err - } - - if signingCertKeyPairSecret.Data == nil { - signingCertKeyPairSecret.Data = map[string][]byte{} - } - signingCertKeyPairSecret.Data["tls.crt"] = certBytes.Bytes() - signingCertKeyPairSecret.Data["tls.key"] = keyBytes.Bytes() - - return nil -} diff --git a/pkg/certrotation/signer_test.go b/pkg/certrotation/signer_test.go deleted file mode 100644 index d006da6b8..000000000 --- a/pkg/certrotation/signer_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package certrotation - -import ( - "bytes" - "testing" - "time" - - "github.com/openshift/library-go/pkg/crypto" - corev1 "k8s.io/api/core/v1" -) - -func TestNeedNewSigningCertKeyPair(t *testing.T) { - certData1, keyData1, err := newSigningCertKeyPair("signer1", time.Hour*-1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - certData2, keyData2, err := 
newSigningCertKeyPair("signer2", time.Hour*1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - cases := []struct { - name string - secret *corev1.Secret - validateReason validateReasonFunc - }{ - { - name: "missing tls.crt", - secret: &corev1.Secret{}, - validateReason: expectReason("missing tls.crt"), - }, - { - name: "bad certificate", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": []byte("invalid data"), - }, - }, - validateReason: expectReason("bad certificate"), - }, - { - name: "expired", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData1, - "tls.key": keyData1, - }, - }, - validateReason: expectReason("already expired"), - }, - { - name: "no new cert needed", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData2, - "tls.key": keyData2, - }, - }, - validateReason: expectReason(""), - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - actual := needNewSigningCertKeyPair(c.secret) - c.validateReason(t, actual) - }) - } -} - -func newSigningCertKeyPair(signerName string, validity time.Duration) (certData, keyData []byte, err error) { - ca, err := crypto.MakeSelfSignedCAConfigForDuration(signerName, validity) - if err != nil { - return nil, nil, err - } - - certBytes := &bytes.Buffer{} - keyBytes := &bytes.Buffer{} - if err := ca.WriteCertConfig(certBytes, keyBytes); err != nil { - return nil, nil, err - } - - return certBytes.Bytes(), keyBytes.Bytes(), nil -} diff --git a/pkg/certrotation/target.go b/pkg/certrotation/target.go deleted file mode 100644 index 7d7211434..000000000 --- a/pkg/certrotation/target.go +++ /dev/null @@ -1,173 +0,0 @@ -package certrotation - -import ( - "context" - "crypto/x509" - "fmt" - "time" - - "github.com/openshift/library-go/pkg/certs" - "github.com/openshift/library-go/pkg/crypto" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - corev1listers "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/util/cert" - - "open-cluster-management.io/addon-framework/pkg/utils" -) - -// TargetRotation rotates a key and cert signed by a CA. It creates a new one when 80% -// of the lifetime of the old cert has passed, or the CA used to signed the old cert is -// gone from the CA bundle. 
-type TargetRotation struct { - Namespace string - Name string - Validity time.Duration - HostNames []string - Lister corev1listers.SecretLister - Client corev1client.SecretsGetter -} - -func (c TargetRotation) EnsureTargetCertKeyPair(signingCertKeyPair *crypto.CA, caBundleCerts []*x509.Certificate, - fns ...crypto.CertificateExtensionFunc) error { - originalTargetCertKeyPairSecret, err := c.Lister.Secrets(c.Namespace).Get(c.Name) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - targetCertKeyPairSecret := originalTargetCertKeyPairSecret.DeepCopy() - if apierrors.IsNotFound(err) { - // create an empty one - targetCertKeyPairSecret = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}} - } - targetCertKeyPairSecret.Type = corev1.SecretTypeTLS - - reason := needNewTargetCertKeyPair(targetCertKeyPairSecret, caBundleCerts, c.HostNames) - if len(reason) == 0 { - return nil - } - - if err := c.setTargetCertKeyPairSecret( - targetCertKeyPairSecret, c.Validity, signingCertKeyPair, fns...); err != nil { - return err - } - - if _, _, err = utils.ApplySecret(context.TODO(), c.Client, targetCertKeyPairSecret); err != nil { - return err - } - return nil -} - -// needNewTargetCertKeyPair returns a reason for creating a new target cert/key pair. -// Return empty if a valid cert/key pair is in place and no need to rotate it yet. -// -// We create a new target cert/key pair if -// 1. no cert/key pair exits -// 2. or the cert expired (then we are also pretty late) -// 3. or we are over the renewal percentage of the validity -// 4. or the CA bundle doesn't contain a CA cert that matches exiting secret's common name. -// 5. or the CA bundle doesn't contain the parent CA cert of the exiting secret. -// 6. or the previously signed SANs in the CA bundle doesn't match expectation -func needNewTargetCertKeyPair(secret *corev1.Secret, caBundleCerts []*x509.Certificate, hostnames []string) string { - certData := secret.Data["tls.crt"] - if len(certData) == 0 { - return "missing tls.crt" - } - - certificates, err := cert.ParseCertsPEM(certData) - if err != nil { - return "bad certificate" - } - if len(certificates) == 0 { - return "missing certificate" - } - - cert := certificates[0] - if time.Now().After(cert.NotAfter) { - return "already expired" - } - - maxWait := cert.NotAfter.Sub(cert.NotBefore) / 5 - latestTime := cert.NotAfter.Add(-maxWait) - now := time.Now() - if now.After(latestTime) { - return fmt.Sprintf("expired in %6.3f seconds", cert.NotAfter.Sub(now).Seconds()) - } - - // check the signer common name against all the common names in our ca bundle so we don't refresh early - containsIssuer := false - for _, caCert := range caBundleCerts { - if cert.Issuer.CommonName != caCert.Subject.CommonName { - continue - } - if err := cert.CheckSignatureFrom(caCert); err != nil { - continue - } - containsIssuer = true - } - if !containsIssuer { - return fmt.Sprintf("issuer %q not in ca bundle:\n%s", - cert.Issuer.CommonName, certs.CertificateBundleToString(caBundleCerts)) - } - - expectedIPs, expectedHosts := crypto.IPAddressesDNSNames(hostnames) - currentNames := sets.NewString(cert.DNSNames...) 
- if !sets.NewString(expectedHosts...).Equal(currentNames) { - return fmt.Sprintf("issued hostnames mismatch in ca bundle: (current) %v, (expected) %v", - currentNames, expectedHosts) - } - currentIPs := sets.NewString() - for _, ip := range cert.IPAddresses { - currentIPs.Insert(ip.String()) - } - expectedStrIPs := sets.NewString() - for _, ip := range expectedIPs { - expectedStrIPs.Insert(ip.String()) - } - if !expectedStrIPs.Equal(currentIPs) { - return fmt.Sprintf("issued ip addresses mismatch in ca bundle: (current) %v, (expected) %v", - currentIPs, expectedIPs) - } - - return "" -} - -// setTargetCertKeyPairSecret creates a new cert/key pair and sets them in the secret. -func (c TargetRotation) setTargetCertKeyPairSecret(targetCertKeyPairSecret *corev1.Secret, validity time.Duration, - signer *crypto.CA, fns ...crypto.CertificateExtensionFunc) error { - if targetCertKeyPairSecret.Data == nil { - targetCertKeyPairSecret.Data = map[string][]byte{} - } - - // make sure that we don't specify something past our signer - targetValidity := validity - // TODO: When creating a certificate, crypto.MakeServerCertForDuration accetps validity as input parameter, - // It calls time.Now() as the current time to calculate NotBefore/NotAfter of new certificate, which might - // be little later than the returned value of time.Now() call in the line below to get remainingSignerValidity. - // 2 more seconds is added here as a buffer to make sure NotAfter of the new certificate does not past NotAfter - // of the signing certificate. We may need a better way to handle this. - remainingSignerValidity := signer.Config.Certs[0].NotAfter.Sub(time.Now().Add(time.Second * 2)) - if remainingSignerValidity < validity { - targetValidity = remainingSignerValidity - } - certKeyPair, err := c.NewCertificate(signer, targetValidity, fns...) - if err != nil { - return err - } - targetCertKeyPairSecret.Data["tls.crt"], targetCertKeyPairSecret.Data["tls.key"], err = certKeyPair.GetPEMBytes() - if err != nil { - return err - } - - return nil -} - -func (c TargetRotation) NewCertificate(signer *crypto.CA, validity time.Duration, - fns ...crypto.CertificateExtensionFunc) (*crypto.TLSCertificateConfig, error) { - if len(c.HostNames) == 0 { - return nil, fmt.Errorf("no hostnames set") - } - return signer.MakeServerCertForDuration(sets.NewString(c.HostNames...), validity, fns...) 
-} diff --git a/pkg/certrotation/target_test.go b/pkg/certrotation/target_test.go deleted file mode 100644 index 418c7ebb4..000000000 --- a/pkg/certrotation/target_test.go +++ /dev/null @@ -1,303 +0,0 @@ -package certrotation - -import ( - "bytes" - "crypto/x509" - "fmt" - "strings" - "testing" - "time" - - "github.com/openshift/library-go/pkg/crypto" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/util/cert" -) - -func TestSetTargetCertKeyPairSecret(t *testing.T) { - ca1, err := crypto.MakeSelfSignedCAConfigForDuration("signer1", time.Hour*24) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - cases := []struct { - name string - validity time.Duration - signer *crypto.CA - hostNames []string - targetSecret *corev1.Secret - expectErr bool - }{ - { - name: "no hostname", - validity: time.Hour * 1, - signer: &crypto.CA{ - SerialGenerator: &crypto.RandomSerialGenerator{}, - Config: ca1, - }, - targetSecret: &corev1.Secret{}, - expectErr: true, - }, - { - name: "create cert/key pair", - validity: time.Hour * 1, - signer: &crypto.CA{ - SerialGenerator: &crypto.RandomSerialGenerator{}, - Config: ca1, - }, - hostNames: []string{"service1.ns1.svc"}, - targetSecret: &corev1.Secret{}, - }, - { - name: "truncate validity", - validity: time.Hour * 100, - signer: &crypto.CA{ - SerialGenerator: &crypto.RandomSerialGenerator{}, - Config: ca1, - }, - hostNames: []string{"service1.ns1.svc"}, - targetSecret: &corev1.Secret{}, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - tr := &TargetRotation{ - HostNames: c.hostNames, - } - err = tr.setTargetCertKeyPairSecret(c.targetSecret, c.validity, c.signer) - - switch { - case err != nil: - if !c.expectErr { - t.Fatalf("Expect no error, but got %v", err) - } - default: - if c.expectErr { - t.Fatalf("Expect an error") - } - - certData := c.targetSecret.Data["tls.crt"] - if len(certData) == 0 { - t.Fatalf("missing tls.crt") - } - - certificates, err := cert.ParseCertsPEM(certData) - if err != nil { - t.Fatalf("bad certificate") - } - if len(certificates) == 0 { - t.Fatalf("missing certificate") - } - - cert := certificates[0] - if cert.NotAfter.After(c.signer.Config.Certs[0].NotAfter) { - t.Fatalf("NotAfter of cert should not over NotAfter of ca") - } - } - }) - } -} - -type validateReasonFunc func(t *testing.T, actualReason string) - -func TestNeedNewTargetCertKeyPair(t *testing.T) { - correctHostname := "service1.ns1.svc" - certData1, keyData1, caData1, _, err := newServingCertKeyPair(correctHostname, "signer1", time.Hour*-1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - certData2, keyData2, caData2, signer2, err := newServingCertKeyPair(correctHostname, "signer2", time.Hour*1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - _, _, caData3, _, err := newServingCertKeyPair(correctHostname, "signer2", time.Hour*1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - wrongHostname := "service2.ns1.svc" - certData4, keyData4, _, err := newServingCertKeyPairWithSigner(signer2, wrongHostname, "signer2", time.Hour*1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - - caCert1, err := cert.ParseCertsPEM(caData1) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - caCert2, err := cert.ParseCertsPEM(caData2) - if err != nil { - t.Fatalf("Expected no error, but got: %v", err) - } - caCert3, err := cert.ParseCertsPEM(caData3) - if err != nil { - 
t.Fatalf("Expected no error, but got: %v", err) - } - - cases := []struct { - name string - secret *corev1.Secret - caBundle []*x509.Certificate - validateReason validateReasonFunc - }{ - { - name: "missing tls.crt", - secret: &corev1.Secret{}, - validateReason: expectReason("missing tls.crt"), - }, - { - name: "bad certificate", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": []byte("invalid data"), - }, - }, - validateReason: expectReason("bad certificate"), - }, - { - name: "expired", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData1, - "tls.key": keyData1, - }, - }, - validateReason: expectReason("already expired"), - }, - { - name: "no new cert needed", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData2, - "tls.key": keyData2, - }, - }, - caBundle: caCert2, - validateReason: expectReason(""), - }, - { - name: "no new cert needed - mixed 1", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData2, - "tls.key": keyData2, - }, - }, - caBundle: append(caCert1, caCert2...), - validateReason: expectReason(""), - }, - { - name: "no new cert needed - mixed 2", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData2, - "tls.key": keyData2, - }, - }, - caBundle: append(caCert2, caCert3...), - validateReason: expectReason(""), - }, - { - name: "no issuer found - empty", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData2, - "tls.key": keyData2, - }, - }, - validateReason: startsWith(fmt.Sprintf("issuer %q not in ca bundle:\n", "signer2")), - }, - { - name: "no issuer found - cn mismatch", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData2, - "tls.key": keyData2, - }, - }, - caBundle: caCert1, - validateReason: startsWith(fmt.Sprintf("issuer %q not in ca bundle:\n", "signer2")), - }, - { - name: "no issuer found - parent mismatch", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData2, - "tls.key": keyData2, - }, - }, - caBundle: caCert3, - validateReason: startsWith(fmt.Sprintf("issuer %q not in ca bundle:\n", "signer2")), - }, - { - name: "issued SAN mismatch", - secret: &corev1.Secret{ - Data: map[string][]byte{ - "tls.crt": certData4, - "tls.key": keyData4, - }, - }, - caBundle: caCert2, - validateReason: startsWith("issued hostnames mismatch in ca bundle:"), - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - actual := needNewTargetCertKeyPair(c.secret, c.caBundle, []string{correctHostname}) - c.validateReason(t, actual) - }) - } -} - -func newServingCertKeyPair(hostName, signerName string, validity time.Duration) (certData, keyData, caData []byte, signer *crypto.CA, err error) { - ca, err := crypto.MakeSelfSignedCAConfigForDuration(signerName, validity) - if err != nil { - return nil, nil, nil, nil, err - } - signer = &crypto.CA{ - SerialGenerator: &crypto.RandomSerialGenerator{}, - Config: ca, - } - certData, keyData, caData, err = newServingCertKeyPairWithSigner(signer, hostName, signerName, validity) - if err != nil { - return nil, nil, nil, nil, err - } - return certData, keyData, caData, signer, nil -} -func newServingCertKeyPairWithSigner(signer *crypto.CA, hostName, signerName string, validity time.Duration) (certData, keyData, caData []byte, err error) { - caCertBytes := &bytes.Buffer{} - caKeyBytes := &bytes.Buffer{} - if err := signer.Config.WriteCertConfig(caCertBytes, caKeyBytes); err != nil { - return nil, nil, nil, err - } - - certKeyPair, err := 
signer.MakeServerCertForDuration(sets.NewString(hostName), validity) - if err != nil { - return nil, nil, nil, err - } - certData, keyData, err = certKeyPair.GetPEMBytes() - if err != nil { - return nil, nil, nil, err - } - - return certData, keyData, caCertBytes.Bytes(), nil -} - -func expectReason(expectedReason string) validateReasonFunc { - return func(t *testing.T, actualReason string) { - if actualReason != expectedReason { - t.Fatalf("Expect %q, but got %q", expectedReason, actualReason) - } - } -} - -func startsWith(suffix string) validateReasonFunc { - return func(t *testing.T, actualReason string) { - if !strings.HasPrefix(actualReason, suffix) { - t.Fatalf("Expect reason start with %q, but got %q", suffix, actualReason) - } - } -} diff --git a/test/integration/cloudevents/agent_deploy_test.go b/test/integration/cloudevents/agent_deploy_test.go index cc7d5f634..109a8741a 100644 --- a/test/integration/cloudevents/agent_deploy_test.go +++ b/test/integration/cloudevents/agent_deploy_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "time" jsonpatch "github.com/evanphx/json-patch" @@ -156,6 +157,7 @@ var _ = ginkgo.Describe("Agent deploy", func() { var agentWorkClient workv1client.ManifestWorkInterface var agentWorkInformer workv1informers.ManifestWorkInformer var agentWorkLister workv1listers.ManifestWorkNamespaceLister + var clientHolder *work.ClientHolder ginkgo.BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) @@ -184,9 +186,9 @@ var _ = ginkgo.Describe("Agent deploy", func() { // start work agent for the managed cluster ginkgo.By("Start agent for managed cluster") - clientHolder, err := startWorkAgent(ctx, managedClusterName) + clientHolder, agentWorkInformer, err = startWorkAgent(ctx, managedClusterName) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - agentWorkInformer = clientHolder.ManifestWorkInformer() + agentWorkLister = agentWorkInformer.Lister().ManifestWorks(managedClusterName) agentWorkClient = clientHolder.ManifestWorks(managedClusterName) }) diff --git a/test/integration/cloudevents/agent_hook_deploy_test.go b/test/integration/cloudevents/agent_hook_deploy_test.go index 6ff707e92..17eaa8862 100644 --- a/test/integration/cloudevents/agent_hook_deploy_test.go +++ b/test/integration/cloudevents/agent_hook_deploy_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "time" jsonpatch "github.com/evanphx/json-patch" @@ -86,6 +87,7 @@ var _ = ginkgo.Describe("Agent hook deploy", func() { var agentWorkClient workv1client.ManifestWorkInterface var agentWorkInformer workv1informers.ManifestWorkInformer var agentWorkLister workv1listers.ManifestWorkNamespaceLister + var clientHolder *work.ClientHolder ginkgo.BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) @@ -114,9 +116,8 @@ var _ = ginkgo.Describe("Agent hook deploy", func() { // start work agent for the managed cluster ginkgo.By("Start agent for managed cluster") - clientHolder, err := startWorkAgent(ctx, managedClusterName) + clientHolder, agentWorkInformer, err = startWorkAgent(ctx, managedClusterName) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - agentWorkInformer = clientHolder.ManifestWorkInformer() agentWorkLister = agentWorkInformer.Lister().ManifestWorks(managedClusterName) agentWorkClient = clientHolder.ManifestWorks(managedClusterName) }) diff --git a/test/integration/cloudevents/agent_hosting_deploy_test.go 
b/test/integration/cloudevents/agent_hosting_deploy_test.go index 1e8d3f074..712d3a252 100644 --- a/test/integration/cloudevents/agent_hosting_deploy_test.go +++ b/test/integration/cloudevents/agent_hosting_deploy_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "time" jsonpatch "github.com/evanphx/json-patch" @@ -82,6 +83,7 @@ var _ = ginkgo.Describe("Agent deploy", func() { var agentWorkClient workv1client.ManifestWorkInterface var agentWorkInformer workv1informers.ManifestWorkInformer var agentWorkLister workv1listers.ManifestWorkNamespaceLister + var clientHolder *work.ClientHolder ginkgo.BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) @@ -128,9 +130,8 @@ var _ = ginkgo.Describe("Agent deploy", func() { // start work agent for the hosting cluster ginkgo.By("Start agent for hosting cluster") - clientHolder, err := startWorkAgent(ctx, hostingClusterName) + clientHolder, agentWorkInformer, err = startWorkAgent(ctx, hostingClusterName) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - agentWorkInformer = clientHolder.ManifestWorkInformer() agentWorkLister = agentWorkInformer.Lister().ManifestWorks(hostingClusterName) agentWorkClient = clientHolder.ManifestWorks(hostingClusterName) }) diff --git a/test/integration/cloudevents/agent_hosting_hook_deploy_test.go b/test/integration/cloudevents/agent_hosting_hook_deploy_test.go index db119a0d5..4376736c8 100644 --- a/test/integration/cloudevents/agent_hosting_hook_deploy_test.go +++ b/test/integration/cloudevents/agent_hosting_hook_deploy_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "time" jsonpatch "github.com/evanphx/json-patch" @@ -87,6 +88,7 @@ var _ = ginkgo.Describe("Agent hook deploy", func() { var agentWorkClient workv1client.ManifestWorkInterface var agentWorkInformer workv1informers.ManifestWorkInformer var agentWorkLister workv1listers.ManifestWorkNamespaceLister + var clientHolder *work.ClientHolder ginkgo.BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) @@ -133,9 +135,8 @@ var _ = ginkgo.Describe("Agent hook deploy", func() { // start work agent for the hosting cluster ginkgo.By("Start agent for hosting cluster") - clientHolder, err := startWorkAgent(ctx, hostingClusterName) + clientHolder, agentWorkInformer, err = startWorkAgent(ctx, hostingClusterName) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - agentWorkInformer = clientHolder.ManifestWorkInformer() agentWorkLister = agentWorkInformer.Lister().ManifestWorks(hostingClusterName) agentWorkClient = clientHolder.ManifestWorks(hostingClusterName) }) diff --git a/test/integration/cloudevents/suite_test.go b/test/integration/cloudevents/suite_test.go index 97a103dfb..01a08d679 100644 --- a/test/integration/cloudevents/suite_test.go +++ b/test/integration/cloudevents/suite_test.go @@ -34,7 +34,10 @@ import ( addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" clusterv1 "open-cluster-management.io/api/cluster/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" ) const ( @@ -250,29 +253,44 @@ func 
newClusterManagementAddon(name string) *addonapiv1alpha1.ClusterManagementA } } -func startWorkAgent(ctx context.Context, clusterName string) (*work.ClientHolder, error) { +func startWorkAgent(ctx context.Context, clusterName string) (*work.ClientHolder, workv1informers.ManifestWorkInformer, error) { config := &mqtt.MQTTOptions{ - KeepAlive: 60, - PubQoS: 1, - SubQoS: 1, - DialTimeout: 5 * time.Second, - BrokerHost: mqttBrokerHost, + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + Dialer: &mqtt.MQTTDialer{ + Timeout: 5 * time.Second, + BrokerHost: mqttBrokerHost, + }, + Topics: types.Topics{ SourceEvents: fmt.Sprintf("sources/%s/clusters/%s/sourceevents", sourceID, clusterName), AgentEvents: fmt.Sprintf("sources/%s/clusters/%s/agentevents", sourceID, clusterName), AgentBroadcast: fmt.Sprintf("clusters/%s/agentbroadcast", clusterName), }, } + watcherStore := store.NewAgentInformerWatcherStore() + clientHolder, err := work.NewClientHolderBuilder(config). WithClientID(clusterName). WithClusterName(clusterName). WithCodecs(codec.NewManifestBundleCodec()). + WithWorkClientWatcherStore(watcherStore). NewAgentClientHolder(ctx) if err != nil { - return nil, err + return nil, nil, err + } + + workClient := clientHolder.WorkInterface() + factory := workinformers.NewSharedInformerFactoryWithOptions(workClient, 30*time.Minute) + workInformers := factory.Work().V1().ManifestWorks() + + // For cloudevents work client, we use the informer store as the client store + if watcherStore != nil { + watcherStore.SetStore(workInformers.Informer().GetStore()) } - go clientHolder.ManifestWorkInformer().Informer().Run(ctx.Done()) + go workInformers.Informer().Run(ctx.Done()) - return clientHolder, nil + return clientHolder, workInformers, nil } diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/message.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/message.go new file mode 100644 index 000000000..164879a11 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/message.go @@ -0,0 +1,156 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka_confluent + +import ( + "bytes" + "context" + "strconv" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/confluentinc/confluent-kafka-go/v2/kafka" +) + +const ( + prefix = "ce-" + contentTypeKey = "content-type" +) + +const ( + KafkaOffsetKey = "kafkaoffset" + KafkaPartitionKey = "kafkapartition" + KafkaTopicKey = "kafkatopic" + KafkaMessageKey = "kafkamessagekey" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a Kafka message. 
+// This message *can* be read several times safely +type Message struct { + internal *kafka.Message + properties map[string][]byte + format format.Format + version spec.Version +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +// NewMessage returns a binding.Message that holds the provided kafka.Message. +// The returned binding.Message *can* be read several times safely +// This function *doesn't* guarantee that the returned binding.Message is always a kafka_sarama.Message instance +func NewMessage(msg *kafka.Message) *Message { + if msg == nil { + panic("the kafka.Message shouldn't be nil") + } + if msg.TopicPartition.Topic == nil { + panic("the topic of kafka.Message shouldn't be nil") + } + if msg.TopicPartition.Partition < 0 || msg.TopicPartition.Offset < 0 { + panic("the partition or offset of the kafka.Message must be non-negative") + } + + var contentType, contentVersion string + properties := make(map[string][]byte, len(msg.Headers)+3) + for _, header := range msg.Headers { + k := strings.ToLower(string(header.Key)) + if k == strings.ToLower(contentTypeKey) { + contentType = string(header.Value) + } + if k == specs.PrefixedSpecVersionName() { + contentVersion = string(header.Value) + } + properties[k] = header.Value + } + + // add the kafka message key, topic, partition and partition key to the properties + properties[prefix+KafkaOffsetKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Offset), 10)) + properties[prefix+KafkaPartitionKey] = []byte(strconv.FormatInt(int64(msg.TopicPartition.Partition), 10)) + properties[prefix+KafkaTopicKey] = []byte(*msg.TopicPartition.Topic) + if msg.Key != nil { + properties[prefix+KafkaMessageKey] = msg.Key + } + + message := &Message{ + internal: msg, + properties: properties, + } + if ft := format.Lookup(contentType); ft != nil { + message.format = ft + } else if v := specs.Version(contentVersion); v != nil { + message.version = v + } + + return message +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format != nil { + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.Value)) + } + return binding.ErrNotStructured +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error { + if m.version == nil { + return binding.ErrNotBinary + } + + var err error + for k, v := range m.properties { + if strings.HasPrefix(k, prefix) { + attr := m.version.Attribute(k) + if attr != nil { + err = encoder.SetAttribute(attr, string(v)) + } else { + err = encoder.SetExtension(strings.TrimPrefix(k, prefix), string(v)) + } + } else if k == strings.ToLower(contentTypeKey) { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), string(v)) + } + if err != nil { + return err + } + } + + if m.internal.Value != nil { + err = encoder.SetData(bytes.NewBuffer(m.internal.Value)) + } + return err +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr == nil { + return nil, nil + } + return attr, m.properties[attr.PrefixedName()] +} + +func (m *Message) GetExtension(name string) 
interface{} { + return m.properties[prefix+name] +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/option.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/option.go new file mode 100644 index 000000000..7eef74007 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/option.go @@ -0,0 +1,151 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka_confluent + +import ( + "context" + "errors" + + "github.com/confluentinc/confluent-kafka-go/v2/kafka" +) + +// Option is the function signature required to be considered an kafka_confluent.Option. +type Option func(*Protocol) error + +// WithConfigMap sets the configMap to init the kafka client. +func WithConfigMap(config *kafka.ConfigMap) Option { + return func(p *Protocol) error { + if config == nil { + return errors.New("the kafka.ConfigMap option must not be nil") + } + p.kafkaConfigMap = config + return nil + } +} + +// WithSenderTopic sets the defaultTopic for the kafka.Producer. +func WithSenderTopic(defaultTopic string) Option { + return func(p *Protocol) error { + if defaultTopic == "" { + return errors.New("the producer topic option must not be nil") + } + p.producerDefaultTopic = defaultTopic + return nil + } +} + +// WithReceiverTopics sets the topics for the kafka.Consumer. +func WithReceiverTopics(topics []string) Option { + return func(p *Protocol) error { + if topics == nil { + return errors.New("the consumer topics option must not be nil") + } + p.consumerTopics = topics + return nil + } +} + +// WithRebalanceCallBack sets the callback for rebalancing of the consumer group. +func WithRebalanceCallBack(rebalanceCb kafka.RebalanceCb) Option { + return func(p *Protocol) error { + if rebalanceCb == nil { + return errors.New("the consumer group rebalance callback must not be nil") + } + p.consumerRebalanceCb = rebalanceCb + return nil + } +} + +// WithPollTimeout sets timeout of the consumer polling for message or events, return nil on timeout. +func WithPollTimeout(timeoutMs int) Option { + return func(p *Protocol) error { + p.consumerPollTimeout = timeoutMs + return nil + } +} + +// WithSender set a kafka.Producer instance to init the client directly. +func WithSender(producer *kafka.Producer) Option { + return func(p *Protocol) error { + if producer == nil { + return errors.New("the producer option must not be nil") + } + p.producer = producer + return nil + } +} + +// WithErrorHandler provide a func on how to handle the kafka.Error which the kafka.Consumer has polled. +func WithErrorHandler(handler func(ctx context.Context, err kafka.Error)) Option { + return func(p *Protocol) error { + p.consumerErrorHandler = handler + return nil + } +} + +// WithReceiver set a kafka.Consumer instance to init the client directly. +func WithReceiver(consumer *kafka.Consumer) Option { + return func(p *Protocol) error { + if consumer == nil { + return errors.New("the consumer option must not be nil") + } + p.consumer = consumer + return nil + } +} + +// Opaque key type used to store topicPartitionOffsets: assign them from ctx. +type topicPartitionOffsetsType struct{} + +var offsetKey = topicPartitionOffsetsType{} + +// WithTopicPartitionOffsets will set the positions where the consumer starts consuming from. 
+func WithTopicPartitionOffsets(ctx context.Context, topicPartitionOffsets []kafka.TopicPartition) context.Context { + if len(topicPartitionOffsets) == 0 { + panic("the topicPartitionOffsets cannot be empty") + } + for _, offset := range topicPartitionOffsets { + if offset.Topic == nil || *(offset.Topic) == "" { + panic("the kafka topic cannot be nil or empty") + } + if offset.Partition < 0 || offset.Offset < 0 { + panic("the kafka partition/offset must be non-negative") + } + } + return context.WithValue(ctx, offsetKey, topicPartitionOffsets) +} + +// TopicPartitionOffsetsFrom looks in the given context and returns []kafka.TopicPartition or nil if not set +func TopicPartitionOffsetsFrom(ctx context.Context) []kafka.TopicPartition { + c := ctx.Value(offsetKey) + if c != nil { + if s, ok := c.([]kafka.TopicPartition); ok { + return s + } + } + return nil +} + +// Opaque key type used to store message key +type messageKeyType struct{} + +var keyForMessageKey = messageKeyType{} + +// WithMessageKey returns back a new context with the given messageKey. +func WithMessageKey(ctx context.Context, messageKey string) context.Context { + return context.WithValue(ctx, keyForMessageKey, messageKey) +} + +// MessageKeyFrom looks in the given context and returns `messageKey` as a string if found and valid, otherwise "". +func MessageKeyFrom(ctx context.Context) string { + c := ctx.Value(keyForMessageKey) + if c != nil { + if s, ok := c.(string); ok { + return s + } + } + return "" +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/protocol.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/protocol.go new file mode 100644 index 000000000..c527f1061 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/protocol.go @@ -0,0 +1,247 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka_confluent + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/confluentinc/confluent-kafka-go/v2/kafka" + + cecontext "github.com/cloudevents/sdk-go/v2/context" +) + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +type Protocol struct { + kafkaConfigMap *kafka.ConfigMap + + consumer *kafka.Consumer + consumerTopics []string + consumerRebalanceCb kafka.RebalanceCb // optional + consumerPollTimeout int // optional + consumerErrorHandler func(ctx context.Context, err kafka.Error) // optional + consumerMux sync.Mutex + consumerIncoming chan *kafka.Message + consumerCtx context.Context + consumerCancel context.CancelFunc + + producer *kafka.Producer + producerDefaultTopic string // optional + + closerMux sync.Mutex +} + +func New(opts ...Option) (*Protocol, error) { + p := &Protocol{ + consumerPollTimeout: 100, + consumerIncoming: make(chan *kafka.Message), + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.kafkaConfigMap != nil { + if p.consumerTopics != nil && p.consumer == nil { + consumer, err := kafka.NewConsumer(p.kafkaConfigMap) + if err != nil { + return nil, err + } + p.consumer = consumer + } + if p.producerDefaultTopic != "" && p.producer == nil { + producer, err := kafka.NewProducer(p.kafkaConfigMap) + if err != nil { + return nil, err + } + p.producer = producer + } + if p.producer == nil && p.consumer == nil { + return 
nil, errors.New("at least receiver or sender topic must be set") + } + } + if p.producerDefaultTopic != "" && p.producer == nil { + return nil, fmt.Errorf("at least configmap or producer must be set for the sender topic: %s", p.producerDefaultTopic) + } + + if len(p.consumerTopics) > 0 && p.consumer == nil { + return nil, fmt.Errorf("at least configmap or consumer must be set for the receiver topics: %s", p.consumerTopics) + } + + if p.kafkaConfigMap == nil && p.producer == nil && p.consumer == nil { + return nil, errors.New("at least one of the following to initialize the protocol must be set: config, producer, or consumer") + } + return p, nil +} + +// Events returns the events channel used by Confluent Kafka to deliver the result from a produce, i.e., send, operation. +// When using this SDK to produce (send) messages, this channel must be monitored to avoid resource leaks and this channel becoming full. See Confluent SDK for Go for details on the implementation. +func (p *Protocol) Events() (chan kafka.Event, error) { + if p.producer == nil { + return nil, errors.New("producer not set") + } + return p.producer.Events(), nil +} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +// Send message by kafka.Producer. You must monitor the Events() channel when using this function. +func (p *Protocol) Send(ctx context.Context, in binding.Message, transformers ...binding.Transformer) (err error) { + if p.producer == nil { + return errors.New("producer client must be set") + } + + p.closerMux.Lock() + defer p.closerMux.Unlock() + if p.producer.IsClosed() { + return errors.New("producer is closed") + } + + defer in.Finish(err) + + kafkaMsg := &kafka.Message{ + TopicPartition: kafka.TopicPartition{ + Topic: &p.producerDefaultTopic, + Partition: kafka.PartitionAny, + }, + } + + if topic := cecontext.TopicFrom(ctx); topic != "" { + kafkaMsg.TopicPartition.Topic = &topic + } + + if messageKey := MessageKeyFrom(ctx); messageKey != "" { + kafkaMsg.Key = []byte(messageKey) + } + + if err = WriteProducerMessage(ctx, in, kafkaMsg, transformers...); err != nil { + return fmt.Errorf("create producer message: %w", err) + } + + if err = p.producer.Produce(kafkaMsg, nil); err != nil { + return fmt.Errorf("produce message: %w", err) + } + return nil +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.consumer == nil { + return errors.New("the consumer client must be set") + } + if p.consumerTopics == nil { + return errors.New("the consumer topics must be set") + } + + p.consumerMux.Lock() + defer p.consumerMux.Unlock() + logger := cecontext.LoggerFrom(ctx) + + // Query committed offsets for each partition + if positions := TopicPartitionOffsetsFrom(ctx); positions != nil { + if err := p.consumer.Assign(positions); err != nil { + return err + } + } + + logger.Infof("Subscribing to topics: %v", p.consumerTopics) + err := p.consumer.SubscribeTopics(p.consumerTopics, p.consumerRebalanceCb) + if err != nil { + return err + } + + p.closerMux.Lock() + p.consumerCtx, p.consumerCancel = context.WithCancel(ctx) + defer p.consumerCancel() + p.closerMux.Unlock() + + defer func() { + if !p.consumer.IsClosed() { + logger.Infof("Closing consumer %v", p.consumerTopics) + if err = p.consumer.Close(); err != nil { + logger.Errorf("failed to close the consumer: %v", err) + } + } + close(p.consumerIncoming) + }() + + for { + select { + case <-p.consumerCtx.Done(): + return p.consumerCtx.Err() + default: 
+ ev := p.consumer.Poll(p.consumerPollTimeout) + if ev == nil { + continue + } + switch e := ev.(type) { + case *kafka.Message: + p.consumerIncoming <- e + case kafka.Error: + // Errors should generally be considered informational, the client will try to automatically recover. + // But in here, we choose to terminate the application if all brokers are down. + logger.Infof("Error %v: %v", e.Code(), e) + if p.consumerErrorHandler != nil { + p.consumerErrorHandler(ctx, e) + } + if e.Code() == kafka.ErrAllBrokersDown { + logger.Error("All broker connections are down") + return e + } + } + } + } +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.consumerIncoming: + if !ok { + return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +// Close cleans up resources after use. Must be called to properly close underlying Kafka resources and avoid resource leaks +func (p *Protocol) Close(ctx context.Context) error { + p.closerMux.Lock() + defer p.closerMux.Unlock() + logger := cecontext.LoggerFrom(ctx) + + if p.consumerCancel != nil { + p.consumerCancel() + } + + if p.producer != nil && !p.producer.IsClosed() { + // Flush and close the producer with a 10 seconds timeout (closes Events channel) + for p.producer.Flush(10000) > 0 { + logger.Info("Flushing outstanding messages") + } + p.producer.Close() + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/write_producer_message.go b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/write_producer_message.go new file mode 100644 index 000000000..e640cbded --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2/write_producer_message.go @@ -0,0 +1,125 @@ +/* +Copyright 2023 The CloudEvents Authors +SPDX-License-Identifier: Apache-2.0 +*/ + +package kafka_confluent + +import ( + "bytes" + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "github.com/confluentinc/confluent-kafka-go/v2/kafka" +) + +// extends the kafka.Message to support the interfaces for the converting it to binding.Message +type kafkaMessageWriter kafka.Message + +var ( + _ binding.StructuredWriter = (*kafkaMessageWriter)(nil) + _ binding.BinaryWriter = (*kafkaMessageWriter)(nil) +) + +// WriteProducerMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). 
+func WriteProducerMessage(ctx context.Context, in binding.Message, kafkaMsg *kafka.Message, + transformers ...binding.Transformer, +) error { + structuredWriter := (*kafkaMessageWriter)(kafkaMsg) + binaryWriter := (*kafkaMessageWriter)(kafkaMsg) + + _, err := binding.Write( + ctx, + in, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +func (b *kafkaMessageWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + b.Headers = []kafka.Header{{ + Key: contentTypeKey, + Value: []byte(f.MediaType()), + }} + + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + + b.Value = buf.Bytes() + return nil +} + +func (b *kafkaMessageWriter) Start(ctx context.Context) error { + b.Headers = []kafka.Header{} + return nil +} + +func (b *kafkaMessageWriter) End(ctx context.Context) error { + return nil +} + +func (b *kafkaMessageWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + b.Value = buf.Bytes() + return nil +} + +func (b *kafkaMessageWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + if attribute.Kind() == spec.DataContentType { + if value == nil { + b.removeProperty(contentTypeKey) + return nil + } + b.addProperty(contentTypeKey, value) + } else { + key := prefix + attribute.Name() + if value == nil { + b.removeProperty(key) + return nil + } + b.addProperty(key, value) + } + return nil +} + +func (b *kafkaMessageWriter) SetExtension(name string, value interface{}) error { + if value == nil { + b.removeProperty(prefix + name) + } + return b.addProperty(prefix+name, value) +} + +func (b *kafkaMessageWriter) removeProperty(key string) { + for i, v := range b.Headers { + if v.Key == key { + b.Headers = append(b.Headers[:i], b.Headers[i+1:]...) + break + } + } +} + +func (b *kafkaMessageWriter) addProperty(key string, value interface{}) error { + s, err := types.Format(value) + if err != nil { + return err + } + b.Headers = append(b.Headers, kafka.Header{Key: key, Value: []byte(s)}) + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/LICENSE b/vendor/github.com/confluentinc/confluent-kafka-go/v2/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/.gitignore b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/.gitignore new file mode 100644 index 000000000..b1a211198 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/.gitignore @@ -0,0 +1,2 @@ +testconf.json +go_rdkafka_generr/go_rdkafka_generr diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/00version.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/00version.go new file mode 100644 index 000000000..670b2b42c --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/00version.go @@ -0,0 +1,58 @@ +/** + * Copyright 2016-2019 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" +) + +/* +#include "select_rdkafka.h" + +//Minimum required librdkafka version. This is checked both during +//build-time and runtime. +//Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error +//defines and strings in sync. +// + +#define MIN_RD_KAFKA_VERSION 0x02030000 + +#ifdef __APPLE__ +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#else +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#endif + +#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION +#ifdef __APPLE__ +#error "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#else +#error "confluent-kafka-go requires librdkafka v2.3.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#endif +#endif +*/ +import "C" + +func versionCheck() error { + ver, verstr := LibraryVersion() + if ver < C.MIN_RD_KAFKA_VERSION { + return newErrorFromString(ErrNotImplemented, + fmt.Sprintf("%s: librdkafka version %s (0x%x) detected", + C.MIN_VER_ERRSTR, verstr, ver)) + } + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/README.md new file mode 100644 index 000000000..9924630a8 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/README.md @@ -0,0 +1,159 @@ +# Information for confluent-kafka-go developers + +## Development process + +1. Use go1.19 (and related tooling) for development on confluent-kafka-go. +2. Make sure to run `gofmt` and `go vet` on your code. +3. While there is no hard-limit, try to keep your line length under 80 + characters. +3. [Test](#testing) your changes and create a PR. 
+ + +NOTE: Whenever librdkafka error codes are updated make sure to run generate +before building: + +``` + $ make -f mk/Makefile generr + $ go build ./... +``` + + + +## Testing + +Some of the tests included in this directory, the benchmark and integration tests in particular, +require an existing Kafka cluster and a testconf.json configuration file to +provide tests with bootstrap brokers, topic name, etc. + +The format of testconf.json is a JSON object: +``` +{ + "Brokers": "", + "Topic": "" +} +``` + +See testconf-example.json for an example and full set of available options. + + +To run unit-tests: +``` +$ go test +``` + +To run benchmark tests: +``` +$ go test -bench . +``` + +For the code coverage: +``` +$ go test -coverprofile=coverage.out -bench=. +$ go tool cover -func=coverage.out +``` + + +## Build tags + +Different build types are supported through Go build tags (`-tags ..`), +these tags should be specified on the **application** build/get/install command. + + * By default the bundled platform-specific static build of librdkafka will + be used. This works out of the box on Mac OSX and glibc-based Linux distros, + such as Ubuntu and CentOS. + * `-tags musl` - must be specified when building on/for musl-based Linux + distros, such as Alpine. Will use the bundled static musl build of + librdkafka. + * `-tags dynamic` - link librdkafka dynamically. A shared librdkafka library + must be installed manually through other means (apt-get, yum, build from + source, etc). + + + +## Release process + +For each release candidate and final release, perform the following steps: + +### Review the CHANGELOG + +### Update bundle to latest librdkafka + +See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md). + + +### Update librdkafka version requirement + +Update the minimum required librdkafka version in `kafka/00version.go` +and `README.md` and the version in `examples/go.mod` and `mk/doc-gen.py`. + +### Update error codes + +Error codes can be automatically generated from the current librdkafka version. + + +Update generated error codes: + + $ make -f mk/Makefile generr + # Verify by building + + +## Generating HTML documentation + +To generate one-page HTML documentation run the mk/doc-gen.py script from the +top-level directory. This script requires the beautifulsoup4 Python package. + +``` +$ source .../your/virtualenv/bin/activate +$ pip install beautifulsoup4 +... +$ make -f mk/Makefile docs +``` + + +### Rebuild everything + + $ go clean -i ./... + $ go build ./... + + +### Run full test suite + +Set up a test cluster using whatever mechanism you typically use +(docker, trivup, ccloud, ..). + +Make sure to update `kafka/testconf.json` as needed (broker list, $BROKERS) + +Run test suite: + + $ go test ./... + + +### Verify examples + +Manually verify that the examples/ applications work. + +Also make sure the examples in README.md work. + +### Commit any changes + +Make sure to push to github before creating the tag to have CI tests pass. + + +### Create and push tag + + $ git tag v1.3.0 + $ git push --dry-run origin v1.3.0 + # Remove --dry-run and re-execute if it looks ok. + + +### Create release notes page on github + +### Update version in Confluent docs + +Put the new version in settings.sh of these two repos + +https://github.com/confluentinc/docs + +https://github.com/confluentinc/docs-platform + +### Don't forget tweeting it! 
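A minimal sketch of how the kafka_confluent protocol vendored above could be wired into a CloudEvents client, using only the options shown in option.go (WithConfigMap, WithSenderTopic) and the Events() channel documented on the Protocol. This is not part of this change: the broker address localhost:9092, the topic name sourceevents, and the event metadata strings are placeholder assumptions.

```
// Hypothetical sketch, not part of the vendored sources: sends one CloudEvent
// over Kafka via the kafka_confluent protocol. Broker, topic, and event
// metadata below are placeholder assumptions.
package main

import (
	"context"
	"log"

	kafka_confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2"
	cloudevents "github.com/cloudevents/sdk-go/v2"
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	ctx := context.Background()

	// With a ConfigMap and a sender topic, New() constructs the kafka.Producer itself.
	p, err := kafka_confluent.New(
		kafka_confluent.WithConfigMap(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"}),
		kafka_confluent.WithSenderTopic("sourceevents"),
	)
	if err != nil {
		log.Fatalf("create protocol: %v", err)
	}
	defer p.Close(ctx)

	// Send() requires the producer's Events() channel to be drained,
	// otherwise delivery reports pile up and the channel fills.
	events, err := p.Events()
	if err != nil {
		log.Fatalf("get events channel: %v", err)
	}
	go func() {
		for e := range events {
			log.Printf("delivery report: %v", e)
		}
	}()

	c, err := cloudevents.NewClient(p)
	if err != nil {
		log.Fatalf("create client: %v", err)
	}

	evt := cloudevents.NewEvent()
	evt.SetID("example-1")
	evt.SetSource("example/kafka-sender")
	evt.SetType("io.example.event")
	if err := evt.SetData(cloudevents.ApplicationJSON, map[string]string{"hello": "kafka"}); err != nil {
		log.Fatalf("set data: %v", err)
	}

	if result := c.Send(ctx, evt); cloudevents.IsUndelivered(result) {
		log.Printf("send failed: %v", result)
	}
}
```

Receiving would work analogously: configure WithReceiverTopics and call the client's StartReceiver, which drives the protocol's OpenInbound poll loop shown in protocol.go.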
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminapi.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminapi.go new file mode 100644 index 000000000..07fa5617e --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminapi.go @@ -0,0 +1,3463 @@ +/** + * Copyright 2018 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + "time" + "unsafe" +) + +/* +#include "select_rdkafka.h" +#include + +static const rd_kafka_group_result_t * +group_result_by_idx (const rd_kafka_group_result_t **groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return groups[idx]; +} + +static const rd_kafka_topic_result_t * +topic_result_by_idx (const rd_kafka_topic_result_t **topics, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return topics[idx]; +} + +static const rd_kafka_ConfigResource_t * +ConfigResource_by_idx (const rd_kafka_ConfigResource_t **res, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return res[idx]; +} + +static const rd_kafka_ConfigEntry_t * +ConfigEntry_by_idx (const rd_kafka_ConfigEntry_t **entries, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return entries[idx]; +} + +static const rd_kafka_acl_result_t * +acl_result_by_idx (const rd_kafka_acl_result_t **acl_results, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_results[idx]; +} + +static const rd_kafka_DeleteAcls_result_response_t * +DeleteAcls_result_response_by_idx (const rd_kafka_DeleteAcls_result_response_t **delete_acls_result_responses, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return delete_acls_result_responses[idx]; +} + +static const rd_kafka_AclBinding_t * +AclBinding_by_idx (const rd_kafka_AclBinding_t **acl_bindings, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_bindings[idx]; +} + +static const rd_kafka_ConsumerGroupListing_t * +ConsumerGroupListing_by_idx(const rd_kafka_ConsumerGroupListing_t **result_groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return result_groups[idx]; +} + +static const rd_kafka_ConsumerGroupDescription_t * +ConsumerGroupDescription_by_idx(const rd_kafka_ConsumerGroupDescription_t **result_groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return result_groups[idx]; +} + +static const rd_kafka_TopicDescription_t * +TopicDescription_by_idx(const rd_kafka_TopicDescription_t **result_topics, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return result_topics[idx]; +} + +static const rd_kafka_TopicPartitionInfo_t * +TopicPartitionInfo_by_idx(const rd_kafka_TopicPartitionInfo_t **partitions, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return partitions[idx]; +} + +static const rd_kafka_AclOperation_t AclOperation_by_idx(const rd_kafka_AclOperation_t *acl_operations, size_t cnt, size_t idx) { + if (idx >= cnt) + return 
RD_KAFKA_ACL_OPERATION_UNKNOWN; + return acl_operations[idx]; +} + +static const rd_kafka_Node_t *Node_by_idx(const rd_kafka_Node_t **nodes, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return nodes[idx]; +} + +static const rd_kafka_UserScramCredentialsDescription_t * +DescribeUserScramCredentials_result_description_by_idx(const rd_kafka_UserScramCredentialsDescription_t **descriptions, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return descriptions[idx]; +} + +static const rd_kafka_AlterUserScramCredentials_result_response_t* +AlterUserScramCredentials_result_response_by_idx(const rd_kafka_AlterUserScramCredentials_result_response_t **responses, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return responses[idx]; +} + +static const rd_kafka_ListOffsetsResultInfo_t * +ListOffsetsResultInfo_by_idx(const rd_kafka_ListOffsetsResultInfo_t **result_infos, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return result_infos[idx]; +} + +static const rd_kafka_error_t * +error_by_idx(const rd_kafka_error_t **errors, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return errors[idx]; +} +*/ +import "C" + +// AdminClient is derived from an existing Producer or Consumer +type AdminClient struct { + handle *handle + isDerived bool // Derived from existing client handle + isClosed uint32 // to check if Admin Client is closed or not. +} + +// IsClosed returns boolean representing if client is closed or not +func (a *AdminClient) IsClosed() bool { + return atomic.LoadUint32(&a.isClosed) == 1 +} + +func (a *AdminClient) verifyClient() error { + if a.IsClosed() { + return getOperationNotAllowedErrorForClosedClient() + } + return nil +} + +func durationToMilliseconds(t time.Duration) int { + if t > 0 { + return (int)(t.Seconds() * 1000.0) + } + return (int)(t) +} + +// TopicResult provides per-topic operation result (error) information. +type TopicResult struct { + // Topic name + Topic string + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// String returns a human-readable representation of a TopicResult. +func (t TopicResult) String() string { + if t.Error.code == 0 { + return t.Topic + } + return fmt.Sprintf("%s (%s)", t.Topic, t.Error.str) +} + +// ConsumerGroupResult provides per-group operation result (error) information. +type ConsumerGroupResult struct { + // Group name + Group string + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// String returns a human-readable representation of a ConsumerGroupResult. 
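A minimal sketch of constructing the AdminClient defined above and checking IsClosed; the broker address is a placeholder. (The client can also be derived from an existing Producer or Consumer, which is what the isDerived flag tracks.)

```
package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatalf("creating admin client: %v", err)
	}
	defer a.Close()

	// IsClosed flips to true only after Close() has run.
	log.Printf("closed? %v", a.IsClosed())
}
```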
+func (g ConsumerGroupResult) String() string { + if g.Error.code == ErrNoError { + return g.Group + } + return fmt.Sprintf("%s (%s)", g.Group, g.Error.str) +} + +// ConsumerGroupState represents a consumer group state +type ConsumerGroupState int + +const ( + // ConsumerGroupStateUnknown - Unknown ConsumerGroupState + ConsumerGroupStateUnknown = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN) + // ConsumerGroupStatePreparingRebalance - preparing rebalance + ConsumerGroupStatePreparingRebalance = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE) + // ConsumerGroupStateCompletingRebalance - completing rebalance + ConsumerGroupStateCompletingRebalance = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE) + // ConsumerGroupStateStable - stable + ConsumerGroupStateStable = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_STABLE) + // ConsumerGroupStateDead - dead group + ConsumerGroupStateDead = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_DEAD) + // ConsumerGroupStateEmpty - empty group + ConsumerGroupStateEmpty = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY) +) + +// String returns the human-readable representation of a consumer_group_state +func (t ConsumerGroupState) String() string { + return C.GoString(C.rd_kafka_consumer_group_state_name( + C.rd_kafka_consumer_group_state_t(t))) +} + +// ConsumerGroupStateFromString translates a consumer group state name/string to +// a ConsumerGroupStateFromString value. +func ConsumerGroupStateFromString(stateString string) (ConsumerGroupState, error) { + cStr := C.CString(stateString) + defer C.free(unsafe.Pointer(cStr)) + state := ConsumerGroupState(C.rd_kafka_consumer_group_state_code(cStr)) + return state, nil +} + +// ConsumerGroupListing represents the result of ListConsumerGroups for a single +// group. +type ConsumerGroupListing struct { + // Group id. + GroupID string + // Is a simple consumer group. + IsSimpleConsumerGroup bool + // Group state. + State ConsumerGroupState +} + +// ListConsumerGroupsResult represents ListConsumerGroups results and errors. +type ListConsumerGroupsResult struct { + // List of valid ConsumerGroupListings. + Valid []ConsumerGroupListing + // List of errors. + Errors []error +} + +// MemberAssignment represents the assignment of a consumer group member. +type MemberAssignment struct { + // Partitions assigned to current member. + TopicPartitions []TopicPartition +} + +// MemberDescription represents the description of a consumer group member. +type MemberDescription struct { + // Client id. + ClientID string + // Group instance id. + GroupInstanceID string + // Consumer id. + ConsumerID string + // Group member host. + Host string + // Member assignment. + Assignment MemberAssignment +} + +// ConsumerGroupDescription represents the result of DescribeConsumerGroups for +// a single group. +type ConsumerGroupDescription struct { + // Group id. + GroupID string + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error + // Is a simple consumer group. + IsSimpleConsumerGroup bool + // Partition assignor identifier. + PartitionAssignor string + // Consumer group state. + State ConsumerGroupState + // Consumer group coordinator (has ID == -1 if not known). + Coordinator Node + // Members list. 
+ Members []MemberDescription + // Operations allowed for the group (nil if not available or not requested) + AuthorizedOperations []ACLOperation +} + +// DescribeConsumerGroupsResult represents the result of a +// DescribeConsumerGroups call. +type DescribeConsumerGroupsResult struct { + // Slice of ConsumerGroupDescription. + ConsumerGroupDescriptions []ConsumerGroupDescription +} + +// TopicCollection represents a collection of topics. +type TopicCollection struct { + // Slice of topic names. + topicNames []string +} + +// NewTopicCollectionOfTopicNames creates a new TopicCollection based on a list +// of topic names. +func NewTopicCollectionOfTopicNames(names []string) TopicCollection { + return TopicCollection{ + topicNames: names, + } +} + +// TopicPartitionInfo represents a specific partition's information inside a +// TopicDescription. +type TopicPartitionInfo struct { + // Partition id. + Partition int + // Leader broker. + Leader *Node + // Replicas of the partition. + Replicas []Node + // In-Sync-Replicas of the partition. + Isr []Node +} + +// TopicDescription represents the result of DescribeTopics for +// a single topic. +type TopicDescription struct { + // Topic name. + Name string + // Topic Id + TopicID UUID + // Error, if any, of the result. Check with `Error.Code() != ErrNoError`. + Error Error + // Is the topic internal to Kafka? + IsInternal bool + // Partitions' information list. + Partitions []TopicPartitionInfo + // Operations allowed for the topic (nil if not available or not requested). + AuthorizedOperations []ACLOperation +} + +// DescribeTopicsResult represents the result of a +// DescribeTopics call. +type DescribeTopicsResult struct { + // Slice of TopicDescription. + TopicDescriptions []TopicDescription +} + +// DescribeClusterResult represents the result of DescribeCluster. +type DescribeClusterResult struct { + // Cluster id for the cluster (always available if broker version >= 0.10.1.0, otherwise nil). + ClusterID *string + // Current controller broker for the cluster (nil if there is none). + Controller *Node + // List of brokers in the cluster. + Nodes []Node + // Operations allowed for the cluster (nil if not available or not requested). + AuthorizedOperations []ACLOperation +} + +// DeleteConsumerGroupsResult represents the result of a DeleteConsumerGroups +// call. +type DeleteConsumerGroupsResult struct { + // Slice of ConsumerGroupResult. + ConsumerGroupResults []ConsumerGroupResult +} + +// ListConsumerGroupOffsetsResult represents the result of a +// ListConsumerGroupOffsets operation. +type ListConsumerGroupOffsetsResult struct { + // A slice of ConsumerGroupTopicPartitions, each element represents a group's + // TopicPartitions and Offsets. + ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions +} + +// AlterConsumerGroupOffsetsResult represents the result of a +// AlterConsumerGroupOffsets operation. +type AlterConsumerGroupOffsetsResult struct { + // A slice of ConsumerGroupTopicPartitions, each element represents a group's + // TopicPartitions and Offsets. + ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions +} + +// TopicSpecification holds parameters for creating a new topic. +// TopicSpecification is analogous to NewTopic in the Java Topic Admin API. +type TopicSpecification struct { + // Topic name to create. + Topic string + // Number of partitions in topic. + NumPartitions int + // Default replication factor for the topic's partitions, or zero + // if an explicit ReplicaAssignment is set. 
+ ReplicationFactor int + // (Optional) Explicit replica assignment. The outer array is + // indexed by the partition number, while the inner per-partition array + // contains the replica broker ids. The first broker in each + // broker id list will be the preferred replica. + ReplicaAssignment [][]int32 + // Topic configuration. + Config map[string]string +} + +// PartitionsSpecification holds parameters for creating additional partitions for a topic. +// PartitionsSpecification is analogous to NewPartitions in the Java Topic Admin API. +type PartitionsSpecification struct { + // Topic to create more partitions for. + Topic string + // New partition count for topic, must be higher than current partition count. + IncreaseTo int + // (Optional) Explicit replica assignment. The outer array is + // indexed by the new partition index (i.e., 0 for the first added + // partition), while the inner per-partition array + // contains the replica broker ids. The first broker in each + // broker id list will be the preferred replica. + ReplicaAssignment [][]int32 +} + +// ResourceType represents an Apache Kafka resource type +type ResourceType int + +const ( + // ResourceUnknown - Unknown + ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN) + // ResourceAny - match any resource type (DescribeConfigs) + ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY) + // ResourceTopic - Topic + ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC) + // ResourceGroup - Group + ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP) + // ResourceBroker - Broker + ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER) +) + +// String returns the human-readable representation of a ResourceType +func (t ResourceType) String() string { + return C.GoString(C.rd_kafka_ResourceType_name(C.rd_kafka_ResourceType_t(t))) +} + +// ResourceTypeFromString translates a resource type name/string to +// a ResourceType value. +func ResourceTypeFromString(typeString string) (ResourceType, error) { + switch strings.ToUpper(typeString) { + case "ANY": + return ResourceAny, nil + case "TOPIC": + return ResourceTopic, nil + case "GROUP": + return ResourceGroup, nil + case "BROKER": + return ResourceBroker, nil + default: + return ResourceUnknown, NewError(ErrInvalidArg, "Unknown resource type", false) + } +} + +// ConfigSource represents an Apache Kafka config source +type ConfigSource int + +const ( + // ConfigSourceUnknown is the default value + ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG) + // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic + ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG) + // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker + ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG) + // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster + ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG) + // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. 
from server.properties file) + ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG) + // ConfigSourceDefault is built-in default configuration for configs that have a default value + ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG) +) + +// String returns the human-readable representation of a ConfigSource type +func (t ConfigSource) String() string { + return C.GoString(C.rd_kafka_ConfigSource_name(C.rd_kafka_ConfigSource_t(t))) +} + +// ConfigResource holds parameters for altering an Apache Kafka configuration resource +type ConfigResource struct { + // Type of resource to set. + Type ResourceType + // Name of resource to set. + Name string + // Config entries to set. + // Configuration updates are atomic, any configuration property not provided + // here will be reverted (by the broker) to its default value. + // Use DescribeConfigs to retrieve the list of current configuration entry values. + Config []ConfigEntry +} + +// String returns a human-readable representation of a ConfigResource +func (c ConfigResource) String() string { + return fmt.Sprintf("Resource(%s, %s)", c.Type, c.Name) +} + +// AlterOperation specifies the operation to perform on the ConfigEntry. +// Currently only AlterOperationSet. +type AlterOperation int + +const ( + // AlterOperationSet sets/overwrites the configuration setting. + AlterOperationSet = iota +) + +// String returns the human-readable representation of an AlterOperation +func (o AlterOperation) String() string { + switch o { + case AlterOperationSet: + return "Set" + default: + return fmt.Sprintf("Unknown%d?", int(o)) + } +} + +// AlterConfigOpType specifies the operation to perform +// on the ConfigEntry for IncrementalAlterConfig +type AlterConfigOpType int + +const ( + // AlterConfigOpTypeSet sets/overwrites the configuration + // setting. + AlterConfigOpTypeSet = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET) + // AlterConfigOpTypeDelete sets the configuration setting + // to default or NULL. + AlterConfigOpTypeDelete = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE) + // AlterConfigOpTypeAppend appends the value to existing + // configuration settings. + AlterConfigOpTypeAppend = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND) + // AlterConfigOpTypeSubtract subtracts the value from + // existing configuration settings. + AlterConfigOpTypeSubtract = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT) +) + +// String returns the human-readable representation of an AlterOperation +func (o AlterConfigOpType) String() string { + switch o { + case AlterConfigOpTypeSet: + return "Set" + case AlterConfigOpTypeDelete: + return "Delete" + case AlterConfigOpTypeAppend: + return "Append" + case AlterConfigOpTypeSubtract: + return "Subtract" + default: + return fmt.Sprintf("Unknown %d", int(o)) + } +} + +// ConfigEntry holds parameters for altering a resource's configuration. +type ConfigEntry struct { + // Name of configuration entry, e.g., topic configuration property name. + Name string + // Value of configuration entry. + Value string + // Deprecated: Operation to perform on the entry. + Operation AlterOperation + // Operation to perform on the entry incrementally. + IncrementalOperation AlterConfigOpType +} + +// StringMapToConfigEntries creates a new map of ConfigEntry objects from the +// provided string map. The AlterOperation is set on each created entry. 
+func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry { + var ceList []ConfigEntry + + for k, v := range stringMap { + ceList = append(ceList, ConfigEntry{Name: k, Value: v, Operation: operation}) + } + + return ceList +} + +// StringMapToIncrementalConfigEntries creates a new map of ConfigEntry objects from the +// provided string map an operation map. The AlterConfigOpType is set on each created entry. +func StringMapToIncrementalConfigEntries(stringMap map[string]string, + operationMap map[string]AlterConfigOpType) []ConfigEntry { + var ceList []ConfigEntry + + for k, v := range stringMap { + ceList = append(ceList, ConfigEntry{Name: k, Value: v, IncrementalOperation: operationMap[k]}) + } + + return ceList +} + +// String returns a human-readable representation of a ConfigEntry. +func (c ConfigEntry) String() string { + return fmt.Sprintf("%v %s=\"%s\"", c.Operation, c.Name, c.Value) +} + +// ConfigEntryResult contains the result of a single configuration entry from a +// DescribeConfigs request. +type ConfigEntryResult struct { + // Name of configuration entry, e.g., topic configuration property name. + Name string + // Value of configuration entry. + Value string + // Source indicates the configuration source. + Source ConfigSource + // IsReadOnly indicates whether the configuration entry can be altered. + IsReadOnly bool + // IsDefault indicates whether the value is at its default. + IsDefault bool + // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset. + IsSensitive bool + // IsSynonym indicates whether the configuration entry is a synonym for another configuration property. + IsSynonym bool + // Synonyms contains a map of configuration entries that are synonyms to this configuration entry. + Synonyms map[string]ConfigEntryResult +} + +// String returns a human-readable representation of a ConfigEntryResult. +func (c ConfigEntryResult) String() string { + return fmt.Sprintf("%s=\"%s\"", c.Name, c.Value) +} + +// setFromC sets up a ConfigEntryResult from a C ConfigEntry +func configEntryResultFromC(cEntry *C.rd_kafka_ConfigEntry_t) (entry ConfigEntryResult) { + entry.Name = C.GoString(C.rd_kafka_ConfigEntry_name(cEntry)) + cValue := C.rd_kafka_ConfigEntry_value(cEntry) + if cValue != nil { + entry.Value = C.GoString(cValue) + } + entry.Source = ConfigSource(C.rd_kafka_ConfigEntry_source(cEntry)) + entry.IsReadOnly = cint2bool(C.rd_kafka_ConfigEntry_is_read_only(cEntry)) + entry.IsDefault = cint2bool(C.rd_kafka_ConfigEntry_is_default(cEntry)) + entry.IsSensitive = cint2bool(C.rd_kafka_ConfigEntry_is_sensitive(cEntry)) + entry.IsSynonym = cint2bool(C.rd_kafka_ConfigEntry_is_synonym(cEntry)) + + var cSynCnt C.size_t + cSyns := C.rd_kafka_ConfigEntry_synonyms(cEntry, &cSynCnt) + if cSynCnt > 0 { + entry.Synonyms = make(map[string]ConfigEntryResult) + } + + for si := 0; si < int(cSynCnt); si++ { + cSyn := C.ConfigEntry_by_idx(cSyns, cSynCnt, C.size_t(si)) + Syn := configEntryResultFromC(cSyn) + entry.Synonyms[Syn.Name] = Syn + } + + return entry +} + +// ConfigResourceResult provides the result for a resource from a AlterConfigs or +// DescribeConfigs request. +type ConfigResourceResult struct { + // Type of returned result resource. + Type ResourceType + // Name of returned result resource. + Name string + // Error, if any, of returned result resource. + Error Error + // Config entries, if any, of returned result resource. 
+ Config map[string]ConfigEntryResult +} + +// String returns a human-readable representation of a ConfigResourceResult. +func (c ConfigResourceResult) String() string { + if c.Error.Code() != 0 { + return fmt.Sprintf("ResourceResult(%s, %s, \"%v\")", c.Type, c.Name, c.Error) + + } + return fmt.Sprintf("ResourceResult(%s, %s, %d config(s))", c.Type, c.Name, len(c.Config)) +} + +// ResourcePatternType enumerates the different types of Kafka resource patterns. +type ResourcePatternType int + +const ( + // ResourcePatternTypeUnknown is a resource pattern type not known or not set. + ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) + // ResourcePatternTypeAny matches any resource, used for lookups. + ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY) + // ResourcePatternTypeMatch will perform pattern matching + ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH) + // ResourcePatternTypeLiteral matches a literal resource name + ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL) + // ResourcePatternTypePrefixed matches a prefixed resource name + ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED) +) + +// String returns the human-readable representation of a ResourcePatternType +func (t ResourcePatternType) String() string { + return C.GoString(C.rd_kafka_ResourcePatternType_name(C.rd_kafka_ResourcePatternType_t(t))) +} + +// ResourcePatternTypeFromString translates a resource pattern type name to +// a ResourcePatternType value. +func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) { + switch strings.ToUpper(patternTypeString) { + case "ANY": + return ResourcePatternTypeAny, nil + case "MATCH": + return ResourcePatternTypeMatch, nil + case "LITERAL": + return ResourcePatternTypeLiteral, nil + case "PREFIXED": + return ResourcePatternTypePrefixed, nil + default: + return ResourcePatternTypeUnknown, NewError(ErrInvalidArg, "Unknown resource pattern type", false) + } +} + +// ACLOperation enumerates the different types of ACL operation. 
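To tie ConfigResource, ConfigEntryResult and ConfigSource together, a hedged sketch of reading a topic's configuration via the admin client's DescribeConfigs call; the broker address and topic name are placeholders:

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Each returned ConfigEntryResult carries its ConfigSource and flags.
	results, err := a.DescribeConfigs(ctx, []kafka.ConfigResource{
		{Type: kafka.ResourceTopic, Name: "my-topic"},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range results {
		for _, entry := range res.Config {
			fmt.Printf("%s: %v source=%v default=%v\n", res.Name, entry, entry.Source, entry.IsDefault)
		}
	}
}
```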
+type ACLOperation int + +const ( + // ACLOperationUnknown represents an unknown or unset operation + ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN) + // ACLOperationAny in a filter, matches any ACLOperation + ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY) + // ACLOperationAll represents all the operations + ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL) + // ACLOperationRead a read operation + ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ) + // ACLOperationWrite represents a write operation + ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE) + // ACLOperationCreate represents a create operation + ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE) + // ACLOperationDelete represents a delete operation + ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE) + // ACLOperationAlter represents an alter operation + ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER) + // ACLOperationDescribe represents a describe operation + ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE) + // ACLOperationClusterAction represents a cluster action operation + ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION) + // ACLOperationDescribeConfigs represents a describe configs operation + ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS) + // ACLOperationAlterConfigs represents an alter configs operation + ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS) + // ACLOperationIdempotentWrite represents an idempotent write operation + ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE) +) + +// String returns the human-readable representation of an ACLOperation +func (o ACLOperation) String() string { + return C.GoString(C.rd_kafka_AclOperation_name(C.rd_kafka_AclOperation_t(o))) +} + +// ACLOperationFromString translates a ACL operation name to +// a ACLOperation value. +func ACLOperationFromString(aclOperationString string) (ACLOperation, error) { + switch strings.ToUpper(aclOperationString) { + case "ANY": + return ACLOperationAny, nil + case "ALL": + return ACLOperationAll, nil + case "READ": + return ACLOperationRead, nil + case "WRITE": + return ACLOperationWrite, nil + case "CREATE": + return ACLOperationCreate, nil + case "DELETE": + return ACLOperationDelete, nil + case "ALTER": + return ACLOperationAlter, nil + case "DESCRIBE": + return ACLOperationDescribe, nil + case "CLUSTER_ACTION": + return ACLOperationClusterAction, nil + case "DESCRIBE_CONFIGS": + return ACLOperationDescribeConfigs, nil + case "ALTER_CONFIGS": + return ACLOperationAlterConfigs, nil + case "IDEMPOTENT_WRITE": + return ACLOperationIdempotentWrite, nil + default: + return ACLOperationUnknown, NewError(ErrInvalidArg, "Unknown ACL operation", false) + } +} + +// ACLPermissionType enumerates the different types of ACL permission types. 
+type ACLPermissionType int + +const ( + // ACLPermissionTypeUnknown represents an unknown ACLPermissionType + ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN) + // ACLPermissionTypeAny in a filter, matches any ACLPermissionType + ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY) + // ACLPermissionTypeDeny disallows access + ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY) + // ACLPermissionTypeAllow grants access + ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW) +) + +// String returns the human-readable representation of an ACLPermissionType +func (o ACLPermissionType) String() string { + return C.GoString(C.rd_kafka_AclPermissionType_name(C.rd_kafka_AclPermissionType_t(o))) +} + +// ACLPermissionTypeFromString translates a ACL permission type name to +// a ACLPermissionType value. +func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) { + switch strings.ToUpper(aclPermissionTypeString) { + case "ANY": + return ACLPermissionTypeAny, nil + case "DENY": + return ACLPermissionTypeDeny, nil + case "ALLOW": + return ACLPermissionTypeAllow, nil + default: + return ACLPermissionTypeUnknown, NewError(ErrInvalidArg, "Unknown ACL permission type", false) + } +} + +// ACLBinding specifies the operation and permission type for a specific principal +// over one or more resources of the same type. Used by `AdminClient.CreateACLs`, +// returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +type ACLBinding struct { + Type ResourceType // The resource type. + // The resource name, which depends on the resource type. + // For ResourceBroker the resource name is the broker id. + Name string + ResourcePatternType ResourcePatternType // The resource pattern, relative to the name. + Principal string // The principal this ACLBinding refers to. + Host string // The host that the call is allowed to come from. + Operation ACLOperation // The operation/s specified by this binding. + PermissionType ACLPermissionType // The permission type for the specified operation. +} + +// ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes. +// Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +type ACLBindingFilter = ACLBinding + +// ACLBindings is a slice of ACLBinding that also implements +// the sort interface +type ACLBindings []ACLBinding + +// ACLBindingFilters is a slice of ACLBindingFilter that also implements +// the sort interface +type ACLBindingFilters []ACLBindingFilter + +func (a ACLBindings) Len() int { + return len(a) +} + +func (a ACLBindings) Less(i, j int) bool { + if a[i].Type != a[j].Type { + return a[i].Type < a[j].Type + } + if a[i].Name != a[j].Name { + return a[i].Name < a[j].Name + } + if a[i].ResourcePatternType != a[j].ResourcePatternType { + return a[i].ResourcePatternType < a[j].ResourcePatternType + } + if a[i].Principal != a[j].Principal { + return a[i].Principal < a[j].Principal + } + if a[i].Host != a[j].Host { + return a[i].Host < a[j].Host + } + if a[i].Operation != a[j].Operation { + return a[i].Operation < a[j].Operation + } + if a[i].PermissionType != a[j].PermissionType { + return a[i].PermissionType < a[j].PermissionType + } + return true +} + +func (a ACLBindings) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +// CreateACLResult provides create ACL error information. +type CreateACLResult struct { + // Error, if any, of result. 
Check with `Error.Code() != ErrNoError`. + Error Error +} + +// DescribeACLsResult provides describe ACLs result or error information. +type DescribeACLsResult struct { + // Slice of ACL bindings matching the provided filter + ACLBindings ACLBindings + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// DeleteACLsResult provides delete ACLs result or error information. +type DeleteACLsResult = DescribeACLsResult + +// ScramMechanism enumerates SASL/SCRAM mechanisms. +// Used by `AdminClient.AlterUserScramCredentials` +// and `AdminClient.DescribeUserScramCredentials`. +type ScramMechanism int + +const ( + // ScramMechanismUnknown - Unknown SASL/SCRAM mechanism + ScramMechanismUnknown = ScramMechanism(C.RD_KAFKA_SCRAM_MECHANISM_UNKNOWN) + // ScramMechanismSHA256 - SCRAM-SHA-256 mechanism + ScramMechanismSHA256 = ScramMechanism(C.RD_KAFKA_SCRAM_MECHANISM_SHA_256) + // ScramMechanismSHA512 - SCRAM-SHA-512 mechanism + ScramMechanismSHA512 = ScramMechanism(C.RD_KAFKA_SCRAM_MECHANISM_SHA_512) +) + +// String returns the human-readable representation of an ScramMechanism +func (o ScramMechanism) String() string { + switch o { + case ScramMechanismSHA256: + return "SCRAM-SHA-256" + case ScramMechanismSHA512: + return "SCRAM-SHA-512" + default: + return "UNKNOWN" + } +} + +// ScramMechanismFromString translates a Scram Mechanism name to +// a ScramMechanism value. +func ScramMechanismFromString(mechanism string) (ScramMechanism, error) { + switch strings.ToUpper(mechanism) { + case "SCRAM-SHA-256": + return ScramMechanismSHA256, nil + case "SCRAM-SHA-512": + return ScramMechanismSHA512, nil + default: + return ScramMechanismUnknown, + NewError(ErrInvalidArg, "Unknown SCRAM mechanism", false) + } +} + +// ScramCredentialInfo contains Mechanism and Iterations for a +// SASL/SCRAM credential associated with a user. +type ScramCredentialInfo struct { + // Iterations - positive number of iterations used when creating the credential + Iterations int + // Mechanism - SASL/SCRAM mechanism + Mechanism ScramMechanism +} + +// UserScramCredentialsDescription represent all SASL/SCRAM credentials +// associated with a user that can be retrieved, or an error indicating +// why credentials could not be retrieved. +type UserScramCredentialsDescription struct { + // User - the user name. + User string + // ScramCredentialInfos - SASL/SCRAM credential representations for the user. + ScramCredentialInfos []ScramCredentialInfo + // Error - error corresponding to this user description. + Error Error +} + +// UserScramCredentialDeletion is a request to delete +// a SASL/SCRAM credential for a user. +type UserScramCredentialDeletion struct { + // User - user name + User string + // Mechanism - SASL/SCRAM mechanism. + Mechanism ScramMechanism +} + +// UserScramCredentialUpsertion is a request to update/insert +// a SASL/SCRAM credential for a user. +type UserScramCredentialUpsertion struct { + // User - user name + User string + // ScramCredentialInfo - the mechanism and iterations. + ScramCredentialInfo ScramCredentialInfo + // Password - password to HMAC before storage. + Password []byte + // Salt - salt to use. Will be generated randomly if nil. (optional) + Salt []byte +} + +// DescribeUserScramCredentialsResult represents the result of a +// DescribeUserScramCredentials call. 
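As an illustrative (not authoritative) use of ACLBinding with the CreateACLs admin call defined later in this file; principal, topic and broker address are placeholders:

```
package main

import (
	"context"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Allow a principal to read one literally-named topic from any host.
	results, err := a.CreateACLs(ctx, kafka.ACLBindings{{
		Type:                kafka.ResourceTopic,
		Name:                "my-topic",
		ResourcePatternType: kafka.ResourcePatternTypeLiteral,
		Principal:           "User:alice",
		Host:                "*",
		Operation:           kafka.ACLOperationRead,
		PermissionType:      kafka.ACLPermissionTypeAllow,
	}})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			log.Printf("CreateACLs: %v", r.Error)
		}
	}
}
```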
+type DescribeUserScramCredentialsResult struct { + // Descriptions - Map from user name + // to UserScramCredentialsDescription + Descriptions map[string]UserScramCredentialsDescription +} + +// AlterUserScramCredentialsResult represents the result of a +// AlterUserScramCredentials call. +type AlterUserScramCredentialsResult struct { + // Errors - Map from user name + // to an Error, with ErrNoError code on success. + Errors map[string]Error +} + +// OffsetSpec specifies desired offsets while using ListOffsets. +type OffsetSpec int64 + +const ( + // MaxTimestampOffsetSpec is used to describe the offset with the Max Timestamp which may be different then LatestOffsetSpec as Timestamp can be set client side. + MaxTimestampOffsetSpec = OffsetSpec(C.RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP) + // EarliestOffsetSpec is used to describe the earliest offset for the TopicPartition. + EarliestOffsetSpec = OffsetSpec(C.RD_KAFKA_OFFSET_SPEC_EARLIEST) + // LatestOffsetSpec is used to describe the latest offset for the TopicPartition. + LatestOffsetSpec = OffsetSpec(C.RD_KAFKA_OFFSET_SPEC_LATEST) +) + +// NewOffsetSpecForTimestamp creates an OffsetSpec corresponding to the timestamp. +func NewOffsetSpecForTimestamp(timestamp int64) OffsetSpec { + return OffsetSpec(timestamp) +} + +// ListOffsetsResultInfo describes the result of ListOffsets request for a Topic Partition. +type ListOffsetsResultInfo struct { + Offset Offset + Timestamp int64 + LeaderEpoch *int32 + Error Error +} + +// ListOffsetsResult holds the map of TopicPartition to ListOffsetsResultInfo for a request. +type ListOffsetsResult struct { + ResultInfos map[TopicPartition]ListOffsetsResultInfo +} + +// waitResult waits for a result event on cQueue or the ctx to be cancelled, whichever happens +// first. +// The returned result event is checked for errors its error is returned if set. +func (a *AdminClient) waitResult(ctx context.Context, cQueue *C.rd_kafka_queue_t, cEventType C.rd_kafka_event_type_t) (rkev *C.rd_kafka_event_t, err error) { + resultChan := make(chan *C.rd_kafka_event_t) + closeChan := make(chan bool) // never written to, just closed + + go func() { + for { + select { + case _, ok := <-closeChan: + if !ok { + // Context cancelled/timed out + close(resultChan) + return + } + + default: + // Wait for result event for at most 50ms + // to avoid blocking for too long if + // context is cancelled. + rkev := C.rd_kafka_queue_poll(cQueue, 50) + if rkev != nil { + resultChan <- rkev + close(resultChan) + return + } + } + } + }() + + select { + case rkev = <-resultChan: + // Result type check + if cEventType != C.rd_kafka_event_type(rkev) { + err = newErrorFromString(ErrInvalidType, + fmt.Sprintf("Expected %d result event, not %d", (int)(cEventType), (int)(C.rd_kafka_event_type(rkev)))) + C.rd_kafka_event_destroy(rkev) + return nil, err + } + + // Generic error handling + cErr := C.rd_kafka_event_error(rkev) + if cErr != 0 { + err = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)) + C.rd_kafka_event_destroy(rkev) + return nil, err + } + close(closeChan) + return rkev, nil + case <-ctx.Done(): + // signal close to go-routine + close(closeChan) + // wait for close from go-routine to make sure it is done + // using cQueue before we return. + rkev, ok := <-resultChan + if ok { + // throw away result since context was cancelled + C.rd_kafka_event_destroy(rkev) + } + return nil, ctx.Err() + } +} + +// cToConsumerGroupResults converts a C group_result_t array to Go ConsumerGroupResult list. 
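A hedged sketch of how these OffsetSpec values are consumed, assuming the ListOffsets admin method defined later in this file accepts a map of TopicPartition to OffsetSpec and returns the ListOffsetsResult above; topic and broker address are placeholders:

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	topic := "my-topic"
	res, err := a.ListOffsets(ctx, map[kafka.TopicPartition]kafka.OffsetSpec{
		{Topic: &topic, Partition: 0}: kafka.EarliestOffsetSpec,
		{Topic: &topic, Partition: 1}: kafka.LatestOffsetSpec,
	})
	if err != nil {
		log.Fatal(err)
	}
	for tp, info := range res.ResultInfos {
		fmt.Printf("%s[%d]: offset=%v ts=%d err=%v\n", *tp.Topic, tp.Partition, info.Offset, info.Timestamp, info.Error)
	}
}
```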
+func (a *AdminClient) cToConsumerGroupResults( + cGroupRes **C.rd_kafka_group_result_t, cCnt C.size_t) (result []ConsumerGroupResult, err error) { + result = make([]ConsumerGroupResult, int(cCnt)) + + for idx := 0; idx < int(cCnt); idx++ { + cGroup := C.group_result_by_idx(cGroupRes, cCnt, C.size_t(idx)) + result[idx].Group = C.GoString(C.rd_kafka_group_result_name(cGroup)) + result[idx].Error = newErrorFromCError(C.rd_kafka_group_result_error(cGroup)) + } + + return result, nil +} + +// cToTopicResults converts a C topic_result_t array to Go TopicResult list. +func (a *AdminClient) cToTopicResults(cTopicRes **C.rd_kafka_topic_result_t, cCnt C.size_t) (result []TopicResult, err error) { + result = make([]TopicResult, int(cCnt)) + + for i := 0; i < int(cCnt); i++ { + cTopic := C.topic_result_by_idx(cTopicRes, cCnt, C.size_t(i)) + result[i].Topic = C.GoString(C.rd_kafka_topic_result_name(cTopic)) + result[i].Error = newErrorFromCString( + C.rd_kafka_topic_result_error(cTopic), + C.rd_kafka_topic_result_error_string(cTopic)) + } + + return result, nil +} + +// cToAuthorizedOperations converts a C AclOperation_t array to a Go +// ACLOperation list. +func (a *AdminClient) cToAuthorizedOperations( + cAuthorizedOperations *C.rd_kafka_AclOperation_t, + cAuthorizedOperationCnt C.size_t) []ACLOperation { + if cAuthorizedOperations == nil { + return nil + } + + authorizedOperations := make([]ACLOperation, int(cAuthorizedOperationCnt)) + for i := 0; i < int(cAuthorizedOperationCnt); i++ { + cAuthorizedOperation := C.AclOperation_by_idx( + cAuthorizedOperations, cAuthorizedOperationCnt, C.size_t(i)) + authorizedOperations[i] = ACLOperation(cAuthorizedOperation) + } + + return authorizedOperations +} + +// cToUUID converts a C rd_kafka_Uuid_t to a Go UUID. +func (a *AdminClient) cToUUID(cUUID *C.rd_kafka_Uuid_t) UUID { + uuid := UUID{ + mostSignificantBits: int64(C.rd_kafka_Uuid_most_significant_bits(cUUID)), + leastSignificantBits: int64(C.rd_kafka_Uuid_least_significant_bits(cUUID)), + base64str: C.GoString(C.rd_kafka_Uuid_base64str(cUUID)), + } + return uuid +} + +// cToNode converts a C Node_t* to a Go Node. +// If cNode is nil returns a Node with ID: -1. +func (a *AdminClient) cToNode(cNode *C.rd_kafka_Node_t) Node { + if cNode == nil { + return Node{ + ID: -1, + } + } + + node := Node{ + ID: int(C.rd_kafka_Node_id(cNode)), + Host: C.GoString(C.rd_kafka_Node_host(cNode)), + Port: int(C.rd_kafka_Node_port(cNode)), + } + + cRack := C.rd_kafka_Node_rack(cNode) + if cRack != nil { + rackID := C.GoString(cRack) + node.Rack = &rackID + } + + return node +} + +// cToNodePtr converts a C Node_t* to a Go *Node. +func (a *AdminClient) cToNodePtr(cNode *C.rd_kafka_Node_t) *Node { + if cNode == nil { + return nil + } + + node := a.cToNode(cNode) + return &node +} + +// cToNode converts a C Node_t array to a Go Node list. +func (a *AdminClient) cToNodes( + cNodes **C.rd_kafka_Node_t, cNodeCnt C.size_t) []Node { + nodes := make([]Node, int(cNodeCnt)) + for i := 0; i < int(cNodeCnt); i++ { + cNode := C.Node_by_idx(cNodes, cNodeCnt, C.size_t(i)) + nodes[i] = a.cToNode(cNode) + } + return nodes +} + +// cToConsumerGroupDescriptions converts a C rd_kafka_ConsumerGroupDescription_t +// array to a Go ConsumerGroupDescription slice. 
+func (a *AdminClient) cToConsumerGroupDescriptions( + cGroups **C.rd_kafka_ConsumerGroupDescription_t, + cGroupCount C.size_t) (result []ConsumerGroupDescription) { + result = make([]ConsumerGroupDescription, cGroupCount) + for idx := 0; idx < int(cGroupCount); idx++ { + cGroup := C.ConsumerGroupDescription_by_idx( + cGroups, cGroupCount, C.size_t(idx)) + + groupID := C.GoString( + C.rd_kafka_ConsumerGroupDescription_group_id(cGroup)) + err := newErrorFromCError( + C.rd_kafka_ConsumerGroupDescription_error(cGroup)) + isSimple := cint2bool( + C.rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(cGroup)) + paritionAssignor := C.GoString( + C.rd_kafka_ConsumerGroupDescription_partition_assignor(cGroup)) + state := ConsumerGroupState( + C.rd_kafka_ConsumerGroupDescription_state(cGroup)) + + cNode := C.rd_kafka_ConsumerGroupDescription_coordinator(cGroup) + coordinator := a.cToNode(cNode) + + membersCount := int( + C.rd_kafka_ConsumerGroupDescription_member_count(cGroup)) + members := make([]MemberDescription, membersCount) + + for midx := 0; midx < membersCount; midx++ { + cMember := + C.rd_kafka_ConsumerGroupDescription_member(cGroup, C.size_t(midx)) + cMemberAssignment := + C.rd_kafka_MemberDescription_assignment(cMember) + cToppars := + C.rd_kafka_MemberAssignment_partitions(cMemberAssignment) + memberAssignment := MemberAssignment{} + if cToppars != nil { + memberAssignment.TopicPartitions = newTopicPartitionsFromCparts(cToppars) + } + members[midx] = MemberDescription{ + ClientID: C.GoString( + C.rd_kafka_MemberDescription_client_id(cMember)), + GroupInstanceID: C.GoString( + C.rd_kafka_MemberDescription_group_instance_id(cMember)), + ConsumerID: C.GoString( + C.rd_kafka_MemberDescription_consumer_id(cMember)), + Host: C.GoString( + C.rd_kafka_MemberDescription_host(cMember)), + Assignment: memberAssignment, + } + } + + cAuthorizedOperationsCnt := C.size_t(0) + cAuthorizedOperations := C.rd_kafka_ConsumerGroupDescription_authorized_operations( + cGroup, &cAuthorizedOperationsCnt) + authorizedOperations := a.cToAuthorizedOperations(cAuthorizedOperations, + cAuthorizedOperationsCnt) + + result[idx] = ConsumerGroupDescription{ + GroupID: groupID, + Error: err, + IsSimpleConsumerGroup: isSimple, + PartitionAssignor: paritionAssignor, + State: state, + Coordinator: coordinator, + Members: members, + AuthorizedOperations: authorizedOperations, + } + } + return result +} + +// cToTopicPartitionInfo converts a C TopicPartitionInfo_t into a Go +// TopicPartitionInfo. +func (a *AdminClient) cToTopicPartitionInfo( + partitionInfo *C.rd_kafka_TopicPartitionInfo_t) TopicPartitionInfo { + cPartitionID := C.rd_kafka_TopicPartitionInfo_partition(partitionInfo) + info := TopicPartitionInfo{ + Partition: int(cPartitionID), + } + + cLeader := C.rd_kafka_TopicPartitionInfo_leader(partitionInfo) + info.Leader = a.cToNodePtr(cLeader) + + cReplicaCnt := C.size_t(0) + cReplicas := C.rd_kafka_TopicPartitionInfo_replicas( + partitionInfo, &cReplicaCnt) + info.Replicas = a.cToNodes(cReplicas, cReplicaCnt) + + cIsrCnt := C.size_t(0) + cIsr := C.rd_kafka_TopicPartitionInfo_isr(partitionInfo, &cIsrCnt) + info.Isr = a.cToNodes(cIsr, cIsrCnt) + + return info +} + +// cToTopicDescriptions converts a C TopicDescription_t +// array to a Go TopicDescription list. 
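The conversion above feeds DescribeConsumerGroups results back to Go callers. A hedged usage sketch; the group name and broker address are placeholders, and the method signature is assumed from the result types in this file:

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	res, err := a.DescribeConsumerGroups(ctx, []string{"my-group"})
	if err != nil {
		log.Fatal(err)
	}
	for _, desc := range res.ConsumerGroupDescriptions {
		fmt.Printf("%s state=%v coordinator=%d members=%d\n",
			desc.GroupID, desc.State, desc.Coordinator.ID, len(desc.Members))
	}
}
```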
+func (a *AdminClient) cToTopicDescriptions( + cTopicDescriptions **C.rd_kafka_TopicDescription_t, + cTopicDescriptionCount C.size_t) (result []TopicDescription) { + result = make([]TopicDescription, cTopicDescriptionCount) + for idx := 0; idx < int(cTopicDescriptionCount); idx++ { + cTopic := C.TopicDescription_by_idx( + cTopicDescriptions, cTopicDescriptionCount, C.size_t(idx)) + + topicName := C.GoString( + C.rd_kafka_TopicDescription_name(cTopic)) + TopicID := a.cToUUID(C.rd_kafka_TopicDescription_topic_id(cTopic)) + err := newErrorFromCError( + C.rd_kafka_TopicDescription_error(cTopic)) + + if err.Code() != ErrNoError { + result[idx] = TopicDescription{ + Name: topicName, + Error: err, + } + continue + } + + cPartitionInfoCnt := C.size_t(0) + cPartitionInfos := C.rd_kafka_TopicDescription_partitions(cTopic, &cPartitionInfoCnt) + + partitions := make([]TopicPartitionInfo, int(cPartitionInfoCnt)) + + for pidx := 0; pidx < int(cPartitionInfoCnt); pidx++ { + cPartitionInfo := C.TopicPartitionInfo_by_idx(cPartitionInfos, cPartitionInfoCnt, C.size_t(pidx)) + partitions[pidx] = a.cToTopicPartitionInfo(cPartitionInfo) + } + + cAuthorizedOperationsCnt := C.size_t(0) + cAuthorizedOperations := C.rd_kafka_TopicDescription_authorized_operations( + cTopic, &cAuthorizedOperationsCnt) + authorizedOperations := a.cToAuthorizedOperations(cAuthorizedOperations, cAuthorizedOperationsCnt) + + result[idx] = TopicDescription{ + Name: topicName, + TopicID: TopicID, + Error: err, + Partitions: partitions, + AuthorizedOperations: authorizedOperations, + } + } + return result +} + +// cToDescribeClusterResult converts a C DescribeTopics_result_t to a Go +// DescribeClusterResult. +func (a *AdminClient) cToDescribeClusterResult( + cResult *C.rd_kafka_DescribeTopics_result_t) (result DescribeClusterResult) { + var clusterIDPtr *string = nil + cClusterID := C.rd_kafka_DescribeCluster_result_cluster_id(cResult) + if cClusterID != nil { + clusterID := C.GoString(cClusterID) + clusterIDPtr = &clusterID + } + + var controller *Node = nil + cController := C.rd_kafka_DescribeCluster_result_controller(cResult) + controller = a.cToNodePtr(cController) + + cNodeCnt := C.size_t(0) + cNodes := C.rd_kafka_DescribeCluster_result_nodes(cResult, &cNodeCnt) + nodes := a.cToNodes(cNodes, cNodeCnt) + + cAuthorizedOperationsCnt := C.size_t(0) + cAuthorizedOperations := + C.rd_kafka_DescribeCluster_result_authorized_operations( + cResult, &cAuthorizedOperationsCnt) + authorizedOperations := a.cToAuthorizedOperations( + cAuthorizedOperations, cAuthorizedOperationsCnt) + + return DescribeClusterResult{ + ClusterID: clusterIDPtr, + Controller: controller, + Nodes: nodes, + AuthorizedOperations: authorizedOperations, + } +} + +// cToDescribeUserScramCredentialsResult converts a C +// rd_kafka_DescribeUserScramCredentials_result_t to a Go map of users to +// UserScramCredentialsDescription. 
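Correspondingly, a hedged sketch of DescribeTopics (note the TopicCollection built with NewTopicCollectionOfTopicNames) and DescribeCluster; names and broker address are placeholders, and the method signatures are assumed from the result types above:

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	tRes, err := a.DescribeTopics(ctx, kafka.NewTopicCollectionOfTopicNames([]string{"my-topic"}))
	if err != nil {
		log.Fatal(err)
	}
	for _, td := range tRes.TopicDescriptions {
		fmt.Printf("topic %s id=%v partitions=%d err=%v\n", td.Name, td.TopicID, len(td.Partitions), td.Error)
	}

	cRes, err := a.DescribeCluster(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("brokers=%d controllerKnown=%v\n", len(cRes.Nodes), cRes.Controller != nil)
}
```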
+func cToDescribeUserScramCredentialsResult( + cRes *C.rd_kafka_DescribeUserScramCredentials_result_t) map[string]UserScramCredentialsDescription { + result := make(map[string]UserScramCredentialsDescription) + var cDescriptionCount C.size_t + cDescriptions := + C.rd_kafka_DescribeUserScramCredentials_result_descriptions(cRes, + &cDescriptionCount) + + for i := 0; i < int(cDescriptionCount); i++ { + cDescription := + C.DescribeUserScramCredentials_result_description_by_idx( + cDescriptions, cDescriptionCount, C.size_t(i)) + user := C.GoString(C.rd_kafka_UserScramCredentialsDescription_user(cDescription)) + userDescription := UserScramCredentialsDescription{User: user} + + // Populate the error if required. + cError := C.rd_kafka_UserScramCredentialsDescription_error(cDescription) + if C.rd_kafka_error_code(cError) != C.RD_KAFKA_RESP_ERR_NO_ERROR { + userDescription.Error = newError(C.rd_kafka_error_code(cError)) + result[user] = userDescription + continue + } + + cCredentialCount := C.rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(cDescription) + scramCredentialInfos := make([]ScramCredentialInfo, int(cCredentialCount)) + for j := 0; j < int(cCredentialCount); j++ { + cScramCredentialInfo := + C.rd_kafka_UserScramCredentialsDescription_scramcredentialinfo( + cDescription, C.size_t(j)) + cMechanism := C.rd_kafka_ScramCredentialInfo_mechanism(cScramCredentialInfo) + cIterations := C.rd_kafka_ScramCredentialInfo_iterations(cScramCredentialInfo) + scramCredentialInfos[j] = ScramCredentialInfo{ + Mechanism: ScramMechanism(cMechanism), + Iterations: int(cIterations), + } + } + userDescription.ScramCredentialInfos = scramCredentialInfos + result[user] = userDescription + } + return result +} + +// cToListOffsetsResult converts a C +// rd_kafka_ListOffsets_result_t to a Go ListOffsetsResult +func cToListOffsetsResult(cRes *C.rd_kafka_ListOffsets_result_t) (result ListOffsetsResult) { + result = ListOffsetsResult{ResultInfos: make(map[TopicPartition]ListOffsetsResultInfo)} + var cPartitionCount C.size_t + cResultInfos := C.rd_kafka_ListOffsets_result_infos(cRes, &cPartitionCount) + for itr := 0; itr < int(cPartitionCount); itr++ { + cResultInfo := C.ListOffsetsResultInfo_by_idx(cResultInfos, cPartitionCount, C.size_t(itr)) + resultInfo := ListOffsetsResultInfo{} + cPartition := C.rd_kafka_ListOffsetsResultInfo_topic_partition(cResultInfo) + Topic := C.GoString(cPartition.topic) + Partition := TopicPartition{Topic: &Topic, Partition: int32(cPartition.partition)} + resultInfo.Offset = Offset(cPartition.offset) + resultInfo.Timestamp = int64(C.rd_kafka_ListOffsetsResultInfo_timestamp(cResultInfo)) + cLeaderEpoch := int32(C.rd_kafka_topic_partition_get_leader_epoch(cPartition)) + if cLeaderEpoch >= 0 { + resultInfo.LeaderEpoch = &cLeaderEpoch + } + resultInfo.Error = newError(cPartition.err) + result.ResultInfos[Partition] = resultInfo + } + return result +} + +// ConsumerGroupDescription converts a C rd_kafka_ConsumerGroupListing_t array +// to a Go ConsumerGroupListing slice. 
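A heavily hedged sketch of surfacing these SASL/SCRAM descriptions, assuming the DescribeUserScramCredentials admin method defined later in this file takes a list of user names; the user and broker address are placeholders:

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	res, err := a.DescribeUserScramCredentials(ctx, []string{"alice"})
	if err != nil {
		log.Fatal(err)
	}
	for user, desc := range res.Descriptions {
		if desc.Error.Code() != kafka.ErrNoError {
			fmt.Printf("%s: %v\n", user, desc.Error)
			continue
		}
		for _, info := range desc.ScramCredentialInfos {
			fmt.Printf("%s: %v, %d iterations\n", user, info.Mechanism, info.Iterations)
		}
	}
}
```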
+func (a *AdminClient) cToConsumerGroupListings( + cGroups **C.rd_kafka_ConsumerGroupListing_t, + cGroupCount C.size_t) (result []ConsumerGroupListing) { + result = make([]ConsumerGroupListing, cGroupCount) + + for idx := 0; idx < int(cGroupCount); idx++ { + cGroup := + C.ConsumerGroupListing_by_idx(cGroups, cGroupCount, C.size_t(idx)) + state := ConsumerGroupState( + C.rd_kafka_ConsumerGroupListing_state(cGroup)) + + result[idx] = ConsumerGroupListing{ + GroupID: C.GoString( + C.rd_kafka_ConsumerGroupListing_group_id(cGroup)), + State: state, + IsSimpleConsumerGroup: cint2bool( + C.rd_kafka_ConsumerGroupListing_is_simple_consumer_group(cGroup)), + } + } + + return result +} + +// cToErrorList converts a C rd_kafka_error_t array to a Go errors slice. +func (a *AdminClient) cToErrorList( + cErrs **C.rd_kafka_error_t, cErrCount C.size_t) (errs []error) { + errs = make([]error, cErrCount) + + for idx := 0; idx < int(cErrCount); idx++ { + cErr := C.error_by_idx(cErrs, cErrCount, C.size_t(idx)) + errs[idx] = newErrorFromCError(cErr) + } + + return errs +} + +// cConfigResourceToResult converts a C ConfigResource result array to Go ConfigResourceResult +func (a *AdminClient) cConfigResourceToResult(cRes **C.rd_kafka_ConfigResource_t, cCnt C.size_t) (result []ConfigResourceResult, err error) { + result = make([]ConfigResourceResult, int(cCnt)) + + for i := 0; i < int(cCnt); i++ { + cRes := C.ConfigResource_by_idx(cRes, cCnt, C.size_t(i)) + result[i].Type = ResourceType(C.rd_kafka_ConfigResource_type(cRes)) + result[i].Name = C.GoString(C.rd_kafka_ConfigResource_name(cRes)) + result[i].Error = newErrorFromCString( + C.rd_kafka_ConfigResource_error(cRes), + C.rd_kafka_ConfigResource_error_string(cRes)) + var cConfigCnt C.size_t + cConfigs := C.rd_kafka_ConfigResource_configs(cRes, &cConfigCnt) + if cConfigCnt > 0 { + result[i].Config = make(map[string]ConfigEntryResult) + } + for ci := 0; ci < int(cConfigCnt); ci++ { + cEntry := C.ConfigEntry_by_idx(cConfigs, cConfigCnt, C.size_t(ci)) + entry := configEntryResultFromC(cEntry) + result[i].Config[entry.Name] = entry + } + } + + return result, nil +} + +// ClusterID returns the cluster ID as reported in broker metadata. +// +// Note on cancellation: Although the underlying C function respects the +// timeout, it currently cannot be manually cancelled. That means manually +// cancelling the context will block until the C function call returns. +// +// Requires broker version >= 0.10.0. +func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error) { + err = a.verifyClient() + if err != nil { + return "", err + } + + responseChan := make(chan *C.char, 1) + + go func() { + responseChan <- C.rd_kafka_clusterid(a.handle.rk, cTimeoutFromContext(ctx)) + }() + + select { + case <-ctx.Done(): + if cClusterID := <-responseChan; cClusterID != nil { + C.rd_kafka_mem_free(a.handle.rk, unsafe.Pointer(cClusterID)) + } + return "", ctx.Err() + + case cClusterID := <-responseChan: + if cClusterID == nil { // C timeout + <-ctx.Done() + return "", ctx.Err() + } + defer C.rd_kafka_mem_free(a.handle.rk, unsafe.Pointer(cClusterID)) + return C.GoString(cClusterID), nil + } +} + +// ControllerID returns the broker ID of the current controller as reported in +// broker metadata. +// +// Note on cancellation: Although the underlying C function respects the +// timeout, it currently cannot be manually cancelled. That means manually +// cancelling the context will block until the C function call returns. +// +// Requires broker version >= 0.10.0. 
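A short sketch of ClusterID and ControllerID with a bounded context, matching the cancellation caveat in the comments above; the broker address is a placeholder:

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	// The deadline is passed down to librdkafka; cancelling the context
	// early still waits for the underlying C call to return.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	clusterID, err := a.ClusterID(ctx)
	if err != nil {
		log.Fatal(err)
	}
	controllerID, err := a.ControllerID(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %s, controller broker %d\n", clusterID, controllerID)
}
```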
+func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error) { + err = a.verifyClient() + if err != nil { + return -1, err + } + + responseChan := make(chan int32, 1) + + go func() { + responseChan <- int32(C.rd_kafka_controllerid(a.handle.rk, cTimeoutFromContext(ctx))) + }() + + select { + case <-ctx.Done(): + <-responseChan + return 0, ctx.Err() + + case controllerID := <-responseChan: + if controllerID < 0 { // C timeout + <-ctx.Done() + return 0, ctx.Err() + } + return controllerID, nil + } +} + +// CreateTopics creates topics in cluster. +// +// The list of TopicSpecification objects define the per-topic partition count, replicas, etc. +// +// Topic creation is non-atomic and may succeed for some topics but fail for others, +// make sure to check the result for topic-specific errors. +// +// Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API. +func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cTopics := make([]*C.rd_kafka_NewTopic_t, len(topics)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go TopicSpecifications to C TopicSpecifications + for i, topic := range topics { + + var cReplicationFactor C.int + if topic.ReplicationFactor == 0 { + cReplicationFactor = -1 + } else { + cReplicationFactor = C.int(topic.ReplicationFactor) + } + if topic.ReplicaAssignment != nil { + if cReplicationFactor != -1 { + return nil, newErrorFromString(ErrInvalidArg, + "TopicSpecification.ReplicationFactor and TopicSpecification.ReplicaAssignment are mutually exclusive") + } + + if len(topic.ReplicaAssignment) != topic.NumPartitions { + return nil, newErrorFromString(ErrInvalidArg, + "TopicSpecification.ReplicaAssignment must contain exactly TopicSpecification.NumPartitions partitions") + } + } + + cTopics[i] = C.rd_kafka_NewTopic_new( + C.CString(topic.Topic), + C.int(topic.NumPartitions), + cReplicationFactor, + cErrstr, cErrstrSize) + if cTopics[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Topic %s: %s", topic.Topic, C.GoString(cErrstr))) + } + + defer C.rd_kafka_NewTopic_destroy(cTopics[i]) + + for p, replicas := range topic.ReplicaAssignment { + cReplicas := make([]C.int32_t, len(replicas)) + for ri, replica := range replicas { + cReplicas[ri] = C.int32_t(replica) + } + cErr := C.rd_kafka_NewTopic_set_replica_assignment( + cTopics[i], C.int32_t(p), + (*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)), + cErrstr, cErrstrSize) + if cErr != 0 { + return nil, newCErrorFromString(cErr, + fmt.Sprintf("Failed to set replica assignment for topic %s partition %d: %s", topic.Topic, p, C.GoString(cErrstr))) + } + } + + for key, value := range topic.Config { + cErr := C.rd_kafka_NewTopic_set_config( + cTopics[i], + C.CString(key), C.CString(value)) + if cErr != 0 { + return nil, newCErrorFromString(cErr, + fmt.Sprintf("Failed to set config %s=%s for topic %s", key, value, topic.Topic)) + } + } + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATETOPICS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // 
Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreateTopics( + a.handle.rk, + (**C.rd_kafka_NewTopic_t)(&cTopics[0]), + C.size_t(len(cTopics)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATETOPICS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_CreateTopics_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cTopicRes := C.rd_kafka_CreateTopics_result_topics(cRes, &cCnt) + + return a.cToTopicResults(cTopicRes, cCnt) +} + +// DeleteTopics deletes a batch of topics. +// +// This operation is not transactional and may succeed for a subset of topics while +// failing others. +// It may take several seconds after the DeleteTopics result returns success for +// all the brokers to become aware that the topics are gone. During this time, +// topic metadata and configuration may continue to return information about deleted topics. +// +// Requires broker version >= 0.10.1.0 +func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cTopics := make([]*C.rd_kafka_DeleteTopic_t, len(topics)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go DeleteTopics to C DeleteTopics + for i, topic := range topics { + cTopics[i] = C.rd_kafka_DeleteTopic_new(C.CString(topic)) + if cTopics[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for topic %s", topic)) + } + + defer C.rd_kafka_DeleteTopic_destroy(cTopics[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETETOPICS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DeleteTopics( + a.handle.rk, + (**C.rd_kafka_DeleteTopic_t)(&cTopics[0]), + C.size_t(len(cTopics)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETETOPICS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DeleteTopics_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cTopicRes := C.rd_kafka_DeleteTopics_result_topics(cRes, &cCnt) + + return a.cToTopicResults(cTopicRes, cCnt) +} + +// CreatePartitions creates additional partitions for topics. 
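+//
+// A minimal usage sketch (assuming an existing AdminClient `a` and context
+// `ctx`; the topic name and partition count are illustrative):
+//
+//    results, err := a.CreatePartitions(ctx, []PartitionsSpecification{
+//        {Topic: "myTopic", IncreaseTo: 6},
+//    })
+//    if err == nil {
+//        for _, res := range results {
+//            fmt.Println(res.Topic, res.Error)
+//        }
+//    }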
+func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cParts := make([]*C.rd_kafka_NewPartitions_t, len(partitions)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go PartitionsSpecification to C NewPartitions + for i, part := range partitions { + cParts[i] = C.rd_kafka_NewPartitions_new(C.CString(part.Topic), C.size_t(part.IncreaseTo), cErrstr, cErrstrSize) + if cParts[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Topic %s: %s", part.Topic, C.GoString(cErrstr))) + } + + defer C.rd_kafka_NewPartitions_destroy(cParts[i]) + + for pidx, replicas := range part.ReplicaAssignment { + cReplicas := make([]C.int32_t, len(replicas)) + for ri, replica := range replicas { + cReplicas[ri] = C.int32_t(replica) + } + cErr := C.rd_kafka_NewPartitions_set_replica_assignment( + cParts[i], C.int32_t(pidx), + (*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)), + cErrstr, cErrstrSize) + if cErr != 0 { + return nil, newCErrorFromString(cErr, + fmt.Sprintf("Failed to set replica assignment for topic %s new partition index %d: %s", part.Topic, pidx, C.GoString(cErrstr))) + } + } + + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreatePartitions( + a.handle.rk, + (**C.rd_kafka_NewPartitions_t)(&cParts[0]), + C.size_t(len(cParts)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_CreatePartitions_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cTopicRes := C.rd_kafka_CreatePartitions_result_topics(cRes, &cCnt) + + return a.cToTopicResults(cTopicRes, cCnt) +} + +// AlterConfigs alters/updates cluster resource configuration. +// +// Updates are not transactional so they may succeed for a subset +// of the provided resources while others fail. +// The configuration for a particular resource is updated atomically, +// replacing values using the provided ConfigEntrys and reverting +// unspecified ConfigEntrys to their default values. +// +// Requires broker version >=0.11.0.0 +// +// AlterConfigs will replace all existing configuration for +// the provided resources with the new configuration given, +// reverting all other configuration to their default values. +// +// Multiple resources and resource types may be set, but at most one +// resource of type ResourceBroker is allowed per call since these +// resource requests must be sent to the broker specified in the resource. 
+// Deprecated: AlterConfigs is deprecated in favour of IncrementalAlterConfigs +func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go ConfigResources to C ConfigResources + for i, res := range resources { + cRes[i] = C.rd_kafka_ConfigResource_new( + C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name)) + if cRes[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for resource %v", res)) + } + + defer C.rd_kafka_ConfigResource_destroy(cRes[i]) + + for _, entry := range res.Config { + var cErr C.rd_kafka_resp_err_t + switch entry.Operation { + case AlterOperationSet: + cErr = C.rd_kafka_ConfigResource_set_config( + cRes[i], C.CString(entry.Name), C.CString(entry.Value)) + default: + panic(fmt.Sprintf("Invalid ConfigEntry.Operation: %v", entry.Operation)) + } + + if cErr != 0 { + return nil, + newCErrorFromString(cErr, + fmt.Sprintf("Failed to add configuration %s: %s", + entry, C.GoString(C.rd_kafka_err2str(cErr)))) + } + } + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_ALTERCONFIGS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_AlterConfigs( + a.handle.rk, + (**C.rd_kafka_ConfigResource_t)(&cRes[0]), + C.size_t(len(cRes)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cResult := C.rd_kafka_event_AlterConfigs_result(rkev) + + // Convert results from C to Go + var cCnt C.size_t + cResults := C.rd_kafka_AlterConfigs_result_resources(cResult, &cCnt) + + return a.cConfigResourceToResult(cResults, cCnt) +} + +// IncrementalAlterConfigs alters/updates cluster resource configuration. +// +// Updates are not transactional so they may succeed for some resources +// while fail for others. The configs for a particular resource are +// updated atomically, executing the corresponding incremental +// operations on the provided configurations. +// +// Requires broker version >=2.3.0 +// +// IncrementalAlterConfigs will only change configurations for provided +// resources with the new configuration given. +// +// Multiple resources and resource types may be set, but at most one +// resource of type ResourceBroker is allowed per call since these +// resource requests must be sent to the broker specified in the resource. 
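+//
+// A minimal sketch of an incremental update (assuming an existing AdminClient
+// `a` and context `ctx`; ResourceTopic and AlterConfigOpTypeSet are assumed to
+// be this package's names for the topic resource type and the "set" op):
+//
+//    results, err := a.IncrementalAlterConfigs(ctx, []ConfigResource{{
+//        Type: ResourceTopic,
+//        Name: "myTopic",
+//        Config: []ConfigEntry{{
+//            Name:                 "retention.ms",
+//            Value:                "86400000",
+//            IncrementalOperation: AlterConfigOpTypeSet,
+//        }},
+//    }})
+//    // each ConfigResourceResult in results should be checked for a per-resource error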
+func (a *AdminClient) IncrementalAlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go ConfigResources to C ConfigResources + for i, res := range resources { + cName := C.CString(res.Name) + defer C.free(unsafe.Pointer(cName)) + cRes[i] = C.rd_kafka_ConfigResource_new( + C.rd_kafka_ResourceType_t(res.Type), cName) + if cRes[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for resource %v", res)) + } + + defer C.rd_kafka_ConfigResource_destroy(cRes[i]) + + for _, entry := range res.Config { + cName := C.CString(entry.Name) + defer C.free(unsafe.Pointer(cName)) + cValue := C.CString(entry.Value) + defer C.free(unsafe.Pointer(cValue)) + cError := C.rd_kafka_ConfigResource_add_incremental_config( + cRes[i], cName, + C.rd_kafka_AlterConfigOpType_t(entry.IncrementalOperation), + cValue) + + if cError != nil { + err := newErrorFromCErrorDestroy(cError) + return nil, err + } + } + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_IncrementalAlterConfigs( + a.handle.rk, + (**C.rd_kafka_ConfigResource_t)(&cRes[0]), + C.size_t(len(cRes)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cResult := C.rd_kafka_event_IncrementalAlterConfigs_result(rkev) + + // Convert results from C to Go + var cCnt C.size_t + cResults := C.rd_kafka_IncrementalAlterConfigs_result_resources(cResult, &cCnt) + + return a.cConfigResourceToResult(cResults, cCnt) +} + +// DescribeConfigs retrieves configuration for cluster resources. +// +// The returned configuration includes default values, use +// ConfigEntryResult.IsDefault or ConfigEntryResult.Source to distinguish +// default values from manually configured settings. +// +// The value of config entries where .IsSensitive is true +// will always be nil to avoid disclosing sensitive +// information, such as security settings. +// +// Configuration entries where .IsReadOnly is true can't be modified +// (with AlterConfigs). +// +// Synonym configuration entries are returned if the broker supports +// it (broker version >= 1.1.0). See .Synonyms. +// +// Requires broker version >=0.11.0.0 +// +// Multiple resources and resource types may be requested, but at most +// one resource of type ResourceBroker is allowed per call +// since these resource requests must be sent to the broker specified +// in the resource. 
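+//
+// A minimal usage sketch (assuming an existing AdminClient `a` and context
+// `ctx`; ResourceTopic is assumed to be the topic resource type constant):
+//
+//    results, err := a.DescribeConfigs(ctx, []ConfigResource{
+//        {Type: ResourceTopic, Name: "myTopic"},
+//    })
+//    if err == nil {
+//        for _, res := range results {
+//            for _, entry := range res.Config {
+//                fmt.Println(entry.Name, entry.Value, entry.IsDefault)
+//            }
+//        }
+//    }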
+func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go ConfigResources to C ConfigResources + for i, res := range resources { + cRes[i] = C.rd_kafka_ConfigResource_new( + C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name)) + if cRes[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for resource %v", res)) + } + + defer C.rd_kafka_ConfigResource_destroy(cRes[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DescribeConfigs( + a.handle.rk, + (**C.rd_kafka_ConfigResource_t)(&cRes[0]), + C.size_t(len(cRes)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cResult := C.rd_kafka_event_DescribeConfigs_result(rkev) + + // Convert results from C to Go + var cCnt C.size_t + cResults := C.rd_kafka_DescribeConfigs_result_resources(cResult, &cCnt) + + return a.cConfigResourceToResult(cResults, cCnt) +} + +// GetMetadata queries broker for cluster and topic metadata. +// If topic is non-nil only information about that topic is returned, else if +// allTopics is false only information about locally used topics is returned, +// else information about all topics is returned. +// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. +func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + err := a.verifyClient() + if err != nil { + return nil, err + } + return getMetadata(a, topic, allTopics, timeoutMs) +} + +// String returns a human readable name for an AdminClient instance +func (a *AdminClient) String() string { + return fmt.Sprintf("admin-%s", a.handle.String()) +} + +// get_handle implements the Handle interface +func (a *AdminClient) gethandle() *handle { + return a.handle +} + +// SetOAuthBearerToken sets the the data to be transmitted +// to a broker during SASL/OAUTHBEARER authentication. It will return nil +// on success, otherwise an error if: +// 1) the token data is invalid (meaning an expiration time in the past +// or either a token value or an extension key or value that does not meet +// the regular expression requirements as per +// https://tools.ietf.org/html/rfc7628#section-3.1); +// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +// 3) SASL/OAUTHBEARER is supported but is not configured as the client's +// authentication mechanism. 
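+//
+// A minimal sketch (assuming an existing AdminClient `a`; the field values are
+// illustrative and assume OAuthBearerToken exposes TokenValue, Expiration and
+// Principal):
+//
+//    err := a.SetOAuthBearerToken(OAuthBearerToken{
+//        TokenValue: "token-from-your-oauth-provider",
+//        Expiration: time.Now().Add(time.Hour),
+//        Principal:  "admin",
+//    })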
+func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { + err := a.verifyClient() + if err != nil { + return err + } + return a.handle.setOAuthBearerToken(oauthBearerToken) +} + +// SetOAuthBearerTokenFailure sets the error message describing why token +// retrieval/setting failed; it also schedules a new token refresh event for 10 +// seconds later so the attempt may be retried. It will return nil on +// success, otherwise an error if: +// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +// 2) SASL/OAUTHBEARER is supported but is not configured as the client's +// authentication mechanism. +func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error { + err := a.verifyClient() + if err != nil { + return err + } + return a.handle.setOAuthBearerTokenFailure(errstr) +} + +// aclBindingToC converts a Go ACLBinding struct to a C rd_kafka_AclBinding_t +func (a *AdminClient) aclBindingToC(aclBinding *ACLBinding, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBinding_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBinding.Name) > 0 { + cName = C.CString(aclBinding.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBinding.Principal) > 0 { + cPrincipal = C.CString(aclBinding.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBinding.Host) > 0 { + cHost = C.CString(aclBinding.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBinding_new( + C.rd_kafka_ResourceType_t(aclBinding.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBinding.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBinding.Operation), + C.rd_kafka_AclPermissionType_t(aclBinding.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding %v: %v", aclBinding, C.GoString(cErrstr))) + } + return +} + +// aclBindingFilterToC converts a Go ACLBindingFilter struct to a C rd_kafka_AclBindingFilter_t +func (a *AdminClient) aclBindingFilterToC(aclBindingFilter *ACLBindingFilter, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBindingFilter_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBindingFilter.Name) > 0 { + cName = C.CString(aclBindingFilter.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBindingFilter.Principal) > 0 { + cPrincipal = C.CString(aclBindingFilter.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBindingFilter.Host) > 0 { + cHost = C.CString(aclBindingFilter.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBindingFilter_new( + C.rd_kafka_ResourceType_t(aclBindingFilter.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBindingFilter.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBindingFilter.Operation), + C.rd_kafka_AclPermissionType_t(aclBindingFilter.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding filter %v: %v", aclBindingFilter, C.GoString(cErrstr))) + } + return +} + +// cToACLBinding converts a C rd_kafka_AclBinding_t to Go ACLBinding +func (a *AdminClient) cToACLBinding(cACLBinding *C.rd_kafka_AclBinding_t) ACLBinding { + return ACLBinding{ + ResourceType(C.rd_kafka_AclBinding_restype(cACLBinding)), + 
C.GoString(C.rd_kafka_AclBinding_name(cACLBinding)), + ResourcePatternType(C.rd_kafka_AclBinding_resource_pattern_type(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_principal(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_host(cACLBinding)), + ACLOperation(C.rd_kafka_AclBinding_operation(cACLBinding)), + ACLPermissionType(C.rd_kafka_AclBinding_permission_type(cACLBinding)), + } +} + +// cToACLBindings converts a C rd_kafka_AclBinding_t list to Go ACLBindings +func (a *AdminClient) cToACLBindings(cACLBindings **C.rd_kafka_AclBinding_t, aclCnt C.size_t) (result ACLBindings) { + result = make(ACLBindings, aclCnt) + for i := uint(0); i < uint(aclCnt); i++ { + cACLBinding := C.AclBinding_by_idx(cACLBindings, aclCnt, C.size_t(i)) + if cACLBinding == nil { + panic("AclBinding_by_idx must not return nil") + } + result[i] = a.cToACLBinding(cACLBinding) + } + return +} + +// cToCreateACLResults converts a C acl_result_t array to Go CreateACLResult list. +func (a *AdminClient) cToCreateACLResults(cCreateAclsRes **C.rd_kafka_acl_result_t, aclCnt C.size_t) (result []CreateACLResult, err error) { + result = make([]CreateACLResult, uint(aclCnt)) + + for i := uint(0); i < uint(aclCnt); i++ { + cCreateACLRes := C.acl_result_by_idx(cCreateAclsRes, aclCnt, C.size_t(i)) + if cCreateACLRes != nil { + cCreateACLError := C.rd_kafka_acl_result_error(cCreateACLRes) + result[i].Error = newErrorFromCError(cCreateACLError) + } + } + + return result, nil +} + +// cToDescribeACLsResult converts a C rd_kafka_event_t to a Go DescribeAclsResult struct. +func (a *AdminClient) cToDescribeACLsResult(rkev *C.rd_kafka_event_t) (result *DescribeACLsResult) { + result = &DescribeACLsResult{} + err := C.rd_kafka_event_error(rkev) + errCode := ErrorCode(err) + errStr := C.rd_kafka_event_error_string(rkev) + + var cResultACLsCount C.size_t + cResult := C.rd_kafka_event_DescribeAcls_result(rkev) + cResultACLs := C.rd_kafka_DescribeAcls_result_acls(cResult, &cResultACLsCount) + if errCode != ErrNoError { + result.Error = newErrorFromCString(err, errStr) + } + result.ACLBindings = a.cToACLBindings(cResultACLs, cResultACLsCount) + return +} + +// cToDeleteACLsResults converts a C rd_kafka_DeleteAcls_result_response_t array to Go DeleteAclsResult slice. +func (a *AdminClient) cToDeleteACLsResults(cDeleteACLsResResponse **C.rd_kafka_DeleteAcls_result_response_t, resResponseCnt C.size_t) (result []DeleteACLsResult) { + result = make([]DeleteACLsResult, uint(resResponseCnt)) + + for i := uint(0); i < uint(resResponseCnt); i++ { + cDeleteACLsResResponse := C.DeleteAcls_result_response_by_idx(cDeleteACLsResResponse, resResponseCnt, C.size_t(i)) + if cDeleteACLsResResponse == nil { + panic("DeleteAcls_result_response_by_idx must not return nil") + } + + cDeleteACLsError := C.rd_kafka_DeleteAcls_result_response_error(cDeleteACLsResResponse) + result[i].Error = newErrorFromCError(cDeleteACLsError) + + var cMatchingACLsCount C.size_t + cMatchingACLs := C.rd_kafka_DeleteAcls_result_response_matching_acls( + cDeleteACLsResResponse, &cMatchingACLsCount) + + result[i].ACLBindings = a.cToACLBindings(cMatchingACLs, cMatchingACLsCount) + } + return +} + +// CreateACLs creates one or more ACL bindings. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// - `aclBindings` - A slice of ACL binding specifications to create. 
+// - `options` - Create ACLs options +// +// Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful +// plus an error that is not nil for client level errors +func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + if aclBindings == nil { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-nil slice of ACLBinding structs") + } + if len(aclBindings) == 0 { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-empty slice of ACLBinding structs") + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindings := make([]*C.rd_kafka_AclBinding_t, len(aclBindings)) + + for i, aclBinding := range aclBindings { + cACLBindings[i], err = a.aclBindingToC(&aclBinding, cErrstr, cErrstrSize) + if err != nil { + return + } + defer C.rd_kafka_AclBinding_destroy(cACLBindings[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEACLS, genericOptions) + if err != nil { + return nil, err + } + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreateAcls( + a.handle.rk, + (**C.rd_kafka_AclBinding_t)(&cACLBindings[0]), + C.size_t(len(cACLBindings)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + var cResultCnt C.size_t + cResult := C.rd_kafka_event_CreateAcls_result(rkev) + aclResults := C.rd_kafka_CreateAcls_result_acls(cResult, &cResultCnt) + result, err = a.cToCreateACLResults(aclResults, cResultCnt) + return +} + +// DescribeACLs matches ACL bindings by filter. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// - `aclBindingFilter` - A filter with attributes that must match. +// string attributes match exact values or any string if set to empty string. +// Enum attributes match exact values or any value if ending with `Any`. 
+// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns ACL bindings with: +// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name +// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name +// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name +// - `options` - Describe ACLs options +// +// Returns a slice of ACLBindings when the operation was successful +// plus an error that is not `nil` for client level errors +func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindingFilter, err := a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) + if err != nil { + return + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBEACLS, genericOptions) + if err != nil { + return nil, err + } + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DescribeAcls( + a.handle.rk, + cACLBindingFilter, + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + result = a.cToDescribeACLsResult(rkev) + return +} + +// DeleteACLs deletes ACL bindings matching one or more ACL binding filters. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// - `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete. +// string attributes match exact values or any string if set to empty string. +// Enum attributes match exact values or any value if ending with `Any`. 
+// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` deletes ACL bindings with: +// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name +// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name +// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name +// - `options` - Delete ACLs options +// +// Returns a slice of ACLBinding for each filter when the operation was successful +// plus an error that is not `nil` for client level errors +func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + if aclBindingFilters == nil { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-nil slice of ACLBindingFilter structs") + } + if len(aclBindingFilters) == 0 { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-empty slice of ACLBindingFilter structs") + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindingFilters := make([]*C.rd_kafka_AclBindingFilter_t, len(aclBindingFilters)) + + for i, aclBindingFilter := range aclBindingFilters { + cACLBindingFilters[i], err = a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) + if err != nil { + return + } + defer C.rd_kafka_AclBinding_destroy(cACLBindingFilters[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETEACLS, genericOptions) + if err != nil { + return nil, err + } + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DeleteAcls( + a.handle.rk, + (**C.rd_kafka_AclBindingFilter_t)(&cACLBindingFilters[0]), + C.size_t(len(cACLBindingFilters)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + var cResultResponsesCount C.size_t + cResult := C.rd_kafka_event_DeleteAcls_result(rkev) + cResultResponses := C.rd_kafka_DeleteAcls_result_responses(cResult, &cResultResponsesCount) + result = a.cToDeleteACLsResults(cResultResponses, cResultResponsesCount) + return +} + +// SetSaslCredentials sets the SASL credentials used for this admin client. +// The new credentials will overwrite the old ones (which were set when creating +// the admin client or by a previous call to SetSaslCredentials). The new +// credentials will be used the next time the admin client needs to authenticate +// to a broker. This method will not disconnect existing broker connections that +// were established with the old credentials. +// This method applies only to the SASL PLAIN and SCRAM mechanisms. +func (a *AdminClient) SetSaslCredentials(username, password string) error { + err := a.verifyClient() + if err != nil { + return err + } + + return setSaslCredentials(a.handle.rk, username, password) +} + +// Close an AdminClient instance. 
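+//
+// Typical lifecycle (a minimal sketch; the bootstrap address is illustrative):
+//
+//    a, err := NewAdminClient(&ConfigMap{"bootstrap.servers": "localhost:9092"})
+//    if err != nil {
+//        // handle error
+//    }
+//    defer a.Close()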
+func (a *AdminClient) Close() { + if !atomic.CompareAndSwapUint32(&a.isClosed, 0, 1) { + return + } + if a.isDerived { + // Derived AdminClient needs no cleanup. + a.handle = &handle{} + return + } + + a.handle.cleanup() + + C.rd_kafka_destroy(a.handle.rk) +} + +// ListConsumerGroups lists the consumer groups available in the cluster. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `options` - ListConsumerGroupsAdminOption options. +// +// Returns a ListConsumerGroupsResult, which contains a slice corresponding to +// each group in the cluster and a slice of errors encountered while listing. +// Additionally, an error that is not nil for client-level errors is returned. +// Both the returned error, and the errors slice should be checked. +func (a *AdminClient) ListConsumerGroups( + ctx context.Context, + options ...ListConsumerGroupsAdminOption) (result ListConsumerGroupsResult, err error) { + + result = ListConsumerGroupsResult{} + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, + C.RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, genericOptions) + if err != nil { + return result, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_ListConsumerGroups (asynchronous). + C.rd_kafka_ListConsumerGroups( + a.handle.rk, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) + if err != nil { + return result, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_ListConsumerGroups_result(rkev) + + // Convert result and broker errors from C to Go. + var cGroupCount C.size_t + cGroups := C.rd_kafka_ListConsumerGroups_result_valid(cRes, &cGroupCount) + result.Valid = a.cToConsumerGroupListings(cGroups, cGroupCount) + + var cErrsCount C.size_t + cErrs := C.rd_kafka_ListConsumerGroups_result_errors(cRes, &cErrsCount) + if cErrsCount == 0 { + return result, nil + } + + result.Errors = a.cToErrorList(cErrs, cErrsCount) + return result, nil +} + +// DescribeConsumerGroups describes groups from cluster as specified by the +// groups list. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `groups` - Slice of groups to describe. This should not be nil/empty. +// - `options` - DescribeConsumerGroupsAdminOption options. +// +// Returns DescribeConsumerGroupsResult, which contains a slice of +// ConsumerGroupDescriptions corresponding to the input groups, plus an error +// that is not `nil` for client level errors. Individual +// ConsumerGroupDescriptions inside the slice should also be checked for +// errors. +func (a *AdminClient) DescribeConsumerGroups( + ctx context.Context, groups []string, + options ...DescribeConsumerGroupsAdminOption) (result DescribeConsumerGroupsResult, err error) { + + describeResult := DescribeConsumerGroupsResult{} + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert group names into char** required by the implementation. 
+ cGroupNameList := make([]*C.char, len(groups)) + cGroupNameCount := C.size_t(len(groups)) + + for idx, group := range groups { + cGroupNameList[idx] = C.CString(group) + defer C.free(unsafe.Pointer(cGroupNameList[idx])) + } + + var cGroupNameListPtr **C.char + if cGroupNameCount > 0 { + cGroupNameListPtr = ((**C.char)(&cGroupNameList[0])) + } + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, genericOptions) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_DescribeConsumerGroups (asynchronous). + C.rd_kafka_DescribeConsumerGroups( + a.handle.rk, + cGroupNameListPtr, + cGroupNameCount, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DescribeConsumerGroups_result(rkev) + + // Convert result from C to Go. + var cGroupCount C.size_t + cGroups := C.rd_kafka_DescribeConsumerGroups_result_groups(cRes, &cGroupCount) + describeResult.ConsumerGroupDescriptions = a.cToConsumerGroupDescriptions(cGroups, cGroupCount) + + return describeResult, nil +} + +// DescribeTopics describes topics from cluster as specified by the +// topics list. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `topics` - Collection of topics to describe. This should not have nil +// topic names. +// - `options` - DescribeTopicsAdminOption options. +// +// Returns DescribeTopicsResult, which contains a slice of +// TopicDescriptions corresponding to the input topics, plus an error +// that is not `nil` for client level errors. Individual +// TopicDescriptions inside the slice should also be checked for +// errors. Individual TopicDescriptions also have a +// slice of allowed ACLOperations. +func (a *AdminClient) DescribeTopics( + ctx context.Context, topics TopicCollection, + options ...DescribeTopicsAdminOption) (result DescribeTopicsResult, err error) { + + describeResult := DescribeTopicsResult{} + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert topic names into char**. + cTopicNameList := make([]*C.char, len(topics.topicNames)) + cTopicNameCount := C.size_t(len(topics.topicNames)) + + if topics.topicNames == nil { + return describeResult, newErrorFromString(ErrInvalidArg, + "TopicCollection of topic names cannot be nil") + } + + for idx, topic := range topics.topicNames { + cTopicNameList[idx] = C.CString(topic) + defer C.free(unsafe.Pointer(cTopicNameList[idx])) + } + + var cTopicNameListPtr **C.char + if cTopicNameCount > 0 { + cTopicNameListPtr = ((**C.char)(&cTopicNameList[0])) + } + + // Convert char** of topic names into rd_kafka_TopicCollection_t* + cTopicCollection := C.rd_kafka_TopicCollection_of_topic_names( + cTopicNameListPtr, cTopicNameCount) + defer C.rd_kafka_TopicCollection_destroy(cTopicCollection) + + // Convert Go AdminOptions (if any) to C AdminOptions. 
+ genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBETOPICS, genericOptions) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_DescribeTopics (asynchronous). + C.rd_kafka_DescribeTopics( + a.handle.rk, + cTopicCollection, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DescribeTopics_result(rkev) + + // Convert result from C to Go. + var cTopicDescriptionCount C.size_t + cTopicDescriptions := + C.rd_kafka_DescribeTopics_result_topics(cRes, &cTopicDescriptionCount) + describeResult.TopicDescriptions = + a.cToTopicDescriptions(cTopicDescriptions, cTopicDescriptionCount) + + return describeResult, nil +} + +// DescribeCluster describes the cluster +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `options` - DescribeClusterAdminOption options. +// +// Returns ClusterDescription, which contains current cluster ID and controller +// along with a slice of Nodes. It also has a slice of allowed ACLOperations. +func (a *AdminClient) DescribeCluster( + ctx context.Context, + options ...DescribeClusterAdminOption) (result DescribeClusterResult, err error) { + err = a.verifyClient() + if err != nil { + return result, err + } + clusterDesc := DescribeClusterResult{} + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER, genericOptions) + if err != nil { + return clusterDesc, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_DescribeCluster (asynchronous). + C.rd_kafka_DescribeCluster( + a.handle.rk, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT) + if err != nil { + return clusterDesc, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DescribeCluster_result(rkev) + + // Convert result from C to Go. + clusterDesc = a.cToDescribeClusterResult(cRes) + + return clusterDesc, nil +} + +// DeleteConsumerGroups deletes a batch of consumer groups. +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `groups` - A slice of groupIDs to delete. +// - `options` - DeleteConsumerGroupsAdminOption options. +// +// Returns a DeleteConsumerGroupsResult containing a slice of ConsumerGroupResult, with +// group-level errors, (if any) contained inside; and an error that is not nil +// for client level errors. 
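+//
+// A minimal usage sketch (assuming an existing AdminClient `a` and context
+// `ctx`; the group ID is illustrative):
+//
+//    res, err := a.DeleteConsumerGroups(ctx, []string{"my-group"})
+//    if err == nil {
+//        for _, gr := range res.ConsumerGroupResults {
+//            if gr.Error.Code() != ErrNoError {
+//                // handle the per-group error
+//            }
+//        }
+//    }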
+func (a *AdminClient) DeleteConsumerGroups( + ctx context.Context, + groups []string, options ...DeleteConsumerGroupsAdminOption) (result DeleteConsumerGroupsResult, err error) { + cGroups := make([]*C.rd_kafka_DeleteGroup_t, len(groups)) + deleteResult := DeleteConsumerGroupsResult{} + err = a.verifyClient() + if err != nil { + return deleteResult, err + } + + // Convert Go DeleteGroups to C DeleteGroups + for i, group := range groups { + cGroupID := C.CString(group) + defer C.free(unsafe.Pointer(cGroupID)) + + cGroups[i] = C.rd_kafka_DeleteGroup_new(cGroupID) + if cGroups[i] == nil { + return deleteResult, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for group %s", group)) + } + + defer C.rd_kafka_DeleteGroup_destroy(cGroups[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DELETEGROUPS, genericOptions) + if err != nil { + return deleteResult, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DeleteGroups( + a.handle.rk, + (**C.rd_kafka_DeleteGroup_t)(&cGroups[0]), + C.size_t(len(cGroups)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEGROUPS_RESULT) + if err != nil { + return deleteResult, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DeleteGroups_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cGroupRes := C.rd_kafka_DeleteGroups_result_groups(cRes, &cCnt) + + deleteResult.ConsumerGroupResults, err = a.cToConsumerGroupResults(cGroupRes, cCnt) + return deleteResult, err +} + +// ListConsumerGroupOffsets fetches the offsets for topic partition(s) for +// consumer group(s). +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// - `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of which +// has the id of a consumer group, and a slice of the TopicPartitions we +// need to fetch the offsets for. The slice of TopicPartitions can be nil, to fetch +// all topic partitions for that group. +// Currently, the size of `groupsPartitions` has to be exactly one. +// - `options` - ListConsumerGroupOffsetsAdminOption options. +// +// Returns a ListConsumerGroupOffsetsResult, containing a slice of +// ConsumerGroupTopicPartitions corresponding to the input slice, plus an error that is +// not `nil` for client level errors. Individual TopicPartitions inside each of +// the ConsumerGroupTopicPartitions should also be checked for errors. +func (a *AdminClient) ListConsumerGroupOffsets( + ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions, + options ...ListConsumerGroupOffsetsAdminOption) (lcgor ListConsumerGroupOffsetsResult, err error) { + err = a.verifyClient() + if err != nil { + return lcgor, err + } + + lcgor.ConsumerGroupsTopicPartitions = nil + + // For now, we only support one group at a time given as a single element of + // groupsPartitions. + // Code has been written so that only this if-guard needs to be removed when + // we add support for multiple ConsumerGroupTopicPartitions. 
+ if len(groupsPartitions) != 1 { + return lcgor, fmt.Errorf( + "expected length of groupsPartitions is 1, got %d", len(groupsPartitions)) + } + + cGroupsPartitions := make([]*C.rd_kafka_ListConsumerGroupOffsets_t, + len(groupsPartitions)) + + // Convert Go ConsumerGroupTopicPartitions to C ListConsumerGroupOffsets. + for i, groupPartitions := range groupsPartitions { + // We need to destroy this list because rd_kafka_ListConsumerGroupOffsets_new + // creates a copy of it. + var cPartitions *C.rd_kafka_topic_partition_list_t = nil + + if groupPartitions.Partitions != nil { + cPartitions = newCPartsFromTopicPartitions(groupPartitions.Partitions) + defer C.rd_kafka_topic_partition_list_destroy(cPartitions) + } + + cGroupID := C.CString(groupPartitions.Group) + defer C.free(unsafe.Pointer(cGroupID)) + + cGroupsPartitions[i] = + C.rd_kafka_ListConsumerGroupOffsets_new(cGroupID, cPartitions) + defer C.rd_kafka_ListConsumerGroupOffsets_destroy(cGroupsPartitions[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, genericOptions) + if err != nil { + return lcgor, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_ListConsumerGroupOffsets (asynchronous). + C.rd_kafka_ListConsumerGroupOffsets( + a.handle.rk, + (**C.rd_kafka_ListConsumerGroupOffsets_t)(&cGroupsPartitions[0]), + C.size_t(len(cGroupsPartitions)), + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT) + if err != nil { + return lcgor, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_ListConsumerGroupOffsets_result(rkev) + + // Convert result from C to Go. + var cGroupCount C.size_t + cGroups := C.rd_kafka_ListConsumerGroupOffsets_result_groups(cRes, &cGroupCount) + lcgor.ConsumerGroupsTopicPartitions = a.cToConsumerGroupTopicPartitions(cGroups, cGroupCount) + + return lcgor, nil +} + +// AlterConsumerGroupOffsets alters the offsets for topic partition(s) for +// consumer group(s). +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of +// which has the id of a consumer group, and a slice of the TopicPartitions +// we need to alter the offsets for. Currently, the size of +// `groupsPartitions` has to be exactly one. +// - `options` - AlterConsumerGroupOffsetsAdminOption options. +// +// Returns a AlterConsumerGroupOffsetsResult, containing a slice of +// ConsumerGroupTopicPartitions corresponding to the input slice, plus an error +// that is not `nil` for client level errors. Individual TopicPartitions inside +// each of the ConsumerGroupTopicPartitions should also be checked for errors. +// This will succeed at the partition level only if the group is not actively +// subscribed to the corresponding topic(s). 
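+//
+// A minimal usage sketch (assuming an existing AdminClient `a` and context
+// `ctx`; group, topic and offset values are illustrative):
+//
+//    topic := "myTopic"
+//    res, err := a.AlterConsumerGroupOffsets(ctx, []ConsumerGroupTopicPartitions{{
+//        Group: "my-group",
+//        Partitions: []TopicPartition{
+//            {Topic: &topic, Partition: 0, Offset: 100},
+//        },
+//    }})
+//    // res.ConsumerGroupsTopicPartitions[0].Partitions carries the per-partition outcome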
+func (a *AdminClient) AlterConsumerGroupOffsets( + ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions, + options ...AlterConsumerGroupOffsetsAdminOption) (acgor AlterConsumerGroupOffsetsResult, err error) { + err = a.verifyClient() + if err != nil { + return acgor, err + } + + acgor.ConsumerGroupsTopicPartitions = nil + + // For now, we only support one group at a time given as a single element of groupsPartitions. + // Code has been written so that only this if-guard needs to be removed when we add support for + // multiple ConsumerGroupTopicPartitions. + if len(groupsPartitions) != 1 { + return acgor, fmt.Errorf( + "expected length of groupsPartitions is 1, got %d", + len(groupsPartitions)) + } + + cGroupsPartitions := make( + []*C.rd_kafka_AlterConsumerGroupOffsets_t, len(groupsPartitions)) + + // Convert Go ConsumerGroupTopicPartitions to C AlterConsumerGroupOffsets. + for idx, groupPartitions := range groupsPartitions { + // We need to destroy this list because rd_kafka_AlterConsumerGroupOffsets_new + // creates a copy of it. + cPartitions := newCPartsFromTopicPartitions(groupPartitions.Partitions) + + cGroupID := C.CString(groupPartitions.Group) + defer C.free(unsafe.Pointer(cGroupID)) + + cGroupsPartitions[idx] = + C.rd_kafka_AlterConsumerGroupOffsets_new(cGroupID, cPartitions) + defer C.rd_kafka_AlterConsumerGroupOffsets_destroy(cGroupsPartitions[idx]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, genericOptions) + if err != nil { + return acgor, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_AlterConsumerGroupOffsets (asynchronous). + C.rd_kafka_AlterConsumerGroupOffsets( + a.handle.rk, + (**C.rd_kafka_AlterConsumerGroupOffsets_t)(&cGroupsPartitions[0]), + C.size_t(len(cGroupsPartitions)), + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT) + if err != nil { + return acgor, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_AlterConsumerGroupOffsets_result(rkev) + + // Convert result from C to Go. + var cGroupCount C.size_t + cGroups := C.rd_kafka_AlterConsumerGroupOffsets_result_groups(cRes, &cGroupCount) + acgor.ConsumerGroupsTopicPartitions = a.cToConsumerGroupTopicPartitions(cGroups, cGroupCount) + + return acgor, nil +} + +// DescribeUserScramCredentials describe SASL/SCRAM credentials for the +// specified user names. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `users` - a slice of string, each one correspond to a user name, no +// duplicates are allowed +// - `options` - DescribeUserScramCredentialsAdminOption options. +// +// Returns a map from user name to user SCRAM credentials description. +// Each description can have an individual error. 
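+//
+// A minimal usage sketch (assuming an existing AdminClient `a` and context
+// `ctx`; the user name is illustrative):
+//
+//    res, err := a.DescribeUserScramCredentials(ctx, []string{"alice"})
+//    if err == nil {
+//        desc := res.Descriptions["alice"]
+//        fmt.Println(desc.Error)
+//    }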
+func (a *AdminClient) DescribeUserScramCredentials( + ctx context.Context, users []string, + options ...DescribeUserScramCredentialsAdminOption) (result DescribeUserScramCredentialsResult, err error) { + result = DescribeUserScramCredentialsResult{ + Descriptions: make(map[string]UserScramCredentialsDescription), + } + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert user names into char** required by the implementation. + cUserList := make([]*C.char, len(users)) + cUserCount := C.size_t(len(users)) + + for idx, user := range users { + cUserList[idx] = C.CString(user) + defer C.free(unsafe.Pointer(cUserList[idx])) + } + + var cUserListPtr **C.char + if cUserCount > 0 { + cUserListPtr = ((**C.char)(&cUserList[0])) + } + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, + C.RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS, genericOptions) + if err != nil { + return result, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_DescribeConsumerGroups (asynchronous). + C.rd_kafka_DescribeUserScramCredentials( + a.handle.rk, + cUserListPtr, + cUserCount, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT) + if err != nil { + return result, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DescribeUserScramCredentials_result(rkev) + + // Convert result from C to Go. + result.Descriptions = cToDescribeUserScramCredentialsResult(cRes) + return result, nil +} + +// ListOffsets describe offsets for the +// specified TopicPartiton based on an OffsetSpec. +// +// Parameters: +// +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `topicPartitionOffsets` - a map from TopicPartition to OffsetSpec, it +// holds either the OffsetSpec enum value or timestamp. Must not be nil. +// - `options` - ListOffsetsAdminOption options. +// +// Returns a ListOffsetsResult. +// Each TopicPartition's ListOffset can have an individual error. +func (a *AdminClient) ListOffsets( + ctx context.Context, topicPartitionOffsets map[TopicPartition]OffsetSpec, + options ...ListOffsetsAdminOption) (result ListOffsetsResult, err error) { + if topicPartitionOffsets == nil { + return result, newErrorFromString(ErrInvalidArg, "expected topicPartitionOffsets parameter.") + } + + topicPartitions := C.rd_kafka_topic_partition_list_new(C.int(len(topicPartitionOffsets))) + defer C.rd_kafka_topic_partition_list_destroy(topicPartitions) + + for tp, offsetValue := range topicPartitionOffsets { + cStr := C.CString(*tp.Topic) + defer C.free(unsafe.Pointer(cStr)) + topicPartition := C.rd_kafka_topic_partition_list_add(topicPartitions, cStr, C.int32_t(tp.Partition)) + topicPartition.offset = C.int64_t(offsetValue) + } + + // Convert Go AdminOptions (if any) to C AdminOptions. 
+ genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_LISTOFFSETS, genericOptions) + if err != nil { + return result, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_ListOffsets (asynchronous). + C.rd_kafka_ListOffsets( + a.handle.rk, + topicPartitions, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_LISTOFFSETS_RESULT) + if err != nil { + return result, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_ListOffsets_result(rkev) + + // Convert result from C to Go. + result = cToListOffsetsResult(cRes) + + return result, nil +} + +// AlterUserScramCredentials alters SASL/SCRAM credentials. +// The pair (user, mechanism) must be unique among upsertions and deletions. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `upsertions` - a slice of user credential upsertions +// - `deletions` - a slice of user credential deletions +// - `options` - AlterUserScramCredentialsAdminOption options. +// +// Returns a map from user name to the corresponding Error, with error code +// ErrNoError when the request succeeded. +func (a *AdminClient) AlterUserScramCredentials( + ctx context.Context, upsertions []UserScramCredentialUpsertion, deletions []UserScramCredentialDeletion, + options ...AlterUserScramCredentialsAdminOption) (result AlterUserScramCredentialsResult, err error) { + result = AlterUserScramCredentialsResult{ + Errors: make(map[string]Error), + } + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert user names into char** required by the implementation. + cAlterationList := make([]*C.rd_kafka_UserScramCredentialAlteration_t, len(upsertions)+len(deletions)) + cAlterationCount := C.size_t(len(upsertions) + len(deletions)) + idx := 0 + + for _, upsertion := range upsertions { + user := C.CString(upsertion.User) + defer C.free(unsafe.Pointer(user)) + + var salt *C.uchar = nil + var saltSize C.size_t = 0 + if upsertion.Salt != nil { + salt = (*C.uchar)(&upsertion.Salt[0]) + saltSize = C.size_t(len(upsertion.Salt)) + } + + cAlterationList[idx] = C.rd_kafka_UserScramCredentialUpsertion_new(user, + C.rd_kafka_ScramMechanism_t(upsertion.ScramCredentialInfo.Mechanism), + C.int(upsertion.ScramCredentialInfo.Iterations), + (*C.uchar)(&upsertion.Password[0]), C.size_t(len(upsertion.Password)), + salt, saltSize) + defer C.rd_kafka_UserScramCredentialAlteration_destroy(cAlterationList[idx]) + idx = idx + 1 + } + + for _, deletion := range deletions { + user := C.CString(deletion.User) + defer C.free(unsafe.Pointer(user)) + cAlterationList[idx] = C.rd_kafka_UserScramCredentialDeletion_new( + user, C.rd_kafka_ScramMechanism_t(deletion.Mechanism)) + defer C.rd_kafka_UserScramCredentialAlteration_destroy(cAlterationList[idx]) + idx = idx + 1 + } + + var cAlterationListPtr **C.rd_kafka_UserScramCredentialAlteration_t + if cAlterationCount > 0 { + cAlterationListPtr = ((**C.rd_kafka_UserScramCredentialAlteration_t)(&cAlterationList[0])) + } + + // Convert Go AdminOptions (if any) to C AdminOptions. 
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(
+		a.handle, C.RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS, genericOptions)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation.
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Call rd_kafka_AlterUserScramCredentials (asynchronous).
+	C.rd_kafka_AlterUserScramCredentials(
+		a.handle.rk,
+		cAlterationListPtr,
+		cAlterationCount,
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout.
+	rkev, err := a.waitResult(
+		ctx, cQueue, C.RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_AlterUserScramCredentials_result(rkev)
+
+	// Convert result from C to Go.
+	var cResponseSize C.size_t
+	cResponses := C.rd_kafka_AlterUserScramCredentials_result_responses(cRes, &cResponseSize)
+	for i := 0; i < int(cResponseSize); i++ {
+		cResponse := C.AlterUserScramCredentials_result_response_by_idx(
+			cResponses, cResponseSize, C.size_t(i))
+		user := C.GoString(C.rd_kafka_AlterUserScramCredentials_result_response_user(cResponse))
+		err := newErrorFromCError(C.rd_kafka_AlterUserScramCredentials_result_response_error(cResponse))
+		result.Errors[user] = err
+	}
+
+	return result, nil
+}
+
+// NewAdminClient creates a new AdminClient instance with a new underlying client instance
+func NewAdminClient(conf *ConfigMap) (*AdminClient, error) {
+
+	err := versionCheck()
+	if err != nil {
+		return nil, err
+	}
+
+	a := &AdminClient{}
+	a.handle = &handle{}
+
+	// Convert ConfigMap to librdkafka conf_t
+	cConf, err := conf.convert()
+	if err != nil {
+		return nil, err
+	}
+
+	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH)
+
+	// Create librdkafka producer instance. The Producer is somewhat cheaper than
+	// the consumer, but any instance type can be used for Admin APIs.
+	a.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
+	if a.handle.rk == nil {
+		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+	}
+
+	a.isDerived = false
+	a.handle.setup()
+
+	a.isClosed = 0
+
+	return a, nil
+}
+
+// NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance.
+// The AdminClient will use the same configuration and connections as the parent instance.
+func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error) {
+	if p.handle.rk == nil {
+		return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed producer")
+	}
+
+	a = &AdminClient{}
+	a.handle = &p.handle
+	a.isDerived = true
+	a.isClosed = 0
+	return a, nil
+}
+
+// NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance.
+// The AdminClient will use the same configuration and connections as the parent instance.
+func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error) {
+	if c.handle.rk == nil {
+		return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed consumer")
+	}
+
+	a = &AdminClient{}
+	a.handle = &c.handle
+	a.isDerived = true
+	a.isClosed = 0
+	return a, nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminoptions.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminoptions.go
new file mode 100644
index 000000000..b2d1f634a
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminoptions.go
@@ -0,0 +1,587 @@
+/**
+ * Copyright 2018 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"fmt"
+	"time"
+	"unsafe"
+)
+
+/*
+#include "select_rdkafka.h"
+#include <stdlib.h>
+*/
+import "C"
+
+// AdminOptionOperationTimeout sets the broker's operation timeout, such as the
+// timeout for CreateTopics to complete the creation of topics on the controller
+// before returning a result to the application.
+//
+// CreateTopics, DeleteTopics, CreatePartitions:
+// a value 0 will return immediately after triggering topic
+// creation, while > 0 will wait this long for topic creation to propagate
+// in cluster.
+//
+// Default: 0 (return immediately).
+//
+// Valid for CreateTopics, DeleteTopics, CreatePartitions.
+type AdminOptionOperationTimeout struct {
+	isSet bool
+	val   time.Duration
+}
+
+func (ao AdminOptionOperationTimeout) supportsCreateTopics() {
+}
+func (ao AdminOptionOperationTimeout) supportsDeleteTopics() {
+}
+func (ao AdminOptionOperationTimeout) supportsCreatePartitions() {
+}
+
+func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
+	if !ao.isSet {
+		return nil
+	}
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	cErr := C.rd_kafka_AdminOptions_set_operation_timeout(
+		cOptions, C.int(durationToMilliseconds(ao.val)),
+		cErrstr, cErrstrSize)
+	if cErr != 0 {
+		C.rd_kafka_AdminOptions_destroy(cOptions)
+		return newCErrorFromString(cErr,
+			fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr)))
+
+	}
+
+	return nil
+}
+
+// SetAdminOperationTimeout sets the broker's operation timeout, such as the
+// timeout for CreateTopics to complete the creation of topics on the controller
+// before returning a result to the application.
+//
+// CreateTopics, DeleteTopics, CreatePartitions:
+// a value 0 will return immediately after triggering topic
+// creation, while > 0 will wait this long for topic creation to propagate
+// in cluster.
+//
+// Default: 0 (return immediately).
+//
+// Valid for CreateTopics, DeleteTopics, CreatePartitions.
+func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) { + ao.isSet = true + ao.val = t + return ao +} + +// AdminOptionRequestTimeout sets the overall request timeout, including broker +// lookup, request transmission, operation time on broker, and response. +// +// Default: `socket.timeout.ms`. +// +// Valid for all Admin API methods. +type AdminOptionRequestTimeout struct { + isSet bool + val time.Duration +} + +func (ao AdminOptionRequestTimeout) supportsCreateTopics() { +} +func (ao AdminOptionRequestTimeout) supportsDeleteTopics() { +} +func (ao AdminOptionRequestTimeout) supportsCreatePartitions() { +} +func (ao AdminOptionRequestTimeout) supportsAlterConfigs() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeConfigs() { +} + +func (ao AdminOptionRequestTimeout) supportsCreateACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDescribeACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDeleteACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsListConsumerGroups() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeConsumerGroups() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeTopics() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeCluster() { +} +func (ao AdminOptionRequestTimeout) supportsDeleteConsumerGroups() { +} +func (ao AdminOptionRequestTimeout) supportsListConsumerGroupOffsets() { +} +func (ao AdminOptionRequestTimeout) supportsAlterConsumerGroupOffsets() { +} +func (ao AdminOptionRequestTimeout) supportsListOffsets() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeUserScramCredentials() { +} +func (ao AdminOptionRequestTimeout) supportsAlterUserScramCredentials() { +} +func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cErr := C.rd_kafka_AdminOptions_set_request_timeout( + cOptions, C.int(durationToMilliseconds(ao.val)), + cErrstr, cErrstrSize) + if cErr != 0 { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newCErrorFromString(cErr, + fmt.Sprintf("%s", C.GoString(cErrstr))) + + } + + return nil +} + +// SetAdminRequestTimeout sets the overall request timeout, including broker +// lookup, request transmission, operation time on broker, and response. +// +// Default: `socket.timeout.ms`. +// +// Valid for all Admin API methods. +func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) { + ao.isSet = true + ao.val = t + return ao +} + +// IsolationLevel is a type which is used for AdminOptions to set the IsolationLevel. +type IsolationLevel int + +const ( + // IsolationLevelReadUncommitted - read uncommitted isolation level + IsolationLevelReadUncommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED) + // IsolationLevelReadCommitted - read committed isolation level + IsolationLevelReadCommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED) +) + +// AdminOptionIsolationLevel sets the overall request IsolationLevel. +// +// Default: `ReadUncommitted`. +// +// Valid for ListOffsets. 
+type AdminOptionIsolationLevel struct { + isSet bool + val IsolationLevel +} + +func (ao AdminOptionIsolationLevel) supportsListOffsets() { +} +func (ao AdminOptionIsolationLevel) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cError := C.rd_kafka_AdminOptions_set_isolation_level( + cOptions, C.rd_kafka_IsolationLevel_t(ao.val)) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + + } + + return nil + +} + +// SetAdminIsolationLevel sets the overall IsolationLevel for a request. +// +// Default: `ReadUncommitted`. +// +// Valid for ListOffsets. +func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel) { + ao.isSet = true + ao.val = isolationLevel + return ao +} + +// AdminOptionValidateOnly tells the broker to only validate the request, +// without performing the requested operation (create topics, etc). +// +// Default: false. +// +// Valid for CreateTopics, CreatePartitions, AlterConfigs +type AdminOptionValidateOnly struct { + isSet bool + val bool +} + +func (ao AdminOptionValidateOnly) supportsCreateTopics() { +} +func (ao AdminOptionValidateOnly) supportsCreatePartitions() { +} +func (ao AdminOptionValidateOnly) supportsAlterConfigs() { +} + +func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cErr := C.rd_kafka_AdminOptions_set_validate_only( + cOptions, bool2cint(ao.val), + cErrstr, cErrstrSize) + if cErr != 0 { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newCErrorFromString(cErr, + fmt.Sprintf("%s", C.GoString(cErrstr))) + + } + + return nil +} + +// SetAdminValidateOnly tells the broker to only validate the request, +// without performing the requested operation (create topics, etc). +// +// Default: false. +// +// Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs +func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) { + ao.isSet = true + ao.val = validateOnly + return ao +} + +// AdminOptionRequireStableOffsets decides if the broker should return stable +// offsets (transaction-committed). +// +// Default: false +// +// Valid for ListConsumerGroupOffsets. +type AdminOptionRequireStableOffsets struct { + isSet bool + val bool +} + +func (ao AdminOptionRequireStableOffsets) supportsListConsumerGroupOffsets() { +} + +func (ao AdminOptionRequireStableOffsets) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cError := C.rd_kafka_AdminOptions_set_require_stable_offsets( + cOptions, bool2cint(ao.val)) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SetAdminRequireStableOffsets decides if the broker should return stable +// offsets (transaction-committed). +// +// Default: false +// +// Valid for ListConsumerGroupOffsets. +func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets) { + ao.isSet = true + ao.val = val + return ao +} + +// AdminOptionMatchConsumerGroupStates decides groups in which state(s) should be +// listed. +// +// Default: nil (lists groups in all states). +// +// Valid for ListConsumerGroups. 
+type AdminOptionMatchConsumerGroupStates struct { + isSet bool + val []ConsumerGroupState +} + +func (ao AdminOptionMatchConsumerGroupStates) supportsListConsumerGroups() { +} + +func (ao AdminOptionMatchConsumerGroupStates) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet || ao.val == nil { + return nil + } + + // Convert states from Go slice to C pointer. + cStates := make([]C.rd_kafka_consumer_group_state_t, len(ao.val)) + cStatesCount := C.size_t(len(ao.val)) + + for idx, state := range ao.val { + cStates[idx] = C.rd_kafka_consumer_group_state_t(state) + } + + cStatesPtr := ((*C.rd_kafka_consumer_group_state_t)(&cStates[0])) + cError := C.rd_kafka_AdminOptions_set_match_consumer_group_states( + cOptions, cStatesPtr, cStatesCount) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// AdminOptionIncludeAuthorizedOperations decides if the broker should return +// authorized operations. +// +// Default: false +// +// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster. +type AdminOptionIncludeAuthorizedOperations struct { + isSet bool + val bool +} + +func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeConsumerGroups() { +} +func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeTopics() { +} +func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeCluster() { +} + +func (ao AdminOptionIncludeAuthorizedOperations) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cError := C.rd_kafka_AdminOptions_set_include_authorized_operations( + cOptions, bool2cint(ao.val)) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SetAdminOptionIncludeAuthorizedOperations decides if the broker should return +// authorized operations. +// +// Default: false +// +// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster. +func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations) { + ao.isSet = true + ao.val = val + return ao +} + +// SetAdminMatchConsumerGroupStates decides groups in which state(s) should be +// listed. +// +// Default: nil (lists groups in all states). +// +// Valid for ListConsumerGroups. +func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates) { + ao.isSet = true + ao.val = val + return ao +} + +// CreateTopicsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. +type CreateTopicsAdminOption interface { + supportsCreateTopics() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DeleteTopicsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminOperationTimeout. +type DeleteTopicsAdminOption interface { + supportsDeleteTopics() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// CreatePartitionsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. +type CreatePartitionsAdminOption interface { + supportsCreatePartitions() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// AlterConfigsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental. +type AlterConfigsAdminOption interface { + supportsAlterConfigs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeConfigsAdminOption - see setters. 
+// +// See SetAdminRequestTimeout. +type DescribeConfigsAdminOption interface { + supportsDescribeConfigs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// CreateACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type CreateACLsAdminOption interface { + supportsCreateACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DescribeACLsAdminOption interface { + supportsDescribeACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DeleteACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DeleteACLsAdminOption interface { + supportsDeleteACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// ListConsumerGroupsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminMatchConsumerGroupStates. +type ListConsumerGroupsAdminOption interface { + supportsListConsumerGroups() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeConsumerGroupsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. +type DescribeConsumerGroupsAdminOption interface { + supportsDescribeConsumerGroups() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeTopicsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. +type DescribeTopicsAdminOption interface { + supportsDescribeTopics() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeClusterAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. +type DescribeClusterAdminOption interface { + supportsDescribeCluster() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DeleteConsumerGroupsAdminOption - see setters. +// +// See SetAdminRequestTimeout. +type DeleteConsumerGroupsAdminOption interface { + supportsDeleteConsumerGroups() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// ListConsumerGroupOffsetsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminRequireStableOffsets. +type ListConsumerGroupOffsetsAdminOption interface { + supportsListConsumerGroupOffsets() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// AlterConsumerGroupOffsetsAdminOption - see setter. +// +// See SetAdminRequestTimeout. +type AlterConsumerGroupOffsetsAdminOption interface { + supportsAlterConsumerGroupOffsets() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeUserScramCredentialsAdminOption - see setter. +// +// See SetAdminRequestTimeout. +type DescribeUserScramCredentialsAdminOption interface { + supportsDescribeUserScramCredentials() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// AlterUserScramCredentialsAdminOption - see setter. +// +// See SetAdminRequestTimeout. +type AlterUserScramCredentialsAdminOption interface { + supportsAlterUserScramCredentials() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// ListOffsetsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminIsolationLevel. +type ListOffsetsAdminOption interface { + supportsListOffsets() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// AdminOption is a generic type not to be used directly. +// +// See CreateTopicsAdminOption et.al. 
+type AdminOption interface {
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {
+
+	cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
+	for _, opt := range options {
+		if opt == nil {
+			continue
+		}
+		err := opt.apply(cOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cOptions, nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/api.html b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/api.html
new file mode 100644
index 000000000..e83ca809a
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/api.html
@@ -0,0 +1,7595 @@
+ kafka - Go Documentation Server
+ Package kafka
+
+ import "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+
+ Overview ▾

+

+ Package kafka provides a high-level Apache Kafka producer and consumer
+using bindings on top of the librdkafka C library.

+

+ # High-level Consumer +

+

+ * Decide if you want to read messages and events by calling `.Poll()` or +the deprecated option of using the `.Events()` channel. (If you want to use +`.Events()` channel then set `"go.events.channel.enable": true`). +

+

+ * Create a Consumer with `kafka.NewConsumer()` providing at +least the `bootstrap.servers` and `group.id` configuration properties. +

+

+ * Call `.Subscribe()` (or `.SubscribeTopics()` to subscribe to multiple topics)
+to join the group with the specified subscription set.
+Subscriptions are atomic; calling `.Subscribe*()` again will leave
+the group and rejoin with the new set of topics.

+

+ * Start reading events and messages from either the `.Events` channel +or by calling `.Poll()`. +

+

+ * When the group has rebalanced each client member is assigned a
+(sub-)set of topic+partitions.
+By default the consumer will start fetching messages for its assigned
+partitions at this point, but your application may enable rebalance
+events to get an insight into what the assigned partitions were
+as well as set the initial offsets. To do this you need to pass
+`"go.application.rebalance.enable": true` to the `NewConsumer()` call
+mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
+with the assigned partition set. You can optionally modify the initial
+offsets (they'll default to stored offsets, and if there are no previously stored
+offsets it will fall back to `"auto.offset.reset"`,
+which defaults to the `latest` message) and then call `.Assign(partitions)`
+to start consuming. If you don't need to modify the initial offsets you will
+not need to call `.Assign()`; the client will do so automatically for you,
+unless you are using the channel-based consumer, in which case
+you MUST call `.Assign()` when receiving the `AssignedPartitions` and
+`RevokedPartitions` events.

+

+ * As messages are fetched they will be made available on either the +`.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`. +

+

+ * Handle messages, events and errors to your liking. +

+

+ * When you are done consuming call `.Close()` to commit final offsets +and leave the consumer group. +
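+ As an illustration only (not part of the upstream package docs), a minimal
+sketch tying the steps above together; `localhost:9092`, `myGroup` and `myTopic`
+are placeholder values, the usual `fmt`/`time` imports are assumed, and error
+handling is abbreviated:
+
+   c, err := kafka.NewConsumer(&kafka.ConfigMap{
+       "bootstrap.servers": "localhost:9092", // placeholder broker address
+       "group.id":          "myGroup",        // placeholder consumer group
+       "auto.offset.reset": "earliest",
+   })
+   if err != nil {
+       panic(err)
+   }
+   defer c.Close()
+
+   c.SubscribeTopics([]string{"myTopic"}, nil)
+   for {
+       msg, err := c.ReadMessage(100 * time.Millisecond)
+       if err == nil {
+           fmt.Printf("Message on %s: %s\n", msg.TopicPartition, string(msg.Value))
+       } else if !err.(kafka.Error).IsTimeout() {
+           // Errors are generally informational; the client tries to recover on its own.
+           fmt.Printf("Consumer error: %v\n", err)
+       }
+   }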

+

+ # Producer +

+

+ * Create a Producer with `kafka.NewProducer()` providing at least +the `bootstrap.servers` configuration properties. +

+

+ * Messages may now be produced either by sending a `*kafka.Message` +on the `.ProduceChannel` or by calling `.Produce()`. +

+

+ * Producing is an asynchronous operation so the client notifies the application
+of per-message produce success or failure through delivery reports.
+Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`,
+and you should check `msg.TopicPartition.Error` for `nil` to find out whether the message
+was successfully delivered or not.
+It is also possible to direct delivery reports to alternate channels
+by providing a non-nil `chan Event` channel to `.Produce()`.
+If no delivery reports are wanted they can be completely disabled by
+setting configuration property `"go.delivery.reports": false`.

+

+ * When you are done producing messages you will need to make sure all messages
+are indeed delivered to the broker (or have failed); remember that this is
+an asynchronous client, so some of your messages may be lingering in internal
+channels or transmission queues.
+To do this you can either keep track of the messages you've produced
+and wait for their corresponding delivery reports, or call the convenience
+function `.Flush()` that will block until all message deliveries are done
+or the provided timeout elapses.

+

+ * Finally call `.Close()` to decommission the producer. +
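+ A minimal sketch of the producer flow above (illustration only, not part of
+the upstream docs); `localhost:9092` and `myTopic` are placeholder values:
+
+   p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
+   if err != nil {
+       panic(err)
+   }
+   defer p.Close()
+
+   // Delivery reports arrive on the Events channel as *kafka.Message.
+   go func() {
+       for e := range p.Events() {
+           if m, ok := e.(*kafka.Message); ok && m.TopicPartition.Error != nil {
+               fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error)
+           }
+       }
+   }()
+
+   topic := "myTopic" // placeholder topic name
+   p.Produce(&kafka.Message{
+       TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+       Value:          []byte("hello"),
+   }, nil)
+
+   // Wait (up to 15s) for outstanding deliveries before shutting down.
+   p.Flush(15 * 1000)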

+

+ # Transactional producer API +

+

+ The transactional producer operates on top of the idempotent producer, +and provides full exactly-once semantics (EOS) for Apache Kafka when used +with the transaction aware consumer (`isolation.level=read_committed`). +

+

+ A producer instance is configured for transactions by setting the +`transactional.id` to an identifier unique for the application. This +id will be used to fence stale transactions from previous instances of +the application, typically following an outage or crash. +

+

+ After creating the transactional producer instance using `NewProducer()` +the transactional state must be initialized by calling +`InitTransactions()`. This is a blocking call that will +acquire a runtime producer id from the transaction coordinator broker +as well as abort any stale transactions and fence any still running producer +instances with the same `transactional.id`. +

+

+ Once transactions are initialized the application may begin a new
+transaction by calling `BeginTransaction()`.
+A producer instance may only have a single ongoing transaction.

+

+ Any messages produced after the transaction has been started will +belong to the ongoing transaction and will be committed or aborted +atomically. +It is not permitted to produce messages outside a transaction +boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`, +`AbortTransaction()` or if the current transaction has failed. +

+

+ If consumed messages are used as input to the transaction, the consumer +instance must be configured with `enable.auto.commit` set to `false`. +To commit the consumed offsets along with the transaction pass the +list of consumed partitions and the last offset processed + 1 to +`SendOffsetsToTransaction()` prior to committing the transaction. +This allows an aborted transaction to be restarted using the previously +committed offsets. +

+

+ To commit the produced messages, and any consumed offsets, to the +current transaction, call `CommitTransaction()`. +This call will block until the transaction has been fully committed or +failed (typically due to fencing by a newer producer instance). +

+

+ Alternatively, if processing fails, or an abortable transaction error is +raised, the transaction needs to be aborted by calling +`AbortTransaction()` which marks any produced messages and +offset commits as aborted. +

+

+ After the current transaction has been committed or aborted a new +transaction may be started by calling `BeginTransaction()` again. +
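+ Putting the calls above together, a hedged sketch of a single transaction
+(illustration only, not part of the upstream docs; `tx-app-1` is a placeholder
+transactional.id, `context`/`time` imports are assumed and error handling is
+reduced to the essentials):
+
+   p, err := kafka.NewProducer(&kafka.ConfigMap{
+       "bootstrap.servers": "localhost:9092", // placeholder broker address
+       "transactional.id":  "tx-app-1",       // placeholder, unique per application
+   })
+   if err != nil {
+       panic(err)
+   }
+   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+   defer cancel()
+
+   if err = p.InitTransactions(ctx); err != nil {
+       panic(err)
+   }
+   if err = p.BeginTransaction(); err != nil {
+       panic(err)
+   }
+   // ... produce messages and, when consuming input, call SendOffsetsToTransaction() ...
+   if err = p.CommitTransaction(ctx); err != nil {
+       if err.(kafka.Error).TxnRequiresAbort() {
+           p.AbortTransaction(ctx)
+       }
+   }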

+

+ Retriable errors:
+Some error cases allow the attempted operation to be retried; this is
+indicated by the error object having the retriable flag set, which can
+be detected by calling `err.(kafka.Error).IsRetriable()`.
+When this flag is set the application may retry the operation immediately,
+or preferably after a short grace period (to avoid busy-looping).
+Retriable errors include timeouts, broker transport failures, etc.

+

+ Abortable errors: +An ongoing transaction may fail permanently due to various errors, +such as transaction coordinator becoming unavailable, write failures to the +Apache Kafka log, under-replicated partitions, etc. +At this point the producer application must abort the current transaction +using `AbortTransaction()` and optionally start a new transaction +by calling `BeginTransaction()`. +Whether an error is abortable or not is detected by calling +`err.(kafka.Error).TxnRequiresAbort()` on the returned error object. +

+

+ Fatal errors:
+While the underlying idempotent producer will typically only raise
+fatal errors for unrecoverable cluster errors where the idempotency
+guarantees can't be maintained, most of these are treated as abortable by
+the transactional producer since transactions may be aborted and retried
+in their entirety; the transactional producer, on the other hand, introduces
+a set of additional fatal errors which the application needs to handle by
+shutting down the producer and terminating. There is no way for a producer
+instance to recover from fatal errors.
+Whether an error is fatal or not is detected by calling
+`err.(kafka.Error).IsFatal()` on the returned error object or by checking
+the global `GetFatalError()`.

+

+ Handling of other errors:
+For errors that have neither the retriable, abortable nor fatal flag set
+it is not always obvious how to handle them. While some of these errors
+may be indicative of bugs in the application code, such as when
+an invalid parameter is passed to a method, other errors might originate
+from the broker and be passed through as-is to the application.
+The general recommendation is to treat these errors, which have
+neither the retriable nor abortable flag set, as fatal.

+

+ Error handling example: +

+
retry:
+
+   err := producer.CommitTransaction(...)
+   if err == nil {
+       return nil
+   } else if err.(kafka.Error).TxnRequiresAbort() {
+       do_abort_transaction_and_reset_inputs()
+   } else if err.(kafka.Error).IsRetriable() {
+       goto retry
+   } else { // treat all other errors as fatal errors
+       panic(err)
+   }
+
+

+ # Events +

+

+ Apart from emitting messages and delivery reports the client also communicates +with the application through a number of different event types. +An application may choose to handle or ignore these events. +

+

+ # Consumer events +

+

+ * `*kafka.Message` - a fetched message. +

+

+ * `AssignedPartitions` - The assigned partition set for this client following a rebalance. +Requires `go.application.rebalance.enable` +

+

+ * `RevokedPartitions` - The counterpart to `AssignedPartitions` following a rebalance.
+`AssignedPartitions` and `RevokedPartitions` are symmetrical.
+Requires `go.application.rebalance.enable`

+

+ * `PartitionEOF` - Consumer has reached the end of a partition. +NOTE: The consumer will keep trying to fetch new messages for the partition. +

+

+ * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled). +
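+ For illustration (not part of the upstream docs), the consumer events above
+are typically handled in a `Poll()` loop such as this sketch, which assumes a
+consumer `c` created as shown earlier:
+
+   for {
+       switch e := c.Poll(100).(type) {
+       case *kafka.Message:
+           fmt.Printf("Message on %s\n", e.TopicPartition)
+       case kafka.AssignedPartitions:
+           // Only delivered when go.application.rebalance.enable is set.
+           c.Assign(e.Partitions)
+       case kafka.RevokedPartitions:
+           c.Unassign()
+       case kafka.PartitionEOF:
+           fmt.Printf("Reached end of %v\n", e)
+       case kafka.Error:
+           fmt.Printf("Error: %v\n", e)
+       case nil:
+           // Poll timed out, nothing to do.
+       }
+   }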

+

+ # Producer events +

+

+ * `*kafka.Message` - delivery report for produced message. +Check `.TopicPartition.Error` for delivery result. +

+

+ # Generic events for both Consumer and Producer +

+

+ * `KafkaError` - client (error codes are prefixed with _) or broker error. +These errors are normally just informational since the +client will try its best to automatically recover (eventually). +

+

+ * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required. +This event only occurs with sasl.mechanism=OAUTHBEARER. +Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient +instance when a successful token retrieval is completed, otherwise be sure to +invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or +if setting the token failed, which could happen if an extension doesn't meet +the required regular expression); invoking SetOAuthBearerTokenFailure() will +schedule a new event for 10 seconds later so another retrieval can be attempted. +

+

+ Hint: If your application registers a signal notification
+(signal.Notify) make sure the signal channel is buffered to avoid
+possible complications with blocking Poll() calls.
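+ For example (an illustrative sketch, not from the upstream docs), a buffered
+signal channel combined with Poll(); `handleEvent` is a hypothetical helper and
+the `os`, `os/signal` and `syscall` imports are assumed:
+
+   sigchan := make(chan os.Signal, 1) // buffered, as recommended above
+   signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
+
+   run := true
+   for run {
+       select {
+       case sig := <-sigchan:
+           fmt.Printf("Caught signal %v: terminating\n", sig)
+           run = false
+       default:
+           handleEvent(c.Poll(100)) // hypothetical event handler
+       }
+   }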

+

+ Note: The Confluent Kafka Go client is safe for concurrent use. +

+
+
+
+ +
+

+ Index ▾

+ +
+
+
+ + Constants + +
+
+ + func LibraryVersion() (int, string) + +
+
+ + func WriteErrorCodes(f *os.File) + +
+
+ + type ACLBinding + +
+
+ + type ACLBindingFilter + +
+
+ + type ACLBindingFilters + +
+
+ + type ACLBindings + +
+
+ + func (a ACLBindings) Len() int + +
+
+ + func (a ACLBindings) Less(i, j int) bool + +
+
+ + func (a ACLBindings) Swap(i, j int) + +
+
+ + type ACLOperation + +
+
+ + func ACLOperationFromString(aclOperationString string) (ACLOperation, error) + +
+
+ + func (o ACLOperation) String() string + +
+
+ + type ACLPermissionType + +
+
+ + func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) + +
+
+ + func (o ACLPermissionType) String() string + +
+
+ + type AdminClient + +
+
+ + func NewAdminClient(conf *ConfigMap) (*AdminClient, error) + +
+
+ + func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error) + +
+
+ + func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error) + +
+
+ + func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) + +
+
+ + func (a *AdminClient) AlterConsumerGroupOffsets(ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions, options ...AlterConsumerGroupOffsetsAdminOption) (acgor AlterConsumerGroupOffsetsResult, err error) + +
+
+ + func (a *AdminClient) AlterUserScramCredentials(ctx context.Context, upsertions []UserScramCredentialUpsertion, deletions []UserScramCredentialDeletion, options ...AlterUserScramCredentialsAdminOption) (result AlterUserScramCredentialsResult, err error) + +
+
+ + func (a *AdminClient) Close() + +
+
+ + func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error) + +
+
+ + func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error) + +
+
+ + func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) + +
+
+ + func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error) + +
+
+ + func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error) + +
+
+ + func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) + +
+
+ + func (a *AdminClient) DeleteConsumerGroups(ctx context.Context, groups []string, options ...DeleteConsumerGroupsAdminOption) (result DeleteConsumerGroupsResult, err error) + +
+
+ + func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error) + +
+
+ + func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) + +
+
+ + func (a *AdminClient) DescribeCluster(ctx context.Context, options ...DescribeClusterAdminOption) (result DescribeClusterResult, err error) + +
+
+ + func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error) + +
+
+ + func (a *AdminClient) DescribeConsumerGroups(ctx context.Context, groups []string, options ...DescribeConsumerGroupsAdminOption) (result DescribeConsumerGroupsResult, err error) + +
+
+ + func (a *AdminClient) DescribeTopics(ctx context.Context, topics TopicCollection, options ...DescribeTopicsAdminOption) (result DescribeTopicsResult, err error) + +
+
+ + func (a *AdminClient) DescribeUserScramCredentials(ctx context.Context, users []string, options ...DescribeUserScramCredentialsAdminOption) (result DescribeUserScramCredentialsResult, err error) + +
+
+ + func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) + +
+
+ + func (a *AdminClient) IncrementalAlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) + +
+
+ + func (a *AdminClient) IsClosed() bool + +
+
+ + func (a *AdminClient) ListConsumerGroupOffsets(ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions, options ...ListConsumerGroupOffsetsAdminOption) (lcgor ListConsumerGroupOffsetsResult, err error) + +
+
+ + func (a *AdminClient) ListConsumerGroups(ctx context.Context, options ...ListConsumerGroupsAdminOption) (result ListConsumerGroupsResult, err error) + +
+
+ + func (a *AdminClient) ListOffsets(ctx context.Context, topicPartitionOffsets map[TopicPartition]OffsetSpec, options ...ListOffsetsAdminOption) (result ListOffsetsResult, err error) + +
+
+ + func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error + +
+
+ + func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error + +
+
+ + func (a *AdminClient) SetSaslCredentials(username, password string) error + +
+
+ + func (a *AdminClient) String() string + +
+
+ + type AdminOption + +
+
+ + type AdminOptionIncludeAuthorizedOperations + +
+
+ + func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations) + +
+
+ + type AdminOptionIsolationLevel + +
+
+ + func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel) + +
+
+ + type AdminOptionMatchConsumerGroupStates + +
+
+ + func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates) + +
+
+ + type AdminOptionOperationTimeout + +
+
+ + func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) + +
+
+ + type AdminOptionRequestTimeout + +
+
+ + func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) + +
+
+ + type AdminOptionRequireStableOffsets + +
+
+ + func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets) + +
+
+ + type AdminOptionValidateOnly + +
+
+ + func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) + +
+
+ + type AlterConfigOpType + +
+
+ + func (o AlterConfigOpType) String() string + +
+
+ + type AlterConfigsAdminOption + +
+
+ + type AlterConsumerGroupOffsetsAdminOption + +
+
+ + type AlterConsumerGroupOffsetsResult + +
+
+ + type AlterOperation + +
+
+ + func (o AlterOperation) String() string + +
+
+ + type AlterUserScramCredentialsAdminOption + +
+
+ + type AlterUserScramCredentialsResult + +
+
+ + type AssignedPartitions + +
+
+ + func (e AssignedPartitions) String() string + +
+
+ + type BrokerMetadata + +
+
+ + type ConfigEntry + +
+
+ + func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry + +
+
+ + func StringMapToIncrementalConfigEntries(stringMap map[string]string, operationMap map[string]AlterConfigOpType) []ConfigEntry + +
+
+ + func (c ConfigEntry) String() string + +
+
+ + type ConfigEntryResult + +
+
+ + func (c ConfigEntryResult) String() string + +
+
+ + type ConfigMap + +
+
+ + func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) + +
+
+ + func (m ConfigMap) Set(kv string) error + +
+
+ + func (m ConfigMap) SetKey(key string, value ConfigValue) error + +
+
+ + type ConfigResource + +
+
+ + func (c ConfigResource) String() string + +
+
+ + type ConfigResourceResult + +
+
+ + func (c ConfigResourceResult) String() string + +
+
+ + type ConfigSource + +
+
+ + func (t ConfigSource) String() string + +
+
+ + type ConfigValue + +
+
+ + type Consumer + +
+
+ + func NewConsumer(conf *ConfigMap) (*Consumer, error) + +
+
+ + func (c *Consumer) Assign(partitions []TopicPartition) (err error) + +
+
+ + func (c *Consumer) Assignment() (partitions []TopicPartition, err error) + +
+
+ + func (c *Consumer) AssignmentLost() bool + +
+
+ + func (c *Consumer) Close() (err error) + +
+
+ + func (c *Consumer) Commit() ([]TopicPartition, error) + +
+
+ + func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) + +
+
+ + func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) + +
+
+ + func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) + +
+
+ + func (c *Consumer) Events() chan Event + +
+
+ + func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error) + +
+
+ + func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) + +
+
+ + func (c *Consumer) GetRebalanceProtocol() string + +
+
+ + func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error) + +
+
+ + func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error) + +
+
+ + func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error) + +
+
+ + func (c *Consumer) IsClosed() bool + +
+
+ + func (c *Consumer) Logs() chan LogEvent + +
+
+ + func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) + +
+
+ + func (c *Consumer) Pause(partitions []TopicPartition) (err error) + +
+
+ + func (c *Consumer) Poll(timeoutMs int) (event Event) + +
+
+ + func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error) + +
+
+ + func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) + +
+
+ + func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) + +
+
+ + func (c *Consumer) Resume(partitions []TopicPartition) (err error) + +
+
+ + func (c *Consumer) Seek(partition TopicPartition, ignoredTimeoutMs int) error + +
+
+ + func (c *Consumer) SeekPartitions(partitions []TopicPartition) ([]TopicPartition, error) + +
+
+ + func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error + +
+
+ + func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error + +
+
+ + func (c *Consumer) SetSaslCredentials(username, password string) error + +
+
+ + func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) + +
+
+ + func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) + +
+
+ + func (c *Consumer) String() string + +
+
+ + func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error + +
+
+ + func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) + +
+
+ + func (c *Consumer) Subscription() (topics []string, err error) + +
+
+ + func (c *Consumer) Unassign() (err error) + +
+
+ + func (c *Consumer) Unsubscribe() (err error) + +
+
+ + type ConsumerGroupDescription + +
+
+ + type ConsumerGroupListing + +
+
+ + type ConsumerGroupMetadata + +
+
+ + func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error) + +
+
+ + type ConsumerGroupResult + +
+
+ + func (g ConsumerGroupResult) String() string + +
+
+ + type ConsumerGroupState + +
+
+ + func ConsumerGroupStateFromString(stateString string) (ConsumerGroupState, error) + +
+
+ + func (t ConsumerGroupState) String() string + +
+
+ + type ConsumerGroupTopicPartitions + +
+
+ + func (gtp ConsumerGroupTopicPartitions) String() string + +
+
+ + type CreateACLResult + +
+
+ + type CreateACLsAdminOption + +
+
+ + type CreatePartitionsAdminOption + +
+
+ + type CreateTopicsAdminOption + +
+
+ + type DeleteACLsAdminOption + +
+
+ + type DeleteACLsResult + +
+
+ + type DeleteConsumerGroupsAdminOption + +
+
+ + type DeleteConsumerGroupsResult + +
+
+ + type DeleteTopicsAdminOption + +
+
+ + type DescribeACLsAdminOption + +
+
+ + type DescribeACLsResult + +
+
+ + type DescribeClusterAdminOption + +
+
+ + type DescribeClusterResult + +
+
+ + type DescribeConfigsAdminOption + +
+
+ + type DescribeConsumerGroupsAdminOption + +
+
+ + type DescribeConsumerGroupsResult + +
+
+ + type DescribeTopicsAdminOption + +
+
+ + type DescribeTopicsResult + +
+
+ + type DescribeUserScramCredentialsAdminOption + +
+
+ + type DescribeUserScramCredentialsResult + +
+
+ + type Error + +
+
+ + func NewError(code ErrorCode, str string, fatal bool) (err Error) + +
+
+ + func (e Error) Code() ErrorCode + +
+
+ + func (e Error) Error() string + +
+
+ + func (e Error) IsFatal() bool + +
+
+ + func (e Error) IsRetriable() bool + +
+
+ + func (e Error) IsTimeout() bool + +
+
+ + func (e Error) String() string + +
+
+ + func (e Error) TxnRequiresAbort() bool + +
+
+ + type ErrorCode + +
+
+ + func (c ErrorCode) String() string + +
+
+ + type Event + +
+
+ + type Handle + +
+
+ + type Header + +
+
+ + func (h Header) String() string + +
+
+ + type IsolationLevel + +
+
+ + type ListConsumerGroupOffsetsAdminOption + +
+
+ + type ListConsumerGroupOffsetsResult + +
+
+ + type ListConsumerGroupsAdminOption + +
+
+ + type ListConsumerGroupsResult + +
+
+ + type ListOffsetsAdminOption + +
+
+ + type ListOffsetsResult + +
+
+ + type ListOffsetsResultInfo + +
+
+ + type LogEvent + +
+
+ + func (logEvent LogEvent) String() string + +
+
+ + type MemberAssignment + +
+
+ + type MemberDescription + +
+
+ + type Message + +
+
+ + func (m *Message) String() string + +
+
+ + type Metadata + +
+
+ + type MockCluster + +
+
+ + func NewMockCluster(brokerCount int) (*MockCluster, error) + +
+
+ + func (mc *MockCluster) BootstrapServers() string + +
+
+ + func (mc *MockCluster) Close() + +
+
+ + func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error + +
+
+ + func (mc *MockCluster) SetBrokerDown(brokerID int) error + +
+
+ + func (mc *MockCluster) SetBrokerUp(brokerID int) error + +
+
+ + func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error + +
+
+ + type Node + +
+
+ + func (n Node) String() string + +
+
+ + type OAuthBearerToken + +
+
+ + type OAuthBearerTokenRefresh + +
+
+ + func (o OAuthBearerTokenRefresh) String() string + +
+
+ + type Offset + +
+
+ + func NewOffset(offset interface{}) (Offset, error) + +
+
+ + func OffsetTail(relativeOffset Offset) Offset + +
+
+ + func (o *Offset) Set(offset interface{}) error + +
+
+ + func (o Offset) String() string + +
+
+ + type OffsetSpec + +
+
+ + func NewOffsetSpecForTimestamp(timestamp int64) OffsetSpec + +
+
+ + type OffsetsCommitted + +
+
+ + func (o OffsetsCommitted) String() string + +
+
+ + type PartitionEOF + +
+
+ + func (p PartitionEOF) String() string + +
+
+ + type PartitionMetadata + +
+
+ + type PartitionsSpecification + +
+
+ + type Producer + +
+
+ + func NewProducer(conf *ConfigMap) (*Producer, error) + +
+
+ + func (p *Producer) AbortTransaction(ctx context.Context) error + +
+
+ + func (p *Producer) BeginTransaction() error + +
+
+ + func (p *Producer) Close() + +
+
+ + func (p *Producer) CommitTransaction(ctx context.Context) error + +
+
+ + func (p *Producer) Events() chan Event + +
+
+ + func (p *Producer) Flush(timeoutMs int) int + +
+
+ + func (p *Producer) GetFatalError() error + +
+
+ + func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) + +
+
+ + func (p *Producer) InitTransactions(ctx context.Context) error + +
+
+ + func (p *Producer) IsClosed() bool + +
+
+ + func (p *Producer) Len() int + +
+
+ + func (p *Producer) Logs() chan LogEvent + +
+
+ + func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) + +
+
+ + func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error + +
+
+ + func (p *Producer) ProduceChannel() chan *Message + +
+
+ + func (p *Producer) Purge(flags int) error + +
+
+ + func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) + +
+
+ + func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error + +
+
+ + func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error + +
+
+ + func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error + +
+
+ + func (p *Producer) SetSaslCredentials(username, password string) error + +
+
+ + func (p *Producer) String() string + +
+
+ + func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode + +
+
+ + type RebalanceCb + +
+
+ + type ResourcePatternType + +
+
+ + func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) + +
+
+ + func (t ResourcePatternType) String() string + +
+
+ + type ResourceType + +
+
+ + func ResourceTypeFromString(typeString string) (ResourceType, error) + +
+
+ + func (t ResourceType) String() string + +
+
+ + type RevokedPartitions + +
+
+ + func (e RevokedPartitions) String() string + +
+
+ + type ScramCredentialInfo + +
+
+ + type ScramMechanism + +
+
+ + func ScramMechanismFromString(mechanism string) (ScramMechanism, error) + +
+
+ + func (o ScramMechanism) String() string + +
+
+ + type Stats + +
+
+ + func (e Stats) String() string + +
+
+ + type TimestampType + +
+
+ + func (t TimestampType) String() string + +
+
+ + type TopicCollection + +
+
+ + func NewTopicCollectionOfTopicNames(names []string) TopicCollection + +
+
+ + type TopicDescription + +
+
+ + type TopicMetadata + +
+
+ + type TopicPartition + +
+
+ + func (p TopicPartition) String() string + +
+
+ + type TopicPartitionInfo + +
+
+ + type TopicPartitions + +
+
+ + func (tps TopicPartitions) Len() int + +
+
+ + func (tps TopicPartitions) Less(i, j int) bool + +
+
+ + func (tps TopicPartitions) Swap(i, j int) + +
+
+ + type TopicResult + +
+
+ + func (t TopicResult) String() string + +
+
+ + type TopicSpecification + +
+
+ + type UUID + +
+
+ + func (uuid UUID) GetLeastSignificantBits() int64 + +
+
+ + func (uuid UUID) GetMostSignificantBits() int64 + +
+
+ + func (uuid UUID) String() string + +
+
+ + type UserScramCredentialDeletion + +
+
+ + type UserScramCredentialUpsertion + +
+
+ + type UserScramCredentialsDescription + +
+
+
+ +

+ Package files +

+

+ 00version.go adminapi.go adminoptions.go build_glibc_linux_amd64.go config.go consumer.go context.go error.go error_gen.go event.go generated_errors.go handle.go header.go kafka.go log.go message.go metadata.go misc.go mockcluster.go offset.go producer.go time.go

+
+ +
+ +

+ Constants +

+
const (
+    // ConsumerGroupStateUnknown - Unknown ConsumerGroupState
+    ConsumerGroupStateUnknown = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN)
+    // ConsumerGroupStatePreparingRebalance - preparing rebalance
+    ConsumerGroupStatePreparingRebalance = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE)
+    // ConsumerGroupStateCompletingRebalance - completing rebalance
+    ConsumerGroupStateCompletingRebalance = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE)
+    // ConsumerGroupStateStable - stable
+    ConsumerGroupStateStable = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_STABLE)
+    // ConsumerGroupStateDead - dead group
+    ConsumerGroupStateDead = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_DEAD)
+    // ConsumerGroupStateEmpty - empty group
+    ConsumerGroupStateEmpty = ConsumerGroupState(C.RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY)
+)
+
const (
+    // ResourceUnknown - Unknown
+    ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN)
+    // ResourceAny - match any resource type (DescribeConfigs)
+    ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY)
+    // ResourceTopic - Topic
+    ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC)
+    // ResourceGroup - Group
+    ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP)
+    // ResourceBroker - Broker
+    ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER)
+)
+
const (
+    // ConfigSourceUnknown is the default value
+    ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG)
+    // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic
+    ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG)
+    // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker
+    ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG)
+    // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster
+    ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG)
+    // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. from server.properties file)
+    ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG)
+    // ConfigSourceDefault is built-in default configuration for configs that have a default value
+    ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG)
+)
+
const (
+    // AlterConfigOpTypeSet sets/overwrites the configuration
+    // setting.
+    AlterConfigOpTypeSet = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET)
+    // AlterConfigOpTypeDelete sets the configuration setting
+    // to default or NULL.
+    AlterConfigOpTypeDelete = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE)
+    // AlterConfigOpTypeAppend appends the value to existing
+    // configuration settings.
+    AlterConfigOpTypeAppend = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND)
+    // AlterConfigOpTypeSubtract subtracts the value from
+    // existing configuration settings.
+    AlterConfigOpTypeSubtract = AlterConfigOpType(C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT)
+)
+
const (
+    // ResourcePatternTypeUnknown is a resource pattern type not known or not set.
+    ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN)
+    // ResourcePatternTypeAny matches any resource, used for lookups.
+    ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY)
+    // ResourcePatternTypeMatch will perform pattern matching
+    ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH)
+    // ResourcePatternTypeLiteral matches a literal resource name
+    ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL)
+    // ResourcePatternTypePrefixed matches a prefixed resource name
+    ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED)
+)
+
const (
+    // ACLOperationUnknown represents an unknown or unset operation
+    ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN)
+    // ACLOperationAny in a filter, matches any ACLOperation
+    ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY)
+    // ACLOperationAll represents all the operations
+    ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL)
+    // ACLOperationRead a read operation
+    ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ)
+    // ACLOperationWrite represents a write operation
+    ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE)
+    // ACLOperationCreate represents a create operation
+    ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE)
+    // ACLOperationDelete represents a delete operation
+    ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE)
+    // ACLOperationAlter represents an alter operation
+    ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER)
+    // ACLOperationDescribe represents a describe operation
+    ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE)
+    // ACLOperationClusterAction represents a cluster action operation
+    ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION)
+    // ACLOperationDescribeConfigs represents a describe configs operation
+    ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS)
+    // ACLOperationAlterConfigs represents an alter configs operation
+    ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS)
+    // ACLOperationIdempotentWrite represents an idempotent write operation
+    ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE)
+)
+
const (
+    // ACLPermissionTypeUnknown represents an unknown ACLPermissionType
+    ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN)
+    // ACLPermissionTypeAny in a filter, matches any ACLPermissionType
+    ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY)
+    // ACLPermissionTypeDeny disallows access
+    ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY)
+    // ACLPermissionTypeAllow grants access
+    ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW)
+)
+
const (
+    // ScramMechanismUnknown - Unknown SASL/SCRAM mechanism
+    ScramMechanismUnknown = ScramMechanism(C.RD_KAFKA_SCRAM_MECHANISM_UNKNOWN)
+    // ScramMechanismSHA256 - SCRAM-SHA-256 mechanism
+    ScramMechanismSHA256 = ScramMechanism(C.RD_KAFKA_SCRAM_MECHANISM_SHA_256)
+    // ScramMechanismSHA512 - SCRAM-SHA-512 mechanism
+    ScramMechanismSHA512 = ScramMechanism(C.RD_KAFKA_SCRAM_MECHANISM_SHA_512)
+)
+
const (
+    // MaxTimestampOffsetSpec is used to describe the offset with the maximum timestamp,
+    // which may be different than LatestOffsetSpec as the timestamp can be set client-side.
+    MaxTimestampOffsetSpec = OffsetSpec(C.RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP)
+    // EarliestOffsetSpec is used to describe the earliest offset for the TopicPartition.
+    EarliestOffsetSpec = OffsetSpec(C.RD_KAFKA_OFFSET_SPEC_EARLIEST)
+    // LatestOffsetSpec is used to describe the latest offset for the TopicPartition.
+    LatestOffsetSpec = OffsetSpec(C.RD_KAFKA_OFFSET_SPEC_LATEST)
+)
+
const (
+    // IsolationLevelReadUncommitted - read uncommitted isolation level
+    IsolationLevelReadUncommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED)
+    // IsolationLevelReadCommitted - read committed isolation level
+    IsolationLevelReadCommitted = IsolationLevel(C.RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED)
+)
+
const (
+    // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
+    TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
+    // TimestampCreateTime indicates timestamp set by producer (source time)
+    TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
+    // TimestampLogAppendTime indicates timestamp set by broker (store time)
+    TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+)
+
const (
+    // PurgeInFlight purges messages in-flight to or from the broker.
+    // Purging these messages will void any future acknowledgements from the
+    // broker, making it impossible for the application to know if these
+    // messages were successfully delivered or not.
+    // Retrying these messages may lead to duplicates.
+    PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT)
+
+    // PurgeQueue purges messages in internal queues.
+    PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE)
+
+    // PurgeNonBlocking makes the purge call return immediately instead of
+    // waiting for background thread queue purging to finish.
+    PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING)
+)
+
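A minimal sketch of how these purge flags are typically combined on shutdown, assuming an already-constructed *kafka.Producer; the helper name purgeAndClose is hypothetical and not part of the library:

```go
package examples

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// purgeAndClose drops anything still queued locally or in flight to the
// broker before closing the producer; purged in-flight messages will never
// be acknowledged, so their delivery status stays unknown.
func purgeAndClose(p *kafka.Producer) error {
	if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight); err != nil {
		return fmt.Errorf("purge failed: %w", err)
	}
	p.Close()
	return nil
}
```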
const (
+    // AlterOperationSet sets/overwrites the configuration setting.
+    AlterOperationSet = iota
+)
+

+ LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client.

const LibrdkafkaLinkInfo = "static glibc_linux_amd64 from librdkafka-static-bundle-v2.3.0.tgz"

+ OffsetBeginning represents the earliest offset (logical).

const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)

+ OffsetEnd represents the latest offset (logical).

const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)

+ OffsetInvalid represents an invalid/unspecified offset.

const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)

+ OffsetStored represents a stored offset.

const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)

+ PartitionAny represents any partition (for partitioning), or unspecified value (for all other cases).

const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)

+ func LibraryVersion

func LibraryVersion() (int, string)

+ LibraryVersion returns the underlying librdkafka library version as a
+ (version_int, version_str) tuple.

+ func WriteErrorCodes

func WriteErrorCodes(f *os.File)

+ WriteErrorCodes writes Go error code constants to file from the
+ librdkafka error codes. This function is not intended for public use.

+ type ACLBinding

+ ACLBinding specifies the operation and permission type for a specific principal
+ over one or more resources of the same type. Used by `AdminClient.CreateACLs`,
+ returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.

type ACLBinding struct {
+    Type ResourceType // The resource type.
+    // The resource name, which depends on the resource type.
+    // For ResourceBroker the resource name is the broker id.
+    Name                string
+    ResourcePatternType ResourcePatternType // The resource pattern, relative to the name.
+    Principal           string              // The principal this ACLBinding refers to.
+    Host                string              // The host that the call is allowed to come from.
+    Operation           ACLOperation        // The operation/s specified by this binding.
+    PermissionType      ACLPermissionType   // The permission type for the specified operation.
+}
+
+
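As a rough usage sketch (topic name, principal, and timeout below are made-up values), an ACLBinding is filled in field by field and passed to CreateACLs:

```go
package examples

import (
	"context"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// grantRead grants read access on a literal topic name to a principal;
// the topic and principal values here are placeholders.
func grantRead(a *kafka.AdminClient) error {
	binding := kafka.ACLBinding{
		Type:                kafka.ResourceTopic,
		Name:                "orders",
		ResourcePatternType: kafka.ResourcePatternTypeLiteral,
		Principal:           "User:alice",
		Host:                "*",
		Operation:           kafka.ACLOperationRead,
		PermissionType:      kafka.ACLPermissionTypeAllow,
	}
	results, err := a.CreateACLs(context.Background(),
		kafka.ACLBindings{binding}, kafka.SetAdminRequestTimeout(10*time.Second))
	if err != nil {
		return err // client-level error
	}
	// Each binding gets its own per-request result.
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			return r.Error
		}
	}
	return nil
}
```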

+ type ACLBindingFilter

+ ACLBindingFilter specifies a filter used to return a list of ACL bindings matching
+ some or all of its attributes. Used by `AdminClient.DescribeACLs` and
+ `AdminClient.DeleteACLs`.

type ACLBindingFilter = ACLBinding

+ type ACLBindingFilters

+ ACLBindingFilters is a slice of ACLBindingFilter that also implements
+ the sort interface.

type ACLBindingFilters []ACLBindingFilter

+ type ACLBindings

+ ACLBindings is a slice of ACLBinding that also implements
+ the sort interface.

type ACLBindings []ACLBinding

+ func (ACLBindings) Len

func (a ACLBindings) Len() int

+ func (ACLBindings) Less

func (a ACLBindings) Less(i, j int) bool

+ func (ACLBindings) Swap

func (a ACLBindings) Swap(i, j int)

+ type ACLOperation

+ ACLOperation enumerates the different types of ACL operation.

type ACLOperation int
+

+ func ACLOperationFromString

func ACLOperationFromString(aclOperationString string) (ACLOperation, error)

+ ACLOperationFromString translates an ACL operation name to
+ an ACLOperation value.

+ func (ACLOperation) String

func (o ACLOperation) String() string

+ String returns the human-readable representation of an ACLOperation.

+ type ACLPermissionType

+ ACLPermissionType enumerates the different types of ACL permission types.

type ACLPermissionType int

+ func ACLPermissionTypeFromString

func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error)

+ ACLPermissionTypeFromString translates an ACL permission type name to
+ an ACLPermissionType value.

+ func (ACLPermissionType) String

func (o ACLPermissionType) String() string

+ String returns the human-readable representation of an ACLPermissionType.

+

+ type AdminClient

+ AdminClient is derived from an existing Producer or Consumer.

type AdminClient struct {
+    // contains filtered or unexported fields
+}
+
+

+ func NewAdminClient

func NewAdminClient(conf *ConfigMap) (*AdminClient, error)

+ NewAdminClient creates a new AdminClient instance with a new underlying client instance.

+ func NewAdminClientFromConsumer

func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)

+ NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance.
+ The AdminClient will use the same configuration and connections as the parent instance.

+ func NewAdminClientFromProducer

func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)

+ NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance.
+ The AdminClient will use the same configuration and connections as the parent instance.

+
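For orientation, a minimal construction sketch (the broker address is a placeholder):

```go
package examples

import (
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// newAdmin builds a standalone AdminClient from a ConfigMap; alternatively,
// NewAdminClientFromProducer/NewAdminClientFromConsumer reuse an existing
// client's configuration and connections instead of opening new ones.
func newAdmin() (*kafka.AdminClient, error) {
	return kafka.NewAdminClient(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092",
	})
}
```

Callers should defer a.Close() once the client is no longer needed.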

+ func (*AdminClient) AlterConfigs

func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)

+ AlterConfigs alters/updates cluster resource configuration.

+ Updates are not transactional so they may succeed for a subset
+ of the provided resources while others fail.
+ The configuration for a particular resource is updated atomically,
+ replacing values using the provided ConfigEntrys and reverting
+ unspecified ConfigEntrys to their default values.

+ Requires broker version >=0.11.0.0

+ AlterConfigs will replace all existing configuration for
+ the provided resources with the new configuration given,
+ reverting all other configuration to their default values.

+ Multiple resources and resource types may be set, but at most one
+ resource of type ResourceBroker is allowed per call since these
+ resource requests must be sent to the broker specified in the resource.

+ Deprecated: AlterConfigs is deprecated in favour of IncrementalAlterConfigs.

+

+ func (*AdminClient) AlterConsumerGroupOffsets

func (a *AdminClient) AlterConsumerGroupOffsets(
+    ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions,
+    options ...AlterConsumerGroupOffsetsAdminOption) (acgor AlterConsumerGroupOffsetsResult, err error)

+ AlterConsumerGroupOffsets alters the offsets for topic partition(s) for
+ consumer group(s).

+ Parameters:

- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of
+  which has the id of a consumer group, and a slice of the TopicPartitions
+  we need to alter the offsets for. Currently, the size of
+  `groupsPartitions` has to be exactly one.
+- `options` - AlterConsumerGroupOffsetsAdminOption options.

+ Returns an AlterConsumerGroupOffsetsResult, containing a slice of
+ ConsumerGroupTopicPartitions corresponding to the input slice, plus an error
+ that is not `nil` for client level errors. Individual TopicPartitions inside
+ each of the ConsumerGroupTopicPartitions should also be checked for errors.
+ This will succeed at the partition level only if the group is not actively
+ subscribed to the corresponding topic(s).

+
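A hedged sketch of the call shape described above; the group, topic, and offset are placeholders, and the Group/Partitions field names of ConsumerGroupTopicPartitions are assumed from the current client version:

```go
package examples

import (
	"context"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// rewindGroup resets one group's offset for a single partition; exactly one
// ConsumerGroupTopicPartitions element is passed, as required above.
func rewindGroup(a *kafka.AdminClient) error {
	topic := "orders"
	res, err := a.AlterConsumerGroupOffsets(context.Background(),
		[]kafka.ConsumerGroupTopicPartitions{{
			Group: "billing-consumers",
			Partitions: []kafka.TopicPartition{
				{Topic: &topic, Partition: 0, Offset: kafka.Offset(0)},
			},
		}})
	if err != nil {
		return err // client-level error
	}
	// Per-partition errors must be checked individually.
	for _, g := range res.ConsumerGroupsTopicPartitions {
		for _, tp := range g.Partitions {
			if tp.Error != nil {
				return tp.Error
			}
		}
	}
	return nil
}
```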

+ func (*AdminClient) + + AlterUserScramCredentials + + +

+
func (a *AdminClient) AlterUserScramCredentials(
+    ctx context.Context, upsertions []UserScramCredentialUpsertion, deletions []UserScramCredentialDeletion,
+    options ...AlterUserScramCredentialsAdminOption) (result AlterUserScramCredentialsResult, err error)
+

+ AlterUserScramCredentials alters SASL/SCRAM credentials. +The pair (user, mechanism) must be unique among upsertions and deletions. +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `upsertions` - a slice of user credential upsertions
+- `deletions` - a slice of user credential deletions
+- `options` - AlterUserScramCredentialsAdminOption options.
+
+

+ Returns a map from user name to the corresponding Error, with error code +ErrNoError when the request succeeded. +

+

+ func (*AdminClient) + + Close + + +

+
func (a *AdminClient) Close()
+

+ Close an AdminClient instance. +

+

+ func (*AdminClient) + + ClusterID + + +

+
func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)
+

+ ClusterID returns the cluster ID as reported in broker metadata. +

+

+ Note on cancellation: Although the underlying C function respects the +timeout, it currently cannot be manually cancelled. That means manually +cancelling the context will block until the C function call returns. +

+

+ Requires broker version >= 0.10.0. +

+

+ func (*AdminClient) + + ControllerID + + +

+
func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)
+

+ ControllerID returns the broker ID of the current controller as reported in +broker metadata. +

+

+ Note on cancellation: Although the underlying C function respects the +timeout, it currently cannot be manually cancelled. That means manually +cancelling the context will block until the C function call returns. +

+

+ Requires broker version >= 0.10.0. +

+

+ func (*AdminClient) + + CreateACLs + + +

+
func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error)
+

+ CreateACLs creates one or more ACL bindings. +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+- `aclBindings` - A slice of ACL binding specifications to create.
+- `options` - Create ACLs options
+
+

+ Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful +plus an error that is not nil for client level errors +

+

+ func (*AdminClient) + + CreatePartitions + + +

+
func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)
+

+ CreatePartitions creates additional partitions for topics. +

+

+ func (*AdminClient) + + CreateTopics + + +

+
func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)
+

+ CreateTopics creates topics in cluster. +

+

+ The list of TopicSpecification objects define the per-topic partition count, replicas, etc. +

+

+ Topic creation is non-atomic and may succeed for some topics but fail for others, +make sure to check the result for topic-specific errors. +

+

+ Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API. +

+
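A short sketch of the CreateTopics flow just described; the topic name, sizing, and timeout are placeholder values:

```go
package examples

import (
	"context"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// createTopic creates a single topic and waits up to a minute for the
// controller to finish propagating it.
func createTopic(a *kafka.AdminClient) error {
	results, err := a.CreateTopics(context.Background(),
		[]kafka.TopicSpecification{{
			Topic:             "orders",
			NumPartitions:     6,
			ReplicationFactor: 3,
		}},
		kafka.SetAdminOperationTimeout(60*time.Second))
	if err != nil {
		return err
	}
	// Topic creation is non-atomic: inspect each TopicResult separately.
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError &&
			r.Error.Code() != kafka.ErrTopicAlreadyExists {
			return r.Error
		}
	}
	return nil
}
```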

+ func (*AdminClient) + + DeleteACLs + + +

+
func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error)
+

+ DeleteACLs deletes ACL bindings matching one or more ACL binding filters. +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+- `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete.
+  string attributes match exact values or any string if set to empty string.
+  Enum attributes match exact values or any value if ending with `Any`.
+  If `ResourcePatternType` is set to `ResourcePatternTypeMatch` deletes ACL bindings with:
+- `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
+- `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
+- `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
+- `options` - Delete ACLs options
+
+

+ Returns a slice of ACLBinding for each filter when the operation was successful +plus an error that is not `nil` for client level errors +

+

+ func (*AdminClient) + + DeleteConsumerGroups + + +

+
func (a *AdminClient) DeleteConsumerGroups(
+    ctx context.Context,
+    groups []string, options ...DeleteConsumerGroupsAdminOption) (result DeleteConsumerGroupsResult, err error)
+

+ DeleteConsumerGroups deletes a batch of consumer groups. +Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `groups` - A slice of groupIDs to delete.
+- `options` - DeleteConsumerGroupsAdminOption options.
+
+

+ Returns a DeleteConsumerGroupsResult containing a slice of ConsumerGroupResult, with +group-level errors, (if any) contained inside; and an error that is not nil +for client level errors. +

+

+ func (*AdminClient) + + DeleteTopics + + +

+
func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)
+

+ DeleteTopics deletes a batch of topics. +

+

+ This operation is not transactional and may succeed for a subset of topics while +failing others. +It may take several seconds after the DeleteTopics result returns success for +all the brokers to become aware that the topics are gone. During this time, +topic metadata and configuration may continue to return information about deleted topics. +

+

+ Requires broker version >= 0.10.1.0 +

+

+ func (*AdminClient) + + DescribeACLs + + +

+
func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error)
+

+ DescribeACLs matches ACL bindings by filter. +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+- `aclBindingFilter` - A filter with attributes that must match.
+  string attributes match exact values or any string if set to empty string.
+  Enum attributes match exact values or any value if ending with `Any`.
+  If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns ACL bindings with:
+- `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
+- `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
+- `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
+- `options` - Describe ACLs options
+
+

+ Returns a slice of ACLBindings when the operation was successful +plus an error that is not `nil` for client level errors +

+
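A filter sketch following the matching rules above (empty strings and the *Any enum values act as wildcards); the principal parameter and helper name are illustrative only:

```go
package examples

import (
	"context"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// listTopicACLs matches every topic ACL for one principal.
func listTopicACLs(a *kafka.AdminClient, principal string) (*kafka.DescribeACLsResult, error) {
	filter := kafka.ACLBindingFilter{
		Type:                kafka.ResourceTopic,
		Name:                "", // any topic name
		ResourcePatternType: kafka.ResourcePatternTypeAny,
		Principal:           principal,
		Host:                "", // any host
		Operation:           kafka.ACLOperationAny,
		PermissionType:      kafka.ACLPermissionTypeAny,
	}
	return a.DescribeACLs(context.Background(), filter,
		kafka.SetAdminRequestTimeout(10*time.Second))
}
```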

+ func (*AdminClient) + + DescribeCluster + + +

+
func (a *AdminClient) DescribeCluster(
+    ctx context.Context,
+    options ...DescribeClusterAdminOption) (result DescribeClusterResult, err error)
+

+ DescribeCluster describes the cluster +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `options` - DescribeClusterAdminOption options.
+
+

+ Returns ClusterDescription, which contains current cluster ID and controller +along with a slice of Nodes. It also has a slice of allowed ACLOperations. +

+

+ func (*AdminClient) + + DescribeConfigs + + +

+
func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)
+

+ DescribeConfigs retrieves configuration for cluster resources. +

+

+ The returned configuration includes default values, use +ConfigEntryResult.IsDefault or ConfigEntryResult.Source to distinguish +default values from manually configured settings. +

+

+ The value of config entries where .IsSensitive is true +will always be nil to avoid disclosing sensitive +information, such as security settings. +

+

+ Configuration entries where .IsReadOnly is true can't be modified +(with AlterConfigs). +

+

+ Synonym configuration entries are returned if the broker supports +it (broker version >= 1.1.0). See .Synonyms. +

+

+ Requires broker version >=0.11.0.0 +

+

+ Multiple resources and resource types may be requested, but at most +one resource of type ResourceBroker is allowed per call +since these resource requests must be sent to the broker specified +in the resource. +

+
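A sketch of reading back a topic's configuration and using IsDefault/IsSensitive as described above; the topic name is a placeholder:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// dumpTopicConfig prints explicitly set, non-sensitive config entries.
func dumpTopicConfig(a *kafka.AdminClient) error {
	results, err := a.DescribeConfigs(context.Background(),
		[]kafka.ConfigResource{{Type: kafka.ResourceTopic, Name: "orders"}})
	if err != nil {
		return err
	}
	for _, res := range results {
		for name, entry := range res.Config {
			if entry.IsDefault || entry.IsSensitive {
				continue // skip defaults; sensitive values are not returned anyway
			}
			fmt.Printf("%s = %s (source: %s)\n", name, entry.Value, entry.Source)
		}
	}
	return nil
}
```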

+ func (*AdminClient) + + DescribeConsumerGroups + + +

+
func (a *AdminClient) DescribeConsumerGroups(
+    ctx context.Context, groups []string,
+    options ...DescribeConsumerGroupsAdminOption) (result DescribeConsumerGroupsResult, err error)
+

+ DescribeConsumerGroups describes groups from cluster as specified by the +groups list. +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `groups` - Slice of groups to describe. This should not be nil/empty.
+- `options` - DescribeConsumerGroupsAdminOption options.
+
+

+ Returns DescribeConsumerGroupsResult, which contains a slice of +ConsumerGroupDescriptions corresponding to the input groups, plus an error +that is not `nil` for client level errors. Individual +ConsumerGroupDescriptions inside the slice should also be checked for +errors. +

+
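A sketch combining DescribeConsumerGroups with the authorized-operations option documented later in this file; the ConsumerGroupDescriptions field name on the result is assumed from the current client version:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// describeGroup describes one group and also asks the broker for the ACL
// operations the caller is authorized to perform on it.
func describeGroup(a *kafka.AdminClient, group string) error {
	res, err := a.DescribeConsumerGroups(context.Background(), []string{group},
		kafka.SetAdminOptionIncludeAuthorizedOperations(true))
	if err != nil {
		return err
	}
	for _, d := range res.ConsumerGroupDescriptions {
		if d.Error.Code() != kafka.ErrNoError {
			return d.Error
		}
		fmt.Printf("%s: state=%s members=%d ops=%v\n",
			d.GroupID, d.State, len(d.Members), d.AuthorizedOperations)
	}
	return nil
}
```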

+ func (*AdminClient) + + DescribeTopics + + +

+
func (a *AdminClient) DescribeTopics(
+    ctx context.Context, topics TopicCollection,
+    options ...DescribeTopicsAdminOption) (result DescribeTopicsResult, err error)
+

+ DescribeTopics describes topics from cluster as specified by the +topics list. +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `topics` - Collection of topics to describe. This should not have nil
+  topic names.
+- `options` - DescribeTopicsAdminOption options.
+
+

+ Returns DescribeTopicsResult, which contains a slice of +TopicDescriptions corresponding to the input topics, plus an error +that is not `nil` for client level errors. Individual +TopicDescriptions inside the slice should also be checked for +errors. Individual TopicDescriptions also have a +slice of allowed ACLOperations. +

+

+ func (*AdminClient) + + DescribeUserScramCredentials + + +

+
func (a *AdminClient) DescribeUserScramCredentials(
+    ctx context.Context, users []string,
+    options ...DescribeUserScramCredentialsAdminOption) (result DescribeUserScramCredentialsResult, err error)
+

+ DescribeUserScramCredentials describes SASL/SCRAM credentials for the
+ specified user names.

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `users` - a slice of strings, each one corresponding to a user name; no
+  duplicates are allowed.
+- `options` - DescribeUserScramCredentialsAdminOption options.
+
+

+ Returns a map from user name to user SCRAM credentials description. +Each description can have an individual error. +

+

+ func (*AdminClient) + + GetMetadata + + +

+
func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+

+ GetMetadata queries broker for cluster and topic metadata. +If topic is non-nil only information about that topic is returned, else if +allTopics is false only information about locally used topics is returned, +else information about all topics is returned. +GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. +

+

+ func (*AdminClient) + + IncrementalAlterConfigs + + +

+
func (a *AdminClient) IncrementalAlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)
+

+ IncrementalAlterConfigs alters/updates cluster resource configuration. +

+

+ Updates are not transactional so they may succeed for some resources +while fail for others. The configs for a particular resource are +updated atomically, executing the corresponding incremental +operations on the provided configurations. +

+

+ Requires broker version >=2.3.0 +

+

+ IncrementalAlterConfigs will only change configurations for provided +resources with the new configuration given. +

+

+ Multiple resources and resource types may be set, but at most one +resource of type ResourceBroker is allowed per call since these +resource requests must be sent to the broker specified in the resource. +

+
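A minimal incremental-alter sketch: only the named property is touched, everything else on the topic is left alone. The topic name and retention value are placeholders:

```go
package examples

import (
	"context"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// bumpRetention sets a single topic property via an incremental SET operation.
func bumpRetention(a *kafka.AdminClient) error {
	results, err := a.IncrementalAlterConfigs(context.Background(),
		[]kafka.ConfigResource{{
			Type: kafka.ResourceTopic,
			Name: "orders",
			Config: []kafka.ConfigEntry{{
				Name:                 "retention.ms",
				Value:                "86400000",
				IncrementalOperation: kafka.AlterConfigOpTypeSet,
			}},
		}})
	if err != nil {
		return err
	}
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			return r.Error
		}
	}
	return nil
}
```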

+ func (*AdminClient) + + IsClosed + + +

+
func (a *AdminClient) IsClosed() bool
+

+ IsClosed returns boolean representing if client is closed or not +

+

+ func (*AdminClient) + + ListConsumerGroupOffsets + + +

+
func (a *AdminClient) ListConsumerGroupOffsets(
+    ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions,
+    options ...ListConsumerGroupOffsetsAdminOption) (lcgor ListConsumerGroupOffsetsResult, err error)
+

+ ListConsumerGroupOffsets fetches the offsets for topic partition(s) for +consumer group(s). +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+- `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of which
+  has the id of a consumer group, and a slice of the TopicPartitions we
+  need to fetch the offsets for. The slice of TopicPartitions can be nil, to fetch
+  all topic partitions for that group.
+  Currently, the size of `groupsPartitions` has to be exactly one.
+- `options` - ListConsumerGroupOffsetsAdminOption options.
+
+

+ Returns a ListConsumerGroupOffsetsResult, containing a slice of +ConsumerGroupTopicPartitions corresponding to the input slice, plus an error that is +not `nil` for client level errors. Individual TopicPartitions inside each of +the ConsumerGroupTopicPartitions should also be checked for errors. +

+

+ func (*AdminClient) + + ListConsumerGroups + + +

+
func (a *AdminClient) ListConsumerGroups(
+    ctx context.Context,
+    options ...ListConsumerGroupsAdminOption) (result ListConsumerGroupsResult, err error)
+

+ ListConsumerGroups lists the consumer groups available in the cluster. +

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `options` - ListConsumerGroupsAdminOption options.
+
+

+ Returns a ListConsumerGroupsResult, which contains a slice corresponding to +each group in the cluster and a slice of errors encountered while listing. +Additionally, an error that is not nil for client-level errors is returned. +Both the returned error, and the errors slice should be checked. +

+

+ func (*AdminClient) + + ListOffsets + + +

+
func (a *AdminClient) ListOffsets(
+    ctx context.Context, topicPartitionOffsets map[TopicPartition]OffsetSpec,
+    options ...ListOffsetsAdminOption) (result ListOffsetsResult, err error)
+

+ ListOffsets describes offsets for the specified TopicPartitions based on an OffsetSpec.

+

+ Parameters: +

+
- `ctx` - context with the maximum amount of time to block, or nil for
+  indefinite.
+- `topicPartitionOffsets` - a map from TopicPartition to OffsetSpec, it
+  holds either the OffsetSpec enum value or timestamp. Must not be nil.
+- `options` - ListOffsetsAdminOption options.
+
+

+ Returns a ListOffsetsResult. +Each TopicPartition's ListOffset can have an individual error. +

+
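A hedged sketch of a ListOffsets query using the OffsetSpec constants and isolation-level option defined earlier; the ResultInfos map and its Offset/Error fields are assumed from the vendored client version and may differ in other releases:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// latestCommittedOffset queries the latest offset of one partition while
// counting only transaction-committed records.
func latestCommittedOffset(a *kafka.AdminClient, topic string, partition int32) error {
	tp := kafka.TopicPartition{Topic: &topic, Partition: partition}
	res, err := a.ListOffsets(context.Background(),
		map[kafka.TopicPartition]kafka.OffsetSpec{tp: kafka.LatestOffsetSpec},
		kafka.SetAdminIsolationLevel(kafka.IsolationLevelReadCommitted))
	if err != nil {
		return err
	}
	for tp, info := range res.ResultInfos {
		if info.Error.Code() != kafka.ErrNoError {
			return info.Error
		}
		fmt.Printf("%s[%d] latest offset: %v\n", *tp.Topic, tp.Partition, info.Offset)
	}
	return nil
}
```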

+ func (*AdminClient) + + SetOAuthBearerToken + + +

+
func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+

+ SetOAuthBearerToken sets the data to be transmitted to a broker during
+ SASL/OAUTHBEARER authentication. It will return nil on success, otherwise an error if:
+ 1) the token data is invalid (meaning an expiration time in the past,
+ or either a token value or an extension key or value that does not meet
+ the regular expression requirements as per
+ https://tools.ietf.org/html/rfc7628#section-3.1);
+ 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
+ 3) SASL/OAUTHBEARER is supported but is not configured as the client's
+ authentication mechanism.

+

+ func (*AdminClient) + + SetOAuthBearerTokenFailure + + +

+
func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error
+

+ SetOAuthBearerTokenFailure sets the error message describing why token
+ retrieval/setting failed; it also schedules a new token refresh event for 10
+ seconds later so the attempt may be retried. It will return nil on
+ success, otherwise an error if:
+ 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
+ 2) SASL/OAUTHBEARER is supported but is not configured as the client's
+ authentication mechanism.

+

+ func (*AdminClient) + + SetSaslCredentials + + +

+
func (a *AdminClient) SetSaslCredentials(username, password string) error
+

+ SetSaslCredentials sets the SASL credentials used for this admin client. +The new credentials will overwrite the old ones (which were set when creating +the admin client or by a previous call to SetSaslCredentials). The new +credentials will be used the next time the admin client needs to authenticate +to a broker. This method will not disconnect existing broker connections that +were established with the old credentials. +This method applies only to the SASL PLAIN and SCRAM mechanisms. +

+

+ func (*AdminClient) + + String + + +

+
func (a *AdminClient) String() string
+

+ String returns a human readable name for an AdminClient instance +

+

+ type + + AdminOption + + +

+

+ AdminOption is a generic type not to be used directly. +

+

+ See CreateTopicsAdminOption et.al. +

+
type AdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + AdminOptionIncludeAuthorizedOperations + + +

+

+ AdminOptionIncludeAuthorizedOperations decides if the broker should return +authorized operations. +

+

+ Default: false +

+

+ Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster. +

+
type AdminOptionIncludeAuthorizedOperations struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + SetAdminOptionIncludeAuthorizedOperations + + +

+
func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations)
+

+ SetAdminOptionIncludeAuthorizedOperations decides if the broker should return +authorized operations. +

+

+ Default: false +

+

+ Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster. +

+

+ type + + AdminOptionIsolationLevel + + +

+

+ AdminOptionIsolationLevel sets the overall request IsolationLevel. +

+

+ Default: `ReadUncommitted`. +

+

+ Valid for ListOffsets. +

+
type AdminOptionIsolationLevel struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + SetAdminIsolationLevel + + +

+
func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel)
+

+ SetAdminIsolationLevel sets the overall IsolationLevel for a request. +

+

+ Default: `ReadUncommitted`. +

+

+ Valid for ListOffsets. +

+

+ type + + AdminOptionMatchConsumerGroupStates + + +

+

+ AdminOptionMatchConsumerGroupStates decides groups in which state(s) should be +listed. +

+

+ Default: nil (lists groups in all states). +

+

+ Valid for ListConsumerGroups. +

+
type AdminOptionMatchConsumerGroupStates struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + SetAdminMatchConsumerGroupStates + + +

+
func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates)
+

+ SetAdminMatchConsumerGroupStates decides groups in which state(s) should be +listed. +

+

+ Default: nil (lists groups in all states). +

+

+ Valid for ListConsumerGroups. +

+

+ type + + AdminOptionOperationTimeout + + +

+

+ AdminOptionOperationTimeout sets the broker's operation timeout, such as the +timeout for CreateTopics to complete the creation of topics on the controller +before returning a result to the application. +

+

+ CreateTopics, DeleteTopics, CreatePartitions: +a value 0 will return immediately after triggering topic +creation, while > 0 will wait this long for topic creation to propagate +in cluster. +

+

+ Default: 0 (return immediately). +

+

+ Valid for CreateTopics, DeleteTopics, CreatePartitions. +

+
type AdminOptionOperationTimeout struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + SetAdminOperationTimeout + + +

+
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)
+

+ SetAdminOperationTimeout sets the broker's operation timeout, such as the +timeout for CreateTopics to complete the creation of topics on the controller +before returning a result to the application. +

+

+ CreateTopics, DeleteTopics, CreatePartitions: +a value 0 will return immediately after triggering topic +creation, while > 0 will wait this long for topic creation to propagate +in cluster. +

+

+ Default: 0 (return immediately). +

+

+ Valid for CreateTopics, DeleteTopics, CreatePartitions. +

+

+ type + + AdminOptionRequestTimeout + + +

+

+ AdminOptionRequestTimeout sets the overall request timeout, including broker +lookup, request transmission, operation time on broker, and response. +

+

+ Default: `socket.timeout.ms`. +

+

+ Valid for all Admin API methods. +

+
type AdminOptionRequestTimeout struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + SetAdminRequestTimeout + + +

+
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)
+

+ SetAdminRequestTimeout sets the overall request timeout, including broker +lookup, request transmission, operation time on broker, and response. +

+

+ Default: `socket.timeout.ms`. +

+

+ Valid for all Admin API methods. +

+

+ type + + AdminOptionRequireStableOffsets + + +

+

+ AdminOptionRequireStableOffsets decides if the broker should return stable +offsets (transaction-committed). +

+

+ Default: false +

+

+ Valid for ListConsumerGroupOffsets. +

+
type AdminOptionRequireStableOffsets struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + SetAdminRequireStableOffsets + + +

+
func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets)
+

+ SetAdminRequireStableOffsets decides if the broker should return stable +offsets (transaction-committed). +

+

+ Default: false +

+

+ Valid for ListConsumerGroupOffsets. +

+

+ type + + AdminOptionValidateOnly + + +

+

+ AdminOptionValidateOnly tells the broker to only validate the request, +without performing the requested operation (create topics, etc). +

+

+ Default: false. +

+

+ Valid for CreateTopics, CreatePartitions, AlterConfigs +

+
type AdminOptionValidateOnly struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + SetAdminValidateOnly + + +

+
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)
+

+ SetAdminValidateOnly tells the broker to only validate the request, +without performing the requested operation (create topics, etc). +

+

+ Default: false. +

+

+ Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs +

+

+ type + + AlterConfigOpType + + +

+

+ AlterConfigOpType specifies the operation to perform +on the ConfigEntry for IncrementalAlterConfig +

+
type AlterConfigOpType int
+

+ func (AlterConfigOpType) + + String + + +

+
func (o AlterConfigOpType) String() string
+

+ String returns the human-readable representation of an AlterOperation +

+

+ type + + AlterConfigsAdminOption + + +

+

+ AlterConfigsAdminOption - see setters. +

+

+ See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental. +

+
type AlterConfigsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + AlterConsumerGroupOffsetsAdminOption + + +

+

+ AlterConsumerGroupOffsetsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout. +

+
type AlterConsumerGroupOffsetsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + AlterConsumerGroupOffsetsResult + + +

+

+ AlterConsumerGroupOffsetsResult represents the result of a +AlterConsumerGroupOffsets operation. +

+
type AlterConsumerGroupOffsetsResult struct {
+    // A slice of ConsumerGroupTopicPartitions, each element represents a group's
+    // TopicPartitions and Offsets.
+    ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions
+}
+
+

+ type + + AlterOperation + + +

+

+ AlterOperation specifies the operation to perform on the ConfigEntry. +Currently only AlterOperationSet. +

+
type AlterOperation int
+

+ func (AlterOperation) + + String + + +

+
func (o AlterOperation) String() string
+

+ String returns the human-readable representation of an AlterOperation +

+

+ type + + AlterUserScramCredentialsAdminOption + + +

+

+ AlterUserScramCredentialsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout. +

+
type AlterUserScramCredentialsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + AlterUserScramCredentialsResult + + +

+

+ AlterUserScramCredentialsResult represents the result of a +AlterUserScramCredentials call. +

+
type AlterUserScramCredentialsResult struct {
+    // Errors - Map from user name
+    // to an Error, with ErrNoError code on success.
+    Errors map[string]Error
+}
+
+

+ type + + AssignedPartitions + + +

+

+ AssignedPartitions consumer group rebalance event: assigned partition set +

+
type AssignedPartitions struct {
+    Partitions []TopicPartition
+}
+
+

+ func (AssignedPartitions) + + String + + +

+
func (e AssignedPartitions) String() string
+

+ type + + BrokerMetadata + + +

+

+ BrokerMetadata contains per-broker metadata +

+
type BrokerMetadata struct {
+    ID   int32
+    Host string
+    Port int
+}
+
+

+ type + + ConfigEntry + + +

+

+ ConfigEntry holds parameters for altering a resource's configuration. +

+
type ConfigEntry struct {
+    // Name of configuration entry, e.g., topic configuration property name.
+    Name string
+    // Value of configuration entry.
+    Value string
+    // Deprecated: Operation to perform on the entry.
+    Operation AlterOperation
+    // Operation to perform on the entry incrementally.
+    IncrementalOperation AlterConfigOpType
+}
+
+

+ func + + StringMapToConfigEntries + + +

+
func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry
+

+ StringMapToConfigEntries creates a new map of ConfigEntry objects from the +provided string map. The AlterOperation is set on each created entry. +

+

+ func + + StringMapToIncrementalConfigEntries + + +

+
func StringMapToIncrementalConfigEntries(stringMap map[string]string,
+    operationMap map[string]AlterConfigOpType) []ConfigEntry
+

+ StringMapToIncrementalConfigEntries creates a new slice of ConfigEntry objects from the
+ provided string map and operation map. The AlterConfigOpType is set on each created entry.

+

+ func (ConfigEntry) + + String + + +

+
func (c ConfigEntry) String() string
+

+ String returns a human-readable representation of a ConfigEntry. +

+

+ type + + ConfigEntryResult + + +

+

+ ConfigEntryResult contains the result of a single configuration entry from a +DescribeConfigs request. +

+
type ConfigEntryResult struct {
+    // Name of configuration entry, e.g., topic configuration property name.
+    Name string
+    // Value of configuration entry.
+    Value string
+    // Source indicates the configuration source.
+    Source ConfigSource
+    // IsReadOnly indicates whether the configuration entry can be altered.
+    IsReadOnly bool
+    // IsDefault indicates whether the value is at its default.
+    IsDefault bool
+    // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset.
+    IsSensitive bool
+    // IsSynonym indicates whether the configuration entry is a synonym for another configuration property.
+    IsSynonym bool
+    // Synonyms contains a map of configuration entries that are synonyms to this configuration entry.
+    Synonyms map[string]ConfigEntryResult
+}
+
+

+ func (ConfigEntryResult) + + String + + +

+
func (c ConfigEntryResult) String() string
+

+ String returns a human-readable representation of a ConfigEntryResult. +

+

+ type + + ConfigMap + + +

+

+ ConfigMap is a map containing standard librdkafka configuration properties as documented in: + + https://github.com/confluentinc/librdkafka/tree/master/CONFIGURATION.md + +

+

+ The special property "default.topic.config" (optional) is a ConfigMap +containing default topic configuration properties. +

+

+ The use of "default.topic.config" is deprecated, +topic configuration properties shall be specified in the standard ConfigMap. +For backwards compatibility, "default.topic.config" (if supplied) +takes precedence. +

+
type ConfigMap map[string]ConfigValue
+

+ func (ConfigMap) + + Get + + +

+
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)
+

+ Get finds the given key in the ConfigMap and returns its value. +If the key is not found `defval` is returned. +If the key is found but the type does not match that of `defval` (unless nil) +an ErrInvalidArg error is returned. +

+

+ func (ConfigMap) + + Set + + +

+
func (m ConfigMap) Set(kv string) error
+

+ Set implements flag.Set (command line argument parser) as a convenience +for `-X key=value` config. +

+

+ func (ConfigMap) + + SetKey + + +

+
func (m ConfigMap) SetKey(key string, value ConfigValue) error
+

+ SetKey sets configuration property key to value. +

+

+ For user convenience a key prefixed with {topic}. will be +set on the "default.topic.config" sub-map, this use is deprecated. +

+
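A sketch of the three ConfigMap accessors described above; the property names and values are placeholders, and the helper name buildConfig is not part of the library:

```go
package examples

import (
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// buildConfig populates a ConfigMap in the three documented ways.
func buildConfig(extra []string) (*kafka.ConfigMap, error) {
	cm := &kafka.ConfigMap{"bootstrap.servers": "localhost:9092"}

	// SetKey sets a single property programmatically.
	if err := cm.SetKey("enable.idempotence", true); err != nil {
		return nil, err
	}

	// Set parses "key=value" strings, convenient for -X command line flags.
	for _, kv := range extra {
		if err := cm.Set(kv); err != nil {
			return nil, err
		}
	}

	// Get reads a value back, falling back to the provided default.
	if _, err := cm.Get("client.id", "example-client"); err != nil {
		return nil, err
	}
	return cm, nil
}
```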

+ type + + ConfigResource + + +

+

+ ConfigResource holds parameters for altering an Apache Kafka configuration resource +

+
type ConfigResource struct {
+    // Type of resource to set.
+    Type ResourceType
+    // Name of resource to set.
+    Name string
+    // Config entries to set.
+    // Configuration updates are atomic, any configuration property not provided
+    // here will be reverted (by the broker) to its default value.
+    // Use DescribeConfigs to retrieve the list of current configuration entry values.
+    Config []ConfigEntry
+}
+
+

+ func (ConfigResource) + + String + + +

+
func (c ConfigResource) String() string
+

+ String returns a human-readable representation of a ConfigResource +

+

+ type + + ConfigResourceResult + + +

+

+ ConfigResourceResult provides the result for a resource from a AlterConfigs or +DescribeConfigs request. +

+
type ConfigResourceResult struct {
+    // Type of returned result resource.
+    Type ResourceType
+    // Name of returned result resource.
+    Name string
+    // Error, if any, of returned result resource.
+    Error Error
+    // Config entries, if any, of returned result resource.
+    Config map[string]ConfigEntryResult
+}
+
+

+ func (ConfigResourceResult) + + String + + +

+
func (c ConfigResourceResult) String() string
+

+ String returns a human-readable representation of a ConfigResourceResult. +

+

+ type + + ConfigSource + + +

+

+ ConfigSource represents an Apache Kafka config source +

+
type ConfigSource int
+

+ func (ConfigSource) + + String + + +

+
func (t ConfigSource) String() string
+

+ String returns the human-readable representation of a ConfigSource type +

+

+ type + + ConfigValue + + +

+

+ ConfigValue supports the following types: +

+
bool, int, string, any type with the standard String() interface
+
+
type ConfigValue interface{}
+

+ type + + Consumer + + +

+

+ Consumer implements a High-level Apache Kafka Consumer instance +

+
type Consumer struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + NewConsumer + + +

+
func NewConsumer(conf *ConfigMap) (*Consumer, error)
+

+ NewConsumer creates a new high-level Consumer instance. +

+

+ conf is a *ConfigMap with standard librdkafka configuration properties. +

+

+ Supported special configuration properties: +

+
go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
+                                     If set to true the app must handle the AssignedPartitions and
+                                     RevokedPartitions events and call Assign() and Unassign()
+                                     respectively.
+go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled.
+go.events.channel.size (int, 1000) - Events() channel size
+go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
+go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
+
+

+ WARNING: Due to the buffering nature of channels (and queues in general) the +use of the events channel risks receiving outdated events and +messages. Minimizing go.events.channel.size reduces the risk +and number of outdated events and messages but does not eliminate +the factor completely. With a channel size of 1 at most one +event or message may be outdated. +

+
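A minimal construction sketch for a Poll()-based consumer; broker address, group id, and config values are placeholders:

```go
package examples

import (
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// newConsumer builds a plain high-level consumer without the deprecated
// events channel; messages are fetched via Poll() or ReadMessage().
func newConsumer() (*kafka.Consumer, error) {
	return kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092",
		"group.id":          "example-group",
		"auto.offset.reset": "earliest",
	})
}
```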

+ func (*Consumer) + + Assign + + +

+
func (c *Consumer) Assign(partitions []TopicPartition) (err error)
+

+ Assign an atomic set of partitions to consume. +

+

+ The .Offset field of each TopicPartition must either be set to an absolute +starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), +but should typically be set to `kafka.OffsetStored` to have the consumer +use the committed offset as a start position, with a fallback to +`auto.offset.reset` if there is no committed offset. +

+

+ This replaces the current assignment. +

+

+ func (*Consumer) + + Assignment + + +

+
func (c *Consumer) Assignment() (partitions []TopicPartition, err error)
+

+ Assignment returns the current partition assignments +

+

+ func (*Consumer) + + AssignmentLost + + +

+
func (c *Consumer) AssignmentLost() bool
+

+ AssignmentLost returns true if the current partition assignment has been lost.
+ This method is only applicable for use with a subscribing consumer when
+ handling a rebalance event or callback.
+ Partitions that have been lost may already be owned by other members in the
+ group and therefore committing offsets, for example, may fail.

+

+ func (*Consumer) + + Close + + +

+
func (c *Consumer) Close() (err error)
+

+ Close Consumer instance. +The object is no longer usable after this call. +

+

+ func (*Consumer) + + Commit + + +

+
func (c *Consumer) Commit() ([]TopicPartition, error)
+

+ Commit offsets for currently assigned partitions +This is a blocking call. +Returns the committed offsets on success. +

+

+ func (*Consumer) + + CommitMessage + + +

+
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)
+

+ CommitMessage commits offset based on the provided message. +This is a blocking call. +Returns the committed offsets on success. +

+

+ func (*Consumer) + + CommitOffsets + + +

+
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)
+

+ CommitOffsets commits the provided list of offsets +This is a blocking call. +Returns the committed offsets on success. +

+

+ func (*Consumer) + + Committed + + +

+
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+

+ Committed retrieves committed offsets for the given set of partitions +

+

+ func (*Consumer) + + Events + + +

+
func (c *Consumer) Events() chan Event
+

+ Events returns the Events channel (if enabled) +

+

+ Deprecated: Events (channel based consumer) is deprecated in favour +of Poll(). +

+

+ func (*Consumer) + + GetConsumerGroupMetadata + + +

+
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)
+

+ GetConsumerGroupMetadata returns the consumer's current group metadata. +This object should be passed to the transactional producer's +SendOffsetsToTransaction() API. +

+

+ func (*Consumer) + + GetMetadata + + +

+
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+

+ GetMetadata queries broker for cluster and topic metadata. +If topic is non-nil only information about that topic is returned, else if +allTopics is false only information about locally used topics is returned, +else information about all topics is returned. +GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. +

+

+ func (*Consumer) + + GetRebalanceProtocol + + +

+
func (c *Consumer) GetRebalanceProtocol() string
+

+ GetRebalanceProtocol returns the current consumer group rebalance protocol, +which is either "EAGER" or "COOPERATIVE". +If the rebalance protocol is not known in the current state an empty string +is returned. +Should typically only be called during rebalancing. +

+

+ func (*Consumer) + + GetWatermarkOffsets + + +

+
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)
+

+ GetWatermarkOffsets returns the cached low and high offsets for the given topic +and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets. +The low offset is populated every statistics.interval.ms if that value is set. +OffsetInvalid will be returned if there is no cached offset for either value. +

+

+ func (*Consumer) + + IncrementalAssign + + +

+
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)
+

+ IncrementalAssign adds the specified partitions to the current set of +partitions to consume. +

+

+ The .Offset field of each TopicPartition must either be set to an absolute +starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), +but should typically be set to `kafka.OffsetStored` to have the consumer +use the committed offset as a start position, with a fallback to +`auto.offset.reset` if there is no committed offset. +

+

+ The new partitions must not be part of the current assignment. +

+

+ func (*Consumer) + + IncrementalUnassign + + +

+
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)
+

+ IncrementalUnassign removes the specified partitions from the current set of +partitions to consume. +

+

+ The .Offset field of the TopicPartition is ignored. +

+

+ The removed partitions must be part of the current assignment. +

+

+ func (*Consumer) + + IsClosed + + +

+
func (c *Consumer) IsClosed() bool
+

+ IsClosed returns boolean representing if client is closed or not +

+

+ func (*Consumer) + + Logs + + +

+
func (c *Consumer) Logs() chan LogEvent
+

+ Logs returns the log channel if enabled, or nil otherwise. +

+

+ func (*Consumer) + + OffsetsForTimes + + +

+
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+

+ OffsetsForTimes looks up offsets by timestamp for the given partitions. +

+

+ The returned offset for each partition is the earliest offset whose +timestamp is greater than or equal to the given timestamp in the +corresponding partition. If the provided timestamp exceeds that of the +last message in the partition, a value of -1 will be returned. +

+

+ The timestamps to query are represented as `.Offset` in the `times` +argument and the looked up offsets are represented as `.Offset` in the returned +`offsets` list. +

+

+ The function will block for at most timeoutMs milliseconds. +

+

+ Duplicate Topic+Partitions are not supported. +Per-partition errors may be returned in the `.Error` field. +

+

+ func (*Consumer) + + Pause + + +

+
func (c *Consumer) Pause(partitions []TopicPartition) (err error)
+

+ Pause consumption for the provided list of partitions +

+

+ Note that messages already enqueued on the consumer's Event channel +(if `go.events.channel.enable` has been set) will NOT be purged by +this call, set `go.events.channel.size` accordingly. +

+

+ func (*Consumer) + + Poll + + +

+
func (c *Consumer) Poll(timeoutMs int) (event Event)
+

+ Poll the consumer for messages or events. +

+

+ Will block for at most timeoutMs milliseconds.

+

+ The following callbacks may be triggered: +

+
Subscribe()'s rebalanceCb
+
+

+ Returns nil on timeout, else an Event +

+
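A sketch of the usual Poll() loop: a nil return means the timeout expired, anything else is dispatched on its concrete event type. The loop-forever shape is illustrative; real code would add its own shutdown signal:

```go
package examples

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// pollLoop polls the consumer and handles the event types it cares about.
func pollLoop(c *kafka.Consumer) {
	for {
		switch ev := c.Poll(100).(type) {
		case nil:
			// Timeout, nothing to do this round.
		case *kafka.Message:
			fmt.Printf("message on %v: %s\n", ev.TopicPartition, string(ev.Value))
		case kafka.Error:
			fmt.Printf("error: %v\n", ev)
			if ev.IsFatal() {
				return
			}
		default:
			fmt.Printf("ignored event: %v\n", ev)
		}
	}
}
```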

+ func (*Consumer) + + Position + + +

+
func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)
+

+ Position returns the current consume position for the given partitions. +Typical use is to call Assignment() to get the partition list +and then pass it to Position() to get the current consume position for +each of the assigned partitions. +The consume position is the next message to read from the partition. +i.e., the offset of the last message seen by the application + 1. +

+

+ func (*Consumer) + + QueryWatermarkOffsets + + +

+
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+

+ QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition. +

+

+ func (*Consumer) + + ReadMessage + + +

+
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)
+

+ ReadMessage polls the consumer for a message. +

+

+ This is a convenience API that wraps Poll() and only returns +messages or errors. All other event types are discarded. +

+

+ The call will block for at most `timeout` waiting for +a new message or error. `timeout` may be set to -1 for +indefinite wait. +

+

+ Timeout is returned as (nil, err) where `err.(kafka.Error).IsTimeout() == true`. +

+

+ Messages are returned as (msg, nil), +while general errors are returned as (nil, err), +and partition-specific errors are returned as (msg, err) where +msg.TopicPartition provides partition-specific information (such as topic, partition and offset). +

+

+ All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded. +

+

+ func (*Consumer) + + Resume + + +

+
func (c *Consumer) Resume(partitions []TopicPartition) (err error)
+

+ Resume consumption for the provided list of partitions +

+

+ func (*Consumer) + + Seek + + +

+
func (c *Consumer) Seek(partition TopicPartition, ignoredTimeoutMs int) error
+

+ Seek seeks the given topic partitions using the offset from the TopicPartition. +

+

+ The ignoredTimeoutMs parameter is ignored. Instead, this method blocks until +the fetcher state is updated for the given partition with the new offset. +This guarantees that no previously fetched messages for the old offset (or +fetch position) will be passed to the application once this call returns. +It will still take some time after the method returns until messages are +fetched at the new offset. +

+

+ Seek() may only be used for partitions already being consumed +(through Assign() or implicitly through a self-rebalanced Subscribe()). +To set the starting offset it is preferred to use Assign() and provide +a starting offset for each partition. +

+

+ Returns an error on failure or nil otherwise. +Deprecated: Seek is deprecated in favour of SeekPartitions(). +

+

+ func (*Consumer) + + SeekPartitions + + +

+
func (c *Consumer) SeekPartitions(partitions []TopicPartition) ([]TopicPartition, error)
+

+ SeekPartitions seeks the given topic partitions to the per-partition offset +stored in the .Offset field of each partition. +

+

+ The offset may be either absolute (>= 0) or a logical offset (e.g. OffsetEnd). +

+

+ SeekPartitions() may only be used for partitions already being consumed +(through Assign() or implicitly through a self-rebalanced Subscribe()). +To set the starting offset it is preferred to use Assign() in a +kafka.AssignedPartitions handler and provide a starting offset for each +partition. +

+

+ Returns an error on failure or nil otherwise. Individual partition errors +should be checked in the per-partition .Error field. +

+

+ func (*Consumer) + + SetOAuthBearerToken + + +

+
func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+

+ SetOAuthBearerToken sets the the data to be transmitted +to a broker during SASL/OAUTHBEARER authentication. It will return nil +on success, otherwise an error if: +1) the token data is invalid (meaning an expiration time in the past +or either a token value or an extension key or value that does not meet +the regular expression requirements as per + + https://tools.ietf.org/html/rfc7628#section-3.1 + + ); +2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +3) SASL/OAUTHBEARER is supported but is not configured as the client's +authentication mechanism. +

+

+ func (*Consumer) + + SetOAuthBearerTokenFailure + + +

+
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error
+

+ SetOAuthBearerTokenFailure sets the error message describing why token +retrieval/setting failed; it also schedules a new token refresh event for 10 +seconds later so the attempt may be retried. It will return nil on +success, otherwise an error if: +1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +2) SASL/OAUTHBEARER is supported but is not configured as the client's +authentication mechanism. +

+

+ func (*Consumer) + + SetSaslCredentials + + +

+
func (c *Consumer) SetSaslCredentials(username, password string) error
+

+ SetSaslCredentials sets the SASL credentials used for this consumer. The new credentials +will overwrite the old ones (which were set when creating the consumer or by a previous +call to SetSaslCredentials). The new credentials will be used the next time the +consumer needs to authenticate to a broker. This method will not disconnect +existing broker connections that were established with the old credentials. +This method applies only to the SASL PLAIN and SCRAM mechanisms. +

+

+ func (*Consumer) + + StoreMessage + + +

+
func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error)
+

+ StoreMessage stores offset based on the provided message. +This is a convenience method that uses StoreOffsets to do the actual work. +

+

+ func (*Consumer) + + StoreOffsets + + +

+
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)
+

+ StoreOffsets stores the provided list of offsets that will be committed +to the offset store according to `auto.commit.interval.ms` or manual +offset-less Commit(). +

+

+ Returns the stored offsets on success. If at least one offset couldn't be stored, +an error and a list of offsets is returned. Each offset can be checked for +specific errors via its `.Error` member. +
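+ A hedged sketch of the at-least-once pattern these methods enable: create the consumer with `enable.auto.offset.store` set to false, process each message, then store its offset so that only processed messages are auto-committed (the handle function below is a hypothetical stand-in for application logic): +
+// Assumes the consumer was created with "enable.auto.offset.store": false.
+func processAndStore(c *kafka.Consumer, msg *kafka.Message) error {
+    if err := handle(msg); err != nil { // handle is a hypothetical processing step
+        return err // offset not stored, so the message can be re-consumed
+    }
+    _, err := c.StoreMessage(msg)
+    return err
+}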

+

+ func (*Consumer) + + String + + +

+
func (c *Consumer) String() string
+

+ String returns a human-readable name for a Consumer instance. +

+

+ func (*Consumer) + + Subscribe + + +

+
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
+

+ Subscribe subscribes to a single topic. +This replaces the current subscription. +

+

+ func (*Consumer) + + SubscribeTopics + + +

+
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)
+

+ SubscribeTopics subscribes to the provided list of topics. +This replaces the current subscription. +
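+ A minimal, self-contained consumption sketch; the broker address, group id and topic names are assumptions, and the import path matches the confluent-kafka-go/v2 module referenced in go.mod: +
+package main
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+)
+
+func main() {
+    c, err := kafka.NewConsumer(&kafka.ConfigMap{
+        "bootstrap.servers": "localhost:9092", // assumed broker
+        "group.id":          "example-group",  // assumed group
+        "auto.offset.reset": "earliest",
+    })
+    if err != nil {
+        panic(err)
+    }
+    defer c.Close()
+
+    // Replaces any previous subscription; the rebalance callback is omitted.
+    if err := c.SubscribeTopics([]string{"topic-a", "topic-b"}, nil); err != nil {
+        panic(err)
+    }
+
+    for {
+        msg, err := c.ReadMessage(time.Second)
+        if err != nil {
+            // Timeouts are expected when no message is available.
+            if kerr, ok := err.(kafka.Error); ok && kerr.IsTimeout() {
+                continue
+            }
+            fmt.Printf("consumer error: %v\n", err)
+            continue
+        }
+        fmt.Printf("%s: %s\n", msg.TopicPartition, string(msg.Value))
+    }
+}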

+

+ func (*Consumer) + + Subscription + + +

+
func (c *Consumer) Subscription() (topics []string, err error)
+

+ Subscription returns the current subscription as set by Subscribe() or SubscribeTopics(). +

+

+ func (*Consumer) + + Unassign + + +

+
func (c *Consumer) Unassign() (err error)
+

+ Unassign the current set of partitions to consume. +

+

+ func (*Consumer) + + Unsubscribe + + +

+
func (c *Consumer) Unsubscribe() (err error)
+

+ Unsubscribe from the current subscription, if any. +

+

+ type + + ConsumerGroupDescription + + +

+

+ ConsumerGroupDescription represents the result of DescribeConsumerGroups for +a single group. +

+
type ConsumerGroupDescription struct {
+    // Group id.
+    GroupID string
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+    // Is a simple consumer group.
+    IsSimpleConsumerGroup bool
+    // Partition assignor identifier.
+    PartitionAssignor string
+    // Consumer group state.
+    State ConsumerGroupState
+    // Consumer group coordinator (has ID == -1 if not known).
+    Coordinator Node
+    // Members list.
+    Members []MemberDescription
+    // Operations allowed for the group (nil if not available or not requested)
+    AuthorizedOperations []ACLOperation
+}
+
+

+ type + + ConsumerGroupListing + + +

+

+ ConsumerGroupListing represents the result of ListConsumerGroups for a single +group. +

+
type ConsumerGroupListing struct {
+    // Group id.
+    GroupID string
+    // Is a simple consumer group.
+    IsSimpleConsumerGroup bool
+    // Group state.
+    State ConsumerGroupState
+}
+
+

+ type + + ConsumerGroupMetadata + + +

+

+ ConsumerGroupMetadata reflects the current consumer group member metadata. +

+
type ConsumerGroupMetadata struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + NewTestConsumerGroupMetadata + + +

+
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)
+

+ NewTestConsumerGroupMetadata creates a new consumer group metadata instance +mainly for testing use. +Use GetConsumerGroupMetadata() to retrieve the real metadata. +

+

+ type + + ConsumerGroupResult + + +

+

+ ConsumerGroupResult provides per-group operation result (error) information. +

+
type ConsumerGroupResult struct {
+    // Group name
+    Group string
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+}
+
+

+ func (ConsumerGroupResult) + + String + + +

+
func (g ConsumerGroupResult) String() string
+

+ String returns a human-readable representation of a ConsumerGroupResult. +

+

+ type + + ConsumerGroupState + + +

+

+ ConsumerGroupState represents a consumer group state +

+
type ConsumerGroupState int
+

+ func + + ConsumerGroupStateFromString + + +

+
func ConsumerGroupStateFromString(stateString string) (ConsumerGroupState, error)
+

+ ConsumerGroupStateFromString translates a consumer group state name/string to +a ConsumerGroupState value. +

+

+ func (ConsumerGroupState) + + String + + +

+
func (t ConsumerGroupState) String() string
+

+ String returns the human-readable representation of a ConsumerGroupState. +

+

+ type + + ConsumerGroupTopicPartitions + + +

+

+ ConsumerGroupTopicPartitions represents a consumer group's TopicPartitions. +

+
type ConsumerGroupTopicPartitions struct {
+    // Group name
+    Group string
+    // Partitions list
+    Partitions []TopicPartition
+}
+
+

+ func (ConsumerGroupTopicPartitions) + + String + + +

+
func (gtp ConsumerGroupTopicPartitions) String() string
+

+ type + + CreateACLResult + + +

+

+ CreateACLResult provides create ACL error information. +

+
type CreateACLResult struct {
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+}
+
+

+ type + + CreateACLsAdminOption + + +

+

+ CreateACLsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout +

+
type CreateACLsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + CreatePartitionsAdminOption + + +

+

+ CreatePartitionsAdminOption - see setters. +

+

+ See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. +

+
type CreatePartitionsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + CreateTopicsAdminOption + + +

+

+ CreateTopicsAdminOption - see setters. +

+

+ See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. +

+
type CreateTopicsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DeleteACLsAdminOption + + +

+

+ DeleteACLsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout +

+
type DeleteACLsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DeleteACLsResult + + +

+

+ DeleteACLsResult provides delete ACLs result or error information. +

+
type DeleteACLsResult = DescribeACLsResult
+

+ type + + DeleteConsumerGroupsAdminOption + + +

+

+ DeleteConsumerGroupsAdminOption - see setters. +

+

+ See SetAdminRequestTimeout. +

+
type DeleteConsumerGroupsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DeleteConsumerGroupsResult + + +

+

+ DeleteConsumerGroupsResult represents the result of a DeleteConsumerGroups +call. +

+
type DeleteConsumerGroupsResult struct {
+    // Slice of ConsumerGroupResult.
+    ConsumerGroupResults []ConsumerGroupResult
+}
+
+

+ type + + DeleteTopicsAdminOption + + +

+

+ DeleteTopicsAdminOption - see setters. +

+

+ See SetAdminRequestTimeout, SetAdminOperationTimeout. +

+
type DeleteTopicsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeACLsAdminOption + + +

+

+ DescribeACLsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout +

+
type DescribeACLsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeACLsResult + + +

+

+ DescribeACLsResult provides describe ACLs result or error information. +

+
type DescribeACLsResult struct {
+    // Slice of ACL bindings matching the provided filter
+    ACLBindings ACLBindings
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+}
+
+

+ type + + DescribeClusterAdminOption + + +

+

+ DescribeClusterAdminOption - see setter. +

+

+ See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. +

+
type DescribeClusterAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeClusterResult + + +

+

+ DescribeClusterResult represents the result of DescribeCluster. +

+
type DescribeClusterResult struct {
+    // Cluster id for the cluster (always available if broker version >= 0.10.1.0, otherwise nil).
+    ClusterID *string
+    // Current controller broker for the cluster (nil if there is none).
+    Controller *Node
+    // List of brokers in the cluster.
+    Nodes []Node
+    // Operations allowed for the cluster (nil if not available or not requested).
+    AuthorizedOperations []ACLOperation
+}
+
+

+ type + + DescribeConfigsAdminOption + + +

+

+ DescribeConfigsAdminOption - see setters. +

+

+ See SetAdminRequestTimeout. +

+
type DescribeConfigsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeConsumerGroupsAdminOption + + +

+

+ DescribeConsumerGroupsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. +

+
type DescribeConsumerGroupsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeConsumerGroupsResult + + +

+

+ DescribeConsumerGroupsResult represents the result of a +DescribeConsumerGroups call. +

+
type DescribeConsumerGroupsResult struct {
+    // Slice of ConsumerGroupDescription.
+    ConsumerGroupDescriptions []ConsumerGroupDescription
+}
+
+

+ type + + DescribeTopicsAdminOption + + +

+

+ DescribeTopicsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. +

+
type DescribeTopicsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeTopicsResult + + +

+

+ DescribeTopicsResult represents the result of a +DescribeTopics call. +

+
type DescribeTopicsResult struct {
+    // Slice of TopicDescription.
+    TopicDescriptions []TopicDescription
+}
+
+

+ type + + DescribeUserScramCredentialsAdminOption + + +

+

+ DescribeUserScramCredentialsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout. +

+
type DescribeUserScramCredentialsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeUserScramCredentialsResult + + +

+

+ DescribeUserScramCredentialsResult represents the result of a +DescribeUserScramCredentials call. +

+
type DescribeUserScramCredentialsResult struct {
+    // Descriptions - Map from user name
+    // to UserScramCredentialsDescription
+    Descriptions map[string]UserScramCredentialsDescription
+}
+
+

+ type + + Error + + +

+

+ Error provides a Kafka-specific error container +

+
type Error struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + NewError + + +

+
func NewError(code ErrorCode, str string, fatal bool) (err Error)
+

+ NewError creates a new Error. +

+

+ func (Error) + + Code + + +

+
func (e Error) Code() ErrorCode
+

+ Code returns the ErrorCode of an Error +

+

+ func (Error) + + Error + + +

+
func (e Error) Error() string
+

+ Error returns a human readable representation of an Error +Same as Error.String() +

+

+ func (Error) + + IsFatal + + +

+
func (e Error) IsFatal() bool
+

+ IsFatal returns true if the error is a fatal error. +A fatal error indicates the client instance is no longer operable and +should be terminated. Typical causes include non-recoverable +idempotent producer errors. +

+

+ func (Error) + + IsRetriable + + +

+
func (e Error) IsRetriable() bool
+

+ IsRetriable returns true if the operation that caused this error +may be retried. +This flag is currently only set by the Transactional producer API. +

+

+ func (Error) + + IsTimeout + + +

+
func (e Error) IsTimeout() bool
+

+ IsTimeout returns true if the error is a timeout error. +A timeout error indicates that the operation timed out locally. +
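+ A small illustrative helper (not upstream code) showing how Code(), IsFatal(), IsRetriable() and IsTimeout() are typically combined when inspecting an error returned by the client: +
+func classify(err error) string {
+    kerr, ok := err.(kafka.Error)
+    if !ok {
+        return "not a kafka.Error"
+    }
+    switch {
+    case kerr.IsFatal():
+        return "fatal: terminate this client instance"
+    case kerr.IsTimeout():
+        return "local timeout: safe to call again"
+    case kerr.IsRetriable():
+        return "retriable: retry the operation"
+    case kerr.Code() == kafka.ErrUnknownTopicOrPart:
+        return "topic or partition does not exist"
+    default:
+        return kerr.Code().String()
+    }
+}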

+

+ func (Error) + + String + + +

+
func (e Error) String() string
+

+ String returns a human readable representation of an Error +

+

+ func (Error) + + TxnRequiresAbort + + +

+
func (e Error) TxnRequiresAbort() bool
+

+ TxnRequiresAbort returns true if the error is an abortable transaction error +that requires the application to abort the current transaction with +AbortTransaction() and start a new transaction with BeginTransaction() +if it wishes to proceed with transactional operations. +This flag is only set by the Transactional producer API. +

+

+ type + + ErrorCode + + +

+

+ ErrorCode is the integer representation of local and broker error codes +

+
type ErrorCode int
+
const (
+    // ErrBadMsg Local: Bad message format
+    ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
+    // ErrBadCompression Local: Invalid compressed data
+    ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
+    // ErrDestroy Local: Broker handle destroyed
+    ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
+    // ErrFail Local: Communication failure with broker
+    ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
+    // ErrTransport Local: Broker transport failure
+    ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
+    // ErrCritSysResource Local: Critical system resource failure
+    ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
+    // ErrResolve Local: Host resolution failure
+    ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
+    // ErrMsgTimedOut Local: Message timed out
+    ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
+    // ErrPartitionEOF Broker: No more messages
+    ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
+    // ErrUnknownPartition Local: Unknown partition
+    ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+    // ErrFs Local: File or filesystem error
+    ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
+    // ErrUnknownTopic Local: Unknown topic
+    ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+    // ErrAllBrokersDown Local: All broker connections are down
+    ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
+    // ErrInvalidArg Local: Invalid argument or configuration
+    ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
+    // ErrTimedOut Local: Timed out
+    ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
+    // ErrQueueFull Local: Queue full
+    ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
+    // ErrIsrInsuff Local: ISR count insufficient
+    ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
+    // ErrNodeUpdate Local: Broker node update
+    ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
+    // ErrSsl Local: SSL error
+    ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
+    // ErrWaitCoord Local: Waiting for coordinator
+    ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
+    // ErrUnknownGroup Local: Unknown group
+    ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
+    // ErrInProgress Local: Operation in progress
+    ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
+    // ErrPrevInProgress Local: Previous operation in progress
+    ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
+    // ErrExistingSubscription Local: Existing subscription
+    ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
+    // ErrAssignPartitions Local: Assign partitions
+    ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+    // ErrRevokePartitions Local: Revoke partitions
+    ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
+    // ErrConflict Local: Conflicting use
+    ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
+    // ErrState Local: Erroneous state
+    ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
+    // ErrUnknownProtocol Local: Unknown protocol
+    ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
+    // ErrNotImplemented Local: Not implemented
+    ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
+    // ErrAuthentication Local: Authentication failure
+    ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
+    // ErrNoOffset Local: No offset stored
+    ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
+    // ErrOutdated Local: Outdated
+    ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
+    // ErrTimedOutQueue Local: Timed out in queue
+    ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
+    // ErrUnsupportedFeature Local: Required feature not supported by broker
+    ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
+    // ErrWaitCache Local: Awaiting cache update
+    ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
+    // ErrIntr Local: Operation interrupted
+    ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
+    // ErrKeySerialization Local: Key serialization error
+    ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
+    // ErrValueSerialization Local: Value serialization error
+    ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
+    // ErrKeyDeserialization Local: Key deserialization error
+    ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
+    // ErrValueDeserialization Local: Value deserialization error
+    ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
+    // ErrPartial Local: Partial response
+    ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
+    // ErrReadOnly Local: Read-only object
+    ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
+    // ErrNoent Local: No such entry
+    ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
+    // ErrUnderflow Local: Read underflow
+    ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
+    // ErrInvalidType Local: Invalid type
+    ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
+    // ErrRetry Local: Retry operation
+    ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
+    // ErrPurgeQueue Local: Purged in queue
+    ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
+    // ErrPurgeInflight Local: Purged in flight
+    ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
+    // ErrFatal Local: Fatal error
+    ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
+    // ErrInconsistent Local: Inconsistent state
+    ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
+    // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
+    ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
+    // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
+    ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
+    // ErrUnknownBroker Local: Unknown broker
+    ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
+    // ErrNotConfigured Local: Functionality not configured
+    ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
+    // ErrFenced Local: This instance has been fenced by a newer instance
+    ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
+    // ErrApplication Local: Application generated error
+    ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
+    // ErrAssignmentLost Local: Group partition assignment lost
+    ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
+    // ErrNoop Local: No operation performed
+    ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
+    // ErrAutoOffsetReset Local: No offset to automatically reset to
+    ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
+    // ErrLogTruncation Local: Partition log truncation detected
+    ErrLogTruncation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__LOG_TRUNCATION)
+    // ErrUnknown Unknown broker error
+    ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
+    // ErrNoError Success
+    ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
+    // ErrOffsetOutOfRange Broker: Offset out of range
+    ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
+    // ErrInvalidMsg Broker: Invalid message
+    ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
+    // ErrUnknownTopicOrPart Broker: Unknown topic or partition
+    ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+    // ErrInvalidMsgSize Broker: Invalid message size
+    ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
+    // ErrLeaderNotAvailable Broker: Leader not available
+    ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+    // ErrNotLeaderForPartition Broker: Not leader for partition
+    ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
+    // ErrRequestTimedOut Broker: Request timed out
+    ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
+    // ErrBrokerNotAvailable Broker: Broker not available
+    ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
+    // ErrReplicaNotAvailable Broker: Replica not available
+    ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
+    // ErrMsgSizeTooLarge Broker: Message size too large
+    ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
+    // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
+    ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
+    // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
+    ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
+    // ErrNetworkException Broker: Broker disconnected before response received
+    ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
+    // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
+    ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS)
+    // ErrCoordinatorNotAvailable Broker: Coordinator not available
+    ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)
+    // ErrNotCoordinator Broker: Not coordinator
+    ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR)
+    // ErrTopicException Broker: Invalid topic
+    ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
+    // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
+    ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
+    // ErrNotEnoughReplicas Broker: Not enough in-sync replicas
+    ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
+    // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
+    ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
+    // ErrInvalidRequiredAcks Broker: Invalid required acks value
+    ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
+    // ErrIllegalGeneration Broker: Specified group generation id is not valid
+    ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
+    // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
+    ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
+    // ErrInvalidGroupID Broker: Invalid group.id
+    ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
+    // ErrUnknownMemberID Broker: Unknown member
+    ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
+    // ErrInvalidSessionTimeout Broker: Invalid session timeout
+    ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
+    // ErrRebalanceInProgress Broker: Group rebalance in progress
+    ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
+    // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
+    ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
+    // ErrTopicAuthorizationFailed Broker: Topic authorization failed
+    ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
+    // ErrGroupAuthorizationFailed Broker: Group authorization failed
+    ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
+    // ErrClusterAuthorizationFailed Broker: Cluster authorization failed
+    ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
+    // ErrInvalidTimestamp Broker: Invalid timestamp
+    ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
+    // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
+    ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
+    // ErrIllegalSaslState Broker: Request not valid in current SASL state
+    ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
+    // ErrUnsupportedVersion Broker: API version not supported
+    ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
+    // ErrTopicAlreadyExists Broker: Topic already exists
+    ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
+    // ErrInvalidPartitions Broker: Invalid number of partitions
+    ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
+    // ErrInvalidReplicationFactor Broker: Invalid replication factor
+    ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
+    // ErrInvalidReplicaAssignment Broker: Invalid replica assignment
+    ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
+    // ErrInvalidConfig Broker: Configuration is invalid
+    ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
+    // ErrNotController Broker: Not controller for cluster
+    ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
+    // ErrInvalidRequest Broker: Invalid request
+    ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
+    // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
+    ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
+    // ErrPolicyViolation Broker: Policy violation
+    ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
+    // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
+    ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
+    // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
+    ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
+    // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
+    ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
+    // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
+    ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
+    // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
+    ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
+    // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
+    ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
+    // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
+    ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
+    // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
+    ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
+    // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
+    ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
+    // ErrSecurityDisabled Broker: Security features are disabled
+    ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
+    // ErrOperationNotAttempted Broker: Operation not attempted
+    ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
+    // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
+    ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
+    // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
+    ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND)
+    // ErrSaslAuthenticationFailed Broker: SASL Authentication failed
+    ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
+    // ErrUnknownProducerID Broker: Unknown Producer Id
+    ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID)
+    // ErrReassignmentInProgress Broker: Partition reassignment is in progress
+    ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS)
+    // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
+    ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED)
+    // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
+    ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND)
+    // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
+    ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH)
+    // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
+    ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED)
+    // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
+    ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED)
+    // ErrDelegationTokenExpired Broker: Delegation Token is expired
+    ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED)
+    // ErrInvalidPrincipalType Broker: Supplied principalType is not supported
+    ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE)
+    // ErrNonEmptyGroup Broker: The group is not empty
+    ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP)
+    // ErrGroupIDNotFound Broker: The group id does not exist
+    ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND)
+    // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
+    ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND)
+    // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
+    ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH)
+    // ErrListenerNotFound Broker: No matching listener
+    ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND)
+    // ErrTopicDeletionDisabled Broker: Topic deletion is disabled
+    ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED)
+    // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
+    ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH)
+    // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
+    ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH)
+    // ErrUnsupportedCompressionType Broker: Unsupported compression type
+    ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
+    // ErrStaleBrokerEpoch Broker: Broker epoch has changed
+    ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH)
+    // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
+    ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE)
+    // ErrMemberIDRequired Broker: Group member needs a valid member ID
+    ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)
+    // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
+    ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE)
+    // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
+    ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED)
+    // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
+    ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
+    // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
+    ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE)
+    // ErrElectionNotNeeded Broker: Leader election not needed for topic partition
+    ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED)
+    // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
+    ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS)
+    // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
+    ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC)
+    // ErrInvalidRecord Broker: Broker failed to validate record
+    ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
+    // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
+    ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
+    // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
+    ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
+    // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
+    ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
+    // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
+    ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
+    // ErrDuplicateResource Broker: Request illegally referred to the same resource twice
+    ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
+    // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
+    ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
+    // ErrInconsistentVoterSet Broker: Indicates that either the sender or recipient of a voter-only request is not one of the expected voters
+    ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
+    // ErrInvalidUpdateVersion Broker: Invalid update version
+    ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
+    // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
+    ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
+    // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
+    ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
+)
+

+ func (ErrorCode) + + String + + +

+
func (c ErrorCode) String() string
+

+ String returns a human readable representation of an error code +

+

+ type + + Event + + +

+

+ Event generic interface +

+
type Event interface {
+    // String returns a human-readable representation of the event
+    String() string
+}
+

+ type + + Handle + + +

+

+ Handle represents a generic client handle containing common parts for +both Producer and Consumer. +

+
type Handle interface {
+    // SetOAuthBearerToken sets the data to be transmitted
+    // to a broker during SASL/OAUTHBEARER authentication. It will return nil
+    // on success, otherwise an error if:
+    // 1) the token data is invalid (meaning an expiration time in the past
+    // or either a token value or an extension key or value that does not meet
+    // the regular expression requirements as per
+    // https://tools.ietf.org/html/rfc7628#section-3.1);
+    // 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
+    // 3) SASL/OAUTHBEARER is supported but is not configured as the client's
+    // authentication mechanism.
+    SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+
+    // SetOAuthBearerTokenFailure sets the error message describing why token
+    // retrieval/setting failed; it also schedules a new token refresh event for 10
+    // seconds later so the attempt may be retried. It will return nil on
+    // success, otherwise an error if:
+    // 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
+    // 2) SASL/OAUTHBEARER is supported but is not configured as the client's
+    // authentication mechanism.
+    SetOAuthBearerTokenFailure(errstr string) error
+
+    // IsClosed() returns the bool to check if the client is closed
+    IsClosed() bool
+    // contains filtered or unexported methods
+}
+ type + + Header + + +

+ Header represents a single Kafka message header. +

+

+ Message headers are made up of a list of Header elements, retaining their original insert +order and allowing for duplicate Keys. +

+

+ Key is a human-readable string identifying the header. +Value is the key's binary value; Kafka does not put any restrictions on the format +of the Value, but it should be made relatively compact. +The value may be a byte array, empty, or nil. +

+

+ NOTE: Message headers are not available on producer delivery report messages. +

+
type Header struct {
+    Key   string // Header name (utf-8 string)
+    Value []byte // Header value (nil, empty, or binary)
+}
+
+

+ func (Header) + + String + + +

+
func (h Header) String() string
+

+ String returns the Header Key and data in a human-readable, possibly truncated form +suitable for displaying to the user. +
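+ A brief sketch of attaching headers when producing; the producer handle p and the topic name are assumptions, and note (per the remark above) that headers are not returned on delivery reports: +
+topic := "my-topic" // assumed
+err := p.Produce(&kafka.Message{
+    TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+    Value:          []byte("payload"),
+    Headers: []kafka.Header{
+        {Key: "trace-id", Value: []byte("abc-123")},
+        {Key: "trace-id", Value: []byte("duplicate keys are allowed")},
+    },
+}, nil)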

+

+ type + + IsolationLevel + + +

+

+ IsolationLevel is a type which is used for AdminOptions to set the IsolationLevel. +

+
type IsolationLevel int
+

+ type + + ListConsumerGroupOffsetsAdminOption + + +

+

+ ListConsumerGroupOffsetsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout, SetAdminRequireStableOffsets. +

+
type ListConsumerGroupOffsetsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + ListConsumerGroupOffsetsResult + + +

+

+ ListConsumerGroupOffsetsResult represents the result of a +ListConsumerGroupOffsets operation. +

+
type ListConsumerGroupOffsetsResult struct {
+    // A slice of ConsumerGroupTopicPartitions, each element represents a group's
+    // TopicPartitions and Offsets.
+    ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions
+}
+
+

+ type + + ListConsumerGroupsAdminOption + + +

+

+ ListConsumerGroupsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout, SetAdminMatchConsumerGroupStates. +

+
type ListConsumerGroupsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + ListConsumerGroupsResult + + +

+

+ ListConsumerGroupsResult represents ListConsumerGroups results and errors. +

+
type ListConsumerGroupsResult struct {
+    // List of valid ConsumerGroupListings.
+    Valid []ConsumerGroupListing
+    // List of errors.
+    Errors []error
+}
+
+

+ type + + ListOffsetsAdminOption + + +

+

+ ListOffsetsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout, SetAdminIsolationLevel. +

+
type ListOffsetsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + ListOffsetsResult + + +

+

+ ListOffsetsResult holds the map of TopicPartition to ListOffsetsResultInfo for a request. +

+
type ListOffsetsResult struct {
+    ResultInfos map[TopicPartition]ListOffsetsResultInfo
+}
+
+

+ type + + ListOffsetsResultInfo + + +

+

+ ListOffsetsResultInfo describes the result of ListOffsets request for a Topic Partition. +

+
type ListOffsetsResultInfo struct {
+    Offset      Offset
+    Timestamp   int64
+    LeaderEpoch *int32
+    Error       Error
+}
+
+

+ type + + LogEvent + + +

+

+ LogEvent represent the log from librdkafka internal log queue +

+
type LogEvent struct {
+    Name      string    // Name of client instance
+    Tag       string    // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
+    Message   string    // Log message
+    Level     int       // Log syslog level, lower is more critical.
+    Timestamp time.Time // Log timestamp
+}
+
+

+ func (LogEvent) + + String + + +

+
func (logEvent LogEvent) String() string
+

+ type + + MemberAssignment + + +

+

+ MemberAssignment represents the assignment of a consumer group member. +

+
type MemberAssignment struct {
+    // Partitions assigned to current member.
+    TopicPartitions []TopicPartition
+}
+
+

+ type + + MemberDescription + + +

+

+ MemberDescription represents the description of a consumer group member. +

+
type MemberDescription struct {
+    // Client id.
+    ClientID string
+    // Group instance id.
+    GroupInstanceID string
+    // Consumer id.
+    ConsumerID string
+    // Group member host.
+    Host string
+    // Member assignment.
+    Assignment MemberAssignment
+}
+
+

+ type + + Message + + +

+

+ Message represents a Kafka message +

+
type Message struct {
+    TopicPartition TopicPartition
+    Value          []byte
+    Key            []byte
+    Timestamp      time.Time
+    TimestampType  TimestampType
+    Opaque         interface{}
+    Headers        []Header
+    LeaderEpoch    *int32 // Deprecated: LeaderEpoch or nil if not available. Use m.TopicPartition.LeaderEpoch instead.
+}
+
+

+ func (*Message) + + String + + +

+
func (m *Message) String() string
+

+ String returns a human readable representation of a Message. +Key and payload are not represented. +

+

+ type + + Metadata + + +

+

+ Metadata contains broker and topic metadata for all (matching) topics +

+
type Metadata struct {
+    Brokers []BrokerMetadata
+    Topics  map[string]TopicMetadata
+
+    OriginatingBroker BrokerMetadata
+}
+
+

+ type + + MockCluster + + +

+

+ MockCluster represents a Kafka mock cluster instance which can be used +for testing. +

+
type MockCluster struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + NewMockCluster + + +

+
func NewMockCluster(brokerCount int) (*MockCluster, error)
+

+ NewMockCluster provides a mock Kafka cluster with a configurable +number of brokers that support a reasonable subset of Kafka protocol +operations, error injection, etc. +

+

+ The broker ids will start at 1 up to and including brokerCount. +

+

+ Mock clusters provide localhost listeners that can be used as the bootstrap +servers by multiple Kafka client instances. +

+

+ Currently supported functionality: +- Producer +- Idempotent Producer +- Transactional Producer +- Low-level consumer +- High-level balanced consumer groups with offset commits +- Topic Metadata and auto creation +

+

+ Warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL. +
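+ A hedged test sketch using the mock cluster (bearing in mind the experimental warning above); the topic name and payload are arbitrary, and the snippet is meant to live in a _test.go file with the testing and kafka packages imported: +
+func TestMockRoundTrip(t *testing.T) {
+    mc, err := kafka.NewMockCluster(1)
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer mc.Close()
+
+    p, err := kafka.NewProducer(&kafka.ConfigMap{
+        "bootstrap.servers": mc.BootstrapServers(),
+    })
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer p.Close()
+
+    topic := "mock-topic" // auto-created by the mock cluster
+    delivery := make(chan kafka.Event, 1)
+    if err := p.Produce(&kafka.Message{
+        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+        Value:          []byte("hello"),
+    }, delivery); err != nil {
+        t.Fatal(err)
+    }
+    m := (<-delivery).(*kafka.Message)
+    if m.TopicPartition.Error != nil {
+        t.Fatalf("delivery failed: %v", m.TopicPartition.Error)
+    }
+}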

+

+ func (*MockCluster) + + BootstrapServers + + +

+
func (mc *MockCluster) BootstrapServers() string
+

+ BootstrapServers returns the bootstrap.servers property for this MockCluster +

+

+ func (*MockCluster) + + Close + + +

+
func (mc *MockCluster) Close()
+

+ Close and destroy the MockCluster +

+

+ func (*MockCluster) + + CreateTopic + + +

+
func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error
+

+ CreateTopic creates a topic without having to use a producer +

+

+ func (*MockCluster) + + SetBrokerDown + + +

+
func (mc *MockCluster) SetBrokerDown(brokerID int) error
+

+ SetBrokerDown disconnects the broker and disallows any new connections. +This does NOT trigger leader change. +Use brokerID -1 for all brokers, or >= 0 for a specific broker. +

+

+ func (*MockCluster) + + SetBrokerUp + + +

+
func (mc *MockCluster) SetBrokerUp(brokerID int) error
+

+ SetBrokerUp makes the broker accept connections again. +This does NOT trigger leader change. +Use brokerID -1 for all brokers, or >= 0 for a specific broker. +

+

+ func (*MockCluster) + + SetRoundtripDuration + + +

+
func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error
+

+ SetRoundtripDuration sets the broker round-trip-time delay for the given broker. +Use brokerID -1 for all brokers, or >= 0 for a specific broker. +

+

+ type + + Node + + +

+

+ Node represents a Kafka broker. +

+
type Node struct {
+    // Node id.
+    ID int
+    // Node host.
+    Host string
+    // Node port.
+    Port int
+    // Node rack (may be nil)
+    Rack *string
+}
+
+

+ func (Node) + + String + + +

+
func (n Node) String() string
+

+ type + + OAuthBearerToken + + +

+

+ OAuthBearerToken represents the data to be transmitted +to a broker during SASL/OAUTHBEARER authentication. +

+
type OAuthBearerToken struct {
+    // Token value, often (but not necessarily) a JWS compact serialization
+    // as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
+    // the regular expression for a SASL/OAUTHBEARER value defined at
+    // https://tools.ietf.org/html/rfc7628#section-3.1
+    TokenValue string
+    // Metadata about the token indicating when it expires (local time);
+    // it must represent a time in the future
+    Expiration time.Time
+    // Metadata about the token indicating the Kafka principal name
+    // to which it applies (for example, "admin")
+    Principal string
+    // SASL extensions, if any, to be communicated to the broker during
+    // authentication (all keys and values of which must meet the regular
+    // expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
+    // and it must not contain the reserved "auth" key)
+    Extensions map[string]string
+}
+
+

+ type + + OAuthBearerTokenRefresh + + +

+

+ OAuthBearerTokenRefresh indicates token refresh is required +

+
type OAuthBearerTokenRefresh struct {
+    // Config is the value of the sasl.oauthbearer.config property
+    Config string
+}
+
+

+ func (OAuthBearerTokenRefresh) + + String + + +

+
func (o OAuthBearerTokenRefresh) String() string
+

+ type + + Offset + + +

+

+ Offset type (int64) with support for canonical names +

+
type Offset int64
+

+ func + + NewOffset + + +

+
func NewOffset(offset interface{}) (Offset, error)
+

+ NewOffset creates a new Offset using the provided logical string, an +absolute int64 offset value, or a concrete Offset type. +Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored" +
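+ For example (a non-authoritative sketch; the consumer handle c and the topic name are assumptions), a logical offset name can be parsed and used when assigning partitions directly: +
+off, err := kafka.NewOffset("earliest") // equivalent to kafka.OffsetBeginning
+if err != nil {
+    log.Fatal(err)
+}
+topic := "my-topic"
+err = c.Assign([]kafka.TopicPartition{
+    {Topic: &topic, Partition: 0, Offset: off},
+})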

+

+ func + + OffsetTail + + +

+
func OffsetTail(relativeOffset Offset) Offset
+

+ OffsetTail returns the logical offset relativeOffset from current end of partition +

+

+ func (*Offset) + + Set + + +

+
func (o *Offset) Set(offset interface{}) error
+

+ Set offset value, see NewOffset() +

+

+ func (Offset) + + String + + +

+
func (o Offset) String() string
+

+ type + + OffsetSpec + + +

+

+ OffsetSpec specifies desired offsets while using ListOffsets. +

+
type OffsetSpec int64
+

+ func + + NewOffsetSpecForTimestamp + + +

+
func NewOffsetSpecForTimestamp(timestamp int64) OffsetSpec
+

+ NewOffsetSpecForTimestamp creates an OffsetSpec corresponding to the timestamp. +

+

+ type + + OffsetsCommitted + + +

+

+ OffsetsCommitted reports committed offsets +

+
type OffsetsCommitted struct {
+    Error   error
+    Offsets []TopicPartition
+}
+
+

+ func (OffsetsCommitted) + + String + + +

+
func (o OffsetsCommitted) String() string
+

+ type + + PartitionEOF + + +

+

+ PartitionEOF consumer reached end of partition +Needs to be explicitly enabled by setting the `enable.partition.eof` +configuration property to true. +

+
type PartitionEOF TopicPartition
+

+ func (PartitionEOF) + + String + + +

+
func (p PartitionEOF) String() string
+

+ type + + PartitionMetadata + + +

+

+ PartitionMetadata contains per-partition metadata +

+
type PartitionMetadata struct {
+    ID       int32
+    Error    Error
+    Leader   int32
+    Replicas []int32
+    Isrs     []int32
+}
+
+

+ type + + PartitionsSpecification + + +

+

+ PartitionsSpecification holds parameters for creating additional partitions for a topic. +PartitionsSpecification is analogous to NewPartitions in the Java Topic Admin API. +

+
type PartitionsSpecification struct {
+    // Topic to create more partitions for.
+    Topic string
+    // New partition count for topic, must be higher than current partition count.
+    IncreaseTo int
+    // (Optional) Explicit replica assignment. The outer array is
+    // indexed by the new partition index (i.e., 0 for the first added
+    // partition), while the inner per-partition array
+    // contains the replica broker ids. The first broker in each
+    // broker id list will be the preferred replica.
+    ReplicaAssignment [][]int32
+}
+
+

+ type + + Producer + + +

+

+ Producer implements a High-level Apache Kafka Producer instance +

+
type Producer struct {
+    // contains filtered or unexported fields
+}
+
+

+ func + + NewProducer + + +

+
func NewProducer(conf *ConfigMap) (*Producer, error)
+

+ NewProducer creates a new high-level Producer instance. +

+

+ conf is a *ConfigMap with standard librdkafka configuration properties. +

+

+ Supported special configuration properties (type, default): +

+
go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance).
+                                  These batches do not relate to Kafka message batches in any way.
+                                  Note: timestamps and headers are not supported with this interface.
+go.delivery.reports (bool, true) - Forward per-message delivery reports to the
+                                   Events() channel.
+go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports.
+                                    Allowed values: all, none (or empty string), key, value, headers
+                                    Warning: There is a performance penalty to include headers in the delivery report.
+go.events.channel.size (int, 1000000) - Events() channel buffer size (in number of events).
+go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
+go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
+go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
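+ A minimal produce-and-wait sketch relying on the defaults above (go.delivery.reports=true); the broker address and topic are assumptions: +
+package main
+
+import (
+    "log"
+
+    "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+)
+
+func main() {
+    p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer p.Close()
+
+    // Delivery reports are emitted on Events() as *kafka.Message.
+    go func() {
+        for e := range p.Events() {
+            if m, ok := e.(*kafka.Message); ok && m.TopicPartition.Error != nil {
+                log.Printf("delivery failed: %v", m.TopicPartition.Error)
+            }
+        }
+    }()
+
+    topic := "my-topic"
+    p.Produce(&kafka.Message{
+        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+        Value:          []byte("hello"),
+    }, nil)
+
+    // Wait up to 15s for outstanding deliveries before exiting.
+    p.Flush(15 * 1000)
+}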
+
+

+ func (*Producer) + + AbortTransaction + + +

+
func (p *Producer) AbortTransaction(ctx context.Context) error
+

+ AbortTransaction aborts the ongoing transaction. +

+

+ This function should also be used to recover from non-fatal abortable +transaction errors. +

+

+ Any outstanding messages will be purged and fail with +`ErrPurgeInflight` or `ErrPurgeQueue`. +

+

+ Parameters: +

+
- `ctx` - The maximum amount of time to block, or nil for indefinite.
+
+

+ Note: This function will block until all outstanding messages are purged +and the transaction abort request has been successfully +handled by the transaction coordinator, or until the `ctx` expires, +whichever comes first. On timeout the application may +call the function again. +

+

+ Note: Will automatically call `Purge()` and `Flush()` to ensure all queued +and in-flight messages are purged before attempting to abort the transaction. +The application MUST serve the `producer.Events()` channel for delivery +reports in a separate go-routine during this time. +

+

+ Returns nil on success or an error object on failure. +Check whether the returned error object permits retrying +by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error +has been raised by calling `err.(kafka.Error).IsFatal()`. +

+

+ func (*Producer) + + BeginTransaction + + +

+
func (p *Producer) BeginTransaction() error
+

+ BeginTransaction starts a new transaction. +

+

+ `InitTransactions()` must have been called successfully (once) +before this function is called. +

+

+ Any messages produced, offsets sent (`SendOffsetsToTransaction()`), +etc., after the successful return of this function will be part of +the transaction and committed or aborted atomically. +

+

+ Finish the transaction by calling `CommitTransaction()` or +abort the transaction by calling `AbortTransaction()`. +

+

+ Returns nil on success or an error object on failure. +Check whether a fatal error has been raised by +calling `err.(kafka.Error).IsFatal()`. +

+

+ Note: With the transactional producer, `Produce()`, et.al, are only +allowed during an on-going transaction, as started with this function. +Any produce call outside an on-going transaction, or for a failed +transaction, will fail. +

+

+ func (*Producer) + + Close + + +

+
func (p *Producer) Close()
+

+ Close a Producer instance. +The Producer object or its channels are no longer usable after this call. +

+

+ func (*Producer) + + CommitTransaction + + +

+
func (p *Producer) CommitTransaction(ctx context.Context) error
+

+ CommitTransaction commits the current transaction. +

+

+ Any outstanding messages will be flushed (delivered) before actually +committing the transaction. +

+

+ If any of the outstanding messages fail permanently the current +transaction will enter the abortable error state and this +function will return an abortable error, in this case the application +must call `AbortTransaction()` before attempting a new +transaction with `BeginTransaction()`. +

+

+ Parameters: +

+
- `ctx` - The maximum amount of time to block, or nil for indefinite.
+
+

+ Note: This function will block until all outstanding messages are +delivered and the transaction commit request has been successfully +handled by the transaction coordinator, or until the `ctx` expires, +whichever comes first. On timeout the application may +call the function again. +

+

+ Note: Will automatically call `Flush()` to ensure all queued +messages are delivered before attempting to commit the transaction. +The application MUST serve the `producer.Events()` channel for delivery +reports in a separate go-routine during this time. +

+

+ Returns nil on success or an error object on failure. +Check whether the returned error object permits retrying +by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable +or fatal error has been raised by calling +`err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` +respectively. +

+

+ func (*Producer) + + Events + + +

+
func (p *Producer) Events() chan Event
+

+ Events returns the Events channel (read) +

+

+ func (*Producer) + + Flush + + +

+
func (p *Producer) Flush(timeoutMs int) int
+

+ Flush and wait for outstanding messages and requests to complete delivery. +Runs until the outstanding-event count reaches zero or timeoutMs elapses, whichever comes first. +Returns the number of outstanding events still un-flushed. +BUG: Tries to include messages on ProduceChannel, but this is not guaranteed to be reliable. +

+

+ func (*Producer) GetFatalError

+
func (p *Producer) GetFatalError() error
+

+ GetFatalError returns an Error object if the client instance has raised a fatal error, else nil. +

+

+ func (*Producer) GetMetadata

+
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+

+ GetMetadata queries broker for cluster and topic metadata. +If topic is non-nil only information about that topic is returned, else if +allTopics is false only information about locally used topics is returned, +else information about all topics is returned. +GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. +

+

+ func (*Producer) InitTransactions

+
func (p *Producer) InitTransactions(ctx context.Context) error
+

+ InitTransactions initializes transactions for the producer instance. +

+

+ This function ensures any transactions initiated by previous instances +of the producer with the same `transactional.id` are completed. +If the previous instance failed with a transaction in progress the +previous transaction will be aborted. +This function needs to be called before any other transactional or +produce functions are called when the `transactional.id` is configured. +

+

+ If the last transaction had begun completion (following transaction commit) +but not yet finished, this function will await the previous transaction's +completion. +

+

+ When any previous transactions have been fenced this function +will acquire the internal producer id and epoch, used in all future +transactional messages issued by this producer instance. +

+

+ Upon successful return from this function the application has to perform at +least one of the following operations within `transaction.timeout.ms` to +avoid timing out the transaction on the broker: +

+
+- `Produce()` (et al.)
+- `SendOffsetsToTransaction()`
+- `CommitTransaction()`
+- `AbortTransaction()`
+
+

+ Parameters: +

+
+- `ctx` - The maximum time to block, or nil for indefinite.
+  On timeout the operation may continue in the background,
+  depending on state, and it is okay to call `InitTransactions()`
+  again.
+  Providing a nil context or a context without a deadline uses
+  the timeout 2*transaction.timeout.ms.
+
+

+ Returns nil on success or an error on failure. +Check whether the returned error object permits retrying +by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal +error has been raised by calling `err.(kafka.Error).IsFatal()`. +

+
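+ The following sketch, for illustration only, walks through the lifecycle described above; +broker address, topic name and `transactional.id` are placeholder values: +

package main

import (
	"context"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// Placeholder configuration; adjust for your cluster.
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092",
		"transactional.id":  "example-txn-id",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	// Serve delivery reports while the transactional calls flush messages.
	go func() {
		for range p.Events() {
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err := p.InitTransactions(ctx); err != nil {
		log.Fatal(err)
	}
	if err := p.BeginTransaction(); err != nil {
		log.Fatal(err)
	}

	topic := "example-topic"
	_ = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("hello"),
	}, nil)

	if err := p.CommitTransaction(ctx); err != nil {
		if kErr, ok := err.(kafka.Error); ok && kErr.TxnRequiresAbort() {
			_ = p.AbortTransaction(ctx)
		} else {
			log.Fatal(err)
		}
	}
}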

+ func (*Producer) IsClosed

+
func (p *Producer) IsClosed() bool
+

+ IsClosed returns a boolean indicating whether the client is closed. +

+

+ func (*Producer) Len

+
func (p *Producer) Len() int
+

+ Len returns the number of messages and requests waiting to be transmitted to the broker +as well as delivery reports queued for the application. +BUG: Tries to include messages on ProduceChannel, but it's not guaranteed to be reliable. +

+

+ func (*Producer) Logs

+
func (p *Producer) Logs() chan LogEvent
+

+ Logs returns the Log channel (if enabled), else nil +

+

+ func (*Producer) OffsetsForTimes

+
func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+

+ OffsetsForTimes looks up offsets by timestamp for the given partitions. +

+

+ The returned offset for each partition is the earliest offset whose +timestamp is greater than or equal to the given timestamp in the +corresponding partition. If the provided timestamp exceeds that of the +last message in the partition, a value of -1 will be returned. +

+

+ The timestamps to query are represented as `.Offset` in the `times` +argument and the looked up offsets are represented as `.Offset` in the returned +`offsets` list. +

+

+ The function will block for at most timeoutMs milliseconds. +

+

+ Duplicate Topic+Partitions are not supported. +Per-partition errors may be returned in the `.Error` field. +

+
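+ An illustrative sketch (not upstream documentation) of looking up offsets at or after a +wall-clock time; it assumes an already-created Producer and passes the timestamp, in +milliseconds since the Unix epoch, via the `.Offset` field: +

package example

import (
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// offsetsSince looks up, for one partition, the earliest offset whose
// timestamp is >= the given time.
func offsetsSince(p *kafka.Producer, topic string, partition int32, t time.Time) (kafka.TopicPartition, error) {
	times := []kafka.TopicPartition{{
		Topic:     &topic,
		Partition: partition,
		// Timestamp to query, expressed in the Offset field (milliseconds).
		Offset: kafka.Offset(t.UnixMilli()),
	}}
	offsets, err := p.OffsetsForTimes(times, 10000 /* timeoutMs */)
	if err != nil {
		return kafka.TopicPartition{}, err
	}
	if offsets[0].Error != nil {
		return kafka.TopicPartition{}, offsets[0].Error
	}
	fmt.Printf("offset at %v: %v\n", t, offsets[0].Offset)
	return offsets[0], nil
}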

+ func (*Producer) Produce

+
func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error
+

+ Produce a single message. +This is an asynchronous call that enqueues the message on the internal +transmit queue, thus returning immediately. +The delivery report will be sent on the provided deliveryChan if specified, +or on the Producer object's Events() channel if not. +msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented), +api.version.request=true, and broker >= 0.10.0.0. +msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented), +api.version.request=true, and broker >= 0.11.0.0. +Returns an error if the message could not be enqueued. +

+
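+ A minimal sketch, for illustration only, of producing one message and waiting for its +delivery report on a dedicated channel; broker address and topic name are placeholders: +

package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	topic := "example-topic"
	deliveryChan := make(chan kafka.Event, 1)

	err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Key:            []byte("key"),
		Value:          []byte("value"),
	}, deliveryChan)
	if err != nil {
		log.Fatal(err) // could not be enqueued (e.g. queue full)
	}

	// Only the delivery report for this message is sent on deliveryChan.
	m := (<-deliveryChan).(*kafka.Message)
	if m.TopicPartition.Error != nil {
		log.Fatalf("delivery failed: %v", m.TopicPartition.Error)
	}
	log.Printf("delivered to %v", m.TopicPartition)
}

+ A real application would typically reuse one Producer and drain its `Events()` channel +continuously rather than waiting per message. +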

+ func (*Producer) ProduceChannel

+
func (p *Producer) ProduceChannel() chan *Message
+

+ ProduceChannel returns the produce *Message channel (write) +

+

+ Deprecated: ProduceChannel (channel based producer) is deprecated in favour +of Produce(). +Flush() and Len() are not guaranteed to be reliable with ProduceChannel. +

+

+ func (*Producer) Purge

+
func (p *Producer) Purge(flags int) error
+

+ Purge messages currently handled by this producer instance. +

+

+ flags is a combination of PurgeQueue, PurgeInFlight and PurgeNonBlocking. +

+

+ The application will need to call Poll(), Flush() or read the Events() channel +after this call to serve delivery reports for the purged messages. +

+

+ Messages purged from internal queues fail with the delivery report +error code set to ErrPurgeQueue, while purged messages that +are in-flight to or from the broker will fail with the error code set to +ErrPurgeInflight. +

+

+ Warning: Purging messages that are in-flight to or from the broker +will ignore any subsequent acknowledgement for these messages +received from the broker, effectively making it impossible +for the application to know if the messages were successfully +produced or not. This may result in duplicate messages if the +application retries these messages at a later time. +

+

+ Note: This call may block for a short time while background thread +queues are purged. +

+

+ Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown. +

+
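+ A brief sketch, for illustration only and assuming an existing Producer `p`, that combines +the purge flags and then flushes so the purge delivery reports are served: +

package example

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// purgeAll purges both queued and in-flight messages, then flushes so that
// the purge delivery reports (ErrPurgeQueue / ErrPurgeInflight) are served.
func purgeAll(p *kafka.Producer) error {
	if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight); err != nil {
		return err
	}
	remaining := p.Flush(5000 /* timeoutMs */)
	if remaining > 0 {
		log.Printf("%d events still outstanding after flush", remaining)
	}
	return nil
}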

+ func (*Producer) QueryWatermarkOffsets

+
func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+

+ QueryWatermarkOffsets returns the broker's low and high offsets for the given topic +and partition. +

+

+ func (*Producer) SendOffsetsToTransaction

+
func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error
+

+ SendOffsetsToTransaction sends a list of topic partition offsets to the +consumer group coordinator for `consumerMetadata`, and marks the offsets +as part of the current transaction. +These offsets will be considered committed only if the transaction is +committed successfully. +

+

+ The offsets should be the next message your application will consume, +i.e., the last processed message's offset + 1 for each partition. +Either track the offsets manually during processing or use +`consumer.Position()` (on the consumer) to get the current offsets for +the partitions assigned to the consumer. +

+

+ Use this method at the end of a consume-transform-produce loop prior +to committing the transaction with `CommitTransaction()`. +

+

+ Parameters: +

+
+- `ctx` - The maximum amount of time to block, or nil for indefinite.
+- `offsets` - List of offsets to commit to the consumer group upon
+  successful commit of the transaction. Offsets should be
+  the next message to consume, e.g., last processed message + 1.
+- `consumerMetadata` - The current consumer group metadata as returned by
+  `consumer.GetConsumerGroupMetadata()` on the consumer
+  instance the provided offsets were consumed from.
+
+

+ Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer). +

+

+ Note: Logical and invalid offsets (e.g., OffsetInvalid) in +`offsets` will be ignored. If there are no valid offsets in +`offsets` the function will return nil and no action will be taken. +

+

+ Returns nil on success or an error object on failure. +Check whether the returned error object permits retrying +by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable +or fatal error has been raised by calling +`err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` +respectively. +

+
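+ An illustrative sketch assuming an existing transactional Producer `p`, a Consumer `c` +with `enable.auto.commit=false`, and an open transaction: +

package example

import (
	"context"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// sendProcessedOffset forwards the offset of a processed message to the
// ongoing transaction, as part of a consume-transform-produce loop.
func sendProcessedOffset(ctx context.Context, p *kafka.Producer, c *kafka.Consumer, msg *kafka.Message) error {
	// Offset to commit is the next message to consume: processed offset + 1.
	tp := msg.TopicPartition
	tp.Offset++

	meta, err := c.GetConsumerGroupMetadata()
	if err != nil {
		return err
	}
	return p.SendOffsetsToTransaction(ctx, []kafka.TopicPartition{tp}, meta)
}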

+ func (*Producer) SetOAuthBearerToken

+
func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+

+ SetOAuthBearerToken sets the data to be transmitted +to a broker during SASL/OAUTHBEARER authentication. It will return nil +on success, otherwise an error if: +1) the token data is invalid (meaning an expiration time in the past +or either a token value or an extension key or value that does not meet +the regular expression requirements as per +https://tools.ietf.org/html/rfc7628#section-3.1); +2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +3) SASL/OAUTHBEARER is supported but is not configured as the client's +authentication mechanism. +

+
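+ A small sketch, for illustration only, assuming a client configured with SASL/OAUTHBEARER +and a token value obtained out of band from an identity provider: +

package example

import (
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// setToken passes an externally retrieved bearer token to the producer.
// tokenValue and principal are placeholders for values from your provider.
func setToken(p *kafka.Producer, tokenValue, principal string, lifetime time.Duration) error {
	return p.SetOAuthBearerToken(kafka.OAuthBearerToken{
		TokenValue: tokenValue,
		Expiration: time.Now().Add(lifetime),
		Principal:  principal,
	})
}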

+ func (*Producer) SetOAuthBearerTokenFailure

+
func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error
+

+ SetOAuthBearerTokenFailure sets the error message describing why token +retrieval/setting failed; it also schedules a new token refresh event for 10 +seconds later so the attempt may be retried. It will return nil on +success, otherwise an error if: +1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +2) SASL/OAUTHBEARER is supported but is not configured as the client's +authentication mechanism. +

+

+ func (*Producer) SetSaslCredentials

+
func (p *Producer) SetSaslCredentials(username, password string) error
+

+ SetSaslCredentials sets the SASL credentials used for this producer. The new credentials +will overwrite the old ones (which were set when creating the producer or by a previous +call to SetSaslCredentials). The new credentials will be used the next time this +producer needs to authenticate to a broker. This method will not disconnect +existing broker connections that were established with the old credentials. +This method applies only to the SASL PLAIN and SCRAM mechanisms. +

+

+ func (*Producer) String

+
func (p *Producer) String() string
+

+ String returns a human-readable name for a Producer instance +

+

+ func (*Producer) TestFatalError

+
func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode
+

+ TestFatalError triggers a fatal error in the underlying client. +This is to be used strictly for testing purposes. +

+

+ type RebalanceCb

+

+ RebalanceCb provides a per-Subscribe*() rebalance event callback. +The passed Event will be either AssignedPartitions or RevokedPartitions +

+
type RebalanceCb func(*Consumer, Event) error
+

+ type ResourcePatternType

+

+ ResourcePatternType enumerates the different types of Kafka resource patterns. +

+
type ResourcePatternType int
+

+ func ResourcePatternTypeFromString

+
func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error)
+

+ ResourcePatternTypeFromString translates a resource pattern type name to +a ResourcePatternType value. +

+

+ func (ResourcePatternType) String

+
func (t ResourcePatternType) String() string
+

+ String returns the human-readable representation of a ResourcePatternType +

+

+ type ResourceType

+

+ ResourceType represents an Apache Kafka resource type +

+
type ResourceType int
+

+ func ResourceTypeFromString

+
func ResourceTypeFromString(typeString string) (ResourceType, error)
+

+ ResourceTypeFromString translates a resource type name/string to +a ResourceType value. +

+

+ func (ResourceType) String

+
func (t ResourceType) String() string
+

+ String returns the human-readable representation of a ResourceType +

+

+ type RevokedPartitions

+

+ RevokedPartitions consumer group rebalance event: revoked partition set +

+
type RevokedPartitions struct {
+    Partitions []TopicPartition
+}
+
+

+ func (RevokedPartitions) String

+
func (e RevokedPartitions) String() string
+

+ type ScramCredentialInfo

+

+ ScramCredentialInfo contains Mechanism and Iterations for a +SASL/SCRAM credential associated with a user. +

+
type ScramCredentialInfo struct {
+    // Iterations - positive number of iterations used when creating the credential
+    Iterations int
+    // Mechanism - SASL/SCRAM mechanism
+    Mechanism ScramMechanism
+}
+
+

+ type ScramMechanism

+

+ ScramMechanism enumerates SASL/SCRAM mechanisms. +Used by `AdminClient.AlterUserScramCredentials` +and `AdminClient.DescribeUserScramCredentials`. +

+
type ScramMechanism int
+

+ func ScramMechanismFromString

+
func ScramMechanismFromString(mechanism string) (ScramMechanism, error)
+

+ ScramMechanismFromString translates a Scram Mechanism name to +a ScramMechanism value. +

+

+ func (ScramMechanism) String

+
func (o ScramMechanism) String() string
+

+ String returns the human-readable representation of a ScramMechanism +

+

+ type Stats

+

+ Stats represents a statistics event +

+
type Stats struct {
+    // contains filtered or unexported fields
+}
+
+

+ func (Stats) String

+
func (e Stats) String() string
+

+ type TimestampType

+

+ TimestampType is the Message timestamp type or source +

+
type TimestampType int
+

+ func (TimestampType) String

+
func (t TimestampType) String() string
+

+ type TopicCollection

+

+ TopicCollection represents a collection of topics. +

+
type TopicCollection struct {
+    // contains filtered or unexported fields
+}
+
+

+ func NewTopicCollectionOfTopicNames

+
func NewTopicCollectionOfTopicNames(names []string) TopicCollection
+

+ NewTopicCollectionOfTopicNames creates a new TopicCollection based on a list +of topic names. +

+

+ type TopicDescription

+

+ TopicDescription represents the result of DescribeTopics for +a single topic. +

+
type TopicDescription struct {
+    // Topic name.
+    Name string
+    // Topic Id
+    TopicID UUID
+    // Error, if any, of the result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+    // Is the topic internal to Kafka?
+    IsInternal bool
+    // Partitions' information list.
+    Partitions []TopicPartitionInfo
+    // Operations allowed for the topic (nil if not available or not requested).
+    AuthorizedOperations []ACLOperation
+}
+
+

+ type TopicMetadata

+

+ TopicMetadata contains per-topic metadata +

+
type TopicMetadata struct {
+    Topic      string
+    Partitions []PartitionMetadata
+    Error      Error
+}
+
+

+ type TopicPartition

+

+ TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset. +

+
type TopicPartition struct {
+    Topic       *string
+    Partition   int32
+    Offset      Offset
+    Metadata    *string
+    Error       error
+    LeaderEpoch *int32 // LeaderEpoch or nil if not available
+}
+
+

+ func (TopicPartition) String

+
func (p TopicPartition) String() string
+

+ type TopicPartitionInfo

+

+ TopicPartitionInfo represents a specific partition's information inside a +TopicDescription. +

+
type TopicPartitionInfo struct {
+    // Partition id.
+    Partition int
+    // Leader broker.
+    Leader *Node
+    // Replicas of the partition.
+    Replicas []Node
+    // In-Sync-Replicas of the partition.
+    Isr []Node
+}
+
+

+ type TopicPartitions

+

+ TopicPartitions is a slice of TopicPartition that also implements +the sort interface +

+
type TopicPartitions []TopicPartition
+

+ func (TopicPartitions) Len

+
func (tps TopicPartitions) Len() int
+

+ func (TopicPartitions) Less

+
func (tps TopicPartitions) Less(i, j int) bool
+

+ func (TopicPartitions) Swap

+
func (tps TopicPartitions) Swap(i, j int)
+

+ type TopicResult

+

+ TopicResult provides per-topic operation result (error) information. +

+
type TopicResult struct {
+    // Topic name
+    Topic string
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+}
+
+

+ func (TopicResult) String

+
func (t TopicResult) String() string
+

+ String returns a human-readable representation of a TopicResult. +

+

+ type TopicSpecification

+

+ TopicSpecification holds parameters for creating a new topic. +TopicSpecification is analogous to NewTopic in the Java Topic Admin API. +

+
type TopicSpecification struct {
+    // Topic name to create.
+    Topic string
+    // Number of partitions in topic.
+    NumPartitions int
+    // Default replication factor for the topic's partitions, or zero
+    // if an explicit ReplicaAssignment is set.
+    ReplicationFactor int
+    // (Optional) Explicit replica assignment. The outer array is
+    // indexed by the partition number, while the inner per-partition array
+    // contains the replica broker ids. The first broker in each
+    // broker id list will be the preferred replica.
+    ReplicaAssignment [][]int32
+    // Topic configuration.
+    Config map[string]string
+}
+
+
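+ A short sketch, for illustration only (not part of this API listing), of passing a +TopicSpecification to the admin client's CreateTopics(); all values are placeholders: +

package main

import (
	"context"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	results, err := a.CreateTopics(ctx, []kafka.TopicSpecification{{
		Topic:             "example-topic",
		NumPartitions:     3,
		ReplicationFactor: 1,
		Config:            map[string]string{"retention.ms": "604800000"},
	}})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			log.Printf("create %s: %v", r.Topic, r.Error)
		}
	}
}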

+ type UUID

+

+ UUID is the Kafka UUID representation +

+
type UUID struct {
+    // contains filtered or unexported fields
+}
+
+

+ func (UUID) GetLeastSignificantBits

+
func (uuid UUID) GetLeastSignificantBits() int64
+

+ GetLeastSignificantBits returns Least Significant 64 bits of the 128 bits UUID +

+

+ func (UUID) GetMostSignificantBits

+
func (uuid UUID) GetMostSignificantBits() int64
+

+ GetMostSignificantBits returns Most Significant 64 bits of the 128 bits UUID +

+

+ func (UUID) String

+
func (uuid UUID) String() string
+

+ String returns the base64 string representation of the UUID +

+

+ type UserScramCredentialDeletion

+

+ UserScramCredentialDeletion is a request to delete +a SASL/SCRAM credential for a user. +

+
type UserScramCredentialDeletion struct {
+    // User - user name
+    User string
+    // Mechanism - SASL/SCRAM mechanism.
+    Mechanism ScramMechanism
+}
+
+

+ type UserScramCredentialUpsertion

+

+ UserScramCredentialUpsertion is a request to update/insert +a SASL/SCRAM credential for a user. +

+
type UserScramCredentialUpsertion struct {
+    // User - user name
+    User string
+    // ScramCredentialInfo - the mechanism and iterations.
+    ScramCredentialInfo ScramCredentialInfo
+    // Password - password to HMAC before storage.
+    Password []byte
+    // Salt - salt to use. Will be generated randomly if nil. (optional)
+    Salt []byte
+}
+
+

+ type UserScramCredentialsDescription

+

+ UserScramCredentialsDescription represents all SASL/SCRAM credentials +associated with a user that can be retrieved, or an error indicating +why credentials could not be retrieved. +

+
type UserScramCredentialsDescription struct {
+    // User - the user name.
+    User string
+    // ScramCredentialInfos - SASL/SCRAM credential representations for the user.
+    ScramCredentialInfos []ScramCredentialInfo
+    // Error - error corresponding to this user description.
+    Error Error
+}
+
+ +
+ +
+ + + + diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_amd64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_amd64.go new file mode 100644 index 000000000..3d871c75f --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_amd64.go @@ -0,0 +1,13 @@ +// +build !dynamic + + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. + +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_amd64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static darwin_amd64 from librdkafka-static-bundle-v2.3.0.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_arm64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_arm64.go new file mode 100644 index 000000000..3f60b0480 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_arm64.go @@ -0,0 +1,13 @@ +// +build !dynamic + + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. + +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_arm64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static darwin_arm64 from librdkafka-static-bundle-v2.3.0.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_dynamic.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_dynamic.go new file mode 100644 index 000000000..92458af8a --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_dynamic.go @@ -0,0 +1,10 @@ +//go:build dynamic +// +build dynamic + +package kafka + +// #cgo pkg-config: rdkafka +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "dynamically linked to librdkafka" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_amd64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_amd64.go new file mode 100644 index 000000000..122bc7214 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_amd64.go @@ -0,0 +1,13 @@ +// +build !dynamic +// +build !musl + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. 
+ +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_amd64.a -lm -ldl -lpthread -lrt +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static glibc_linux_amd64 from librdkafka-static-bundle-v2.3.0.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_arm64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_arm64.go new file mode 100644 index 000000000..8949aabe2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_arm64.go @@ -0,0 +1,13 @@ +// +build !dynamic +// +build !musl + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. + +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_arm64.a -lm -ldl -lpthread -lrt +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static glibc_linux_arm64 from librdkafka-static-bundle-v2.3.0.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_amd64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_amd64.go new file mode 100644 index 000000000..19fca092e --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_amd64.go @@ -0,0 +1,13 @@ +// +build !dynamic +// +build musl + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. + +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_amd64.a -lm -ldl -lpthread -lrt -lpthread -lrt +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static musl_linux_amd64 from librdkafka-static-bundle-v2.3.0.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_arm64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_arm64.go new file mode 100644 index 000000000..39f57b425 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_arm64.go @@ -0,0 +1,13 @@ +// +build !dynamic +// +build musl + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. + +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_arm64.a -lm -ldl -lpthread -lrt -lpthread -lrt +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static musl_linux_arm64 from librdkafka-static-bundle-v2.3.0.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_windows.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_windows.go new file mode 100644 index 000000000..aca6a9aff --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_windows.go @@ -0,0 +1,13 @@ +// +build !dynamic + + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. 
+ +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_windows.a -lws2_32 -lsecur32 -lcrypt32 +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v2.3.0.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/config.go new file mode 100644 index 000000000..d9174b49f --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/config.go @@ -0,0 +1,299 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "go/types" + "reflect" + "strings" + "unsafe" +) + +/* +#include +#include "select_rdkafka.h" +*/ +import "C" + +// ConfigValue supports the following types: +// +// bool, int, string, any type with the standard String() interface +type ConfigValue interface{} + +// ConfigMap is a map containing standard librdkafka configuration properties as documented in: +// https://github.com/confluentinc/librdkafka/tree/master/CONFIGURATION.md +// +// The special property "default.topic.config" (optional) is a ConfigMap +// containing default topic configuration properties. +// +// The use of "default.topic.config" is deprecated, +// topic configuration properties shall be specified in the standard ConfigMap. +// For backwards compatibility, "default.topic.config" (if supplied) +// takes precedence. +type ConfigMap map[string]ConfigValue + +// SetKey sets configuration property key to value. +// +// For user convenience a key prefixed with {topic}. will be +// set on the "default.topic.config" sub-map, this use is deprecated. +func (m ConfigMap) SetKey(key string, value ConfigValue) error { + if strings.HasPrefix(key, "{topic}.") { + _, found := m["default.topic.config"] + if !found { + m["default.topic.config"] = ConfigMap{} + } + m["default.topic.config"].(ConfigMap)[strings.TrimPrefix(key, "{topic}.")] = value + } else { + m[key] = value + } + + return nil +} + +// Set implements flag.Set (command line argument parser) as a convenience +// for `-X key=value` config. 
+func (m ConfigMap) Set(kv string) error { + i := strings.Index(kv, "=") + if i == -1 { + return newErrorFromString(ErrInvalidArg, "Expected key=value") + } + + k := kv[:i] + v := kv[i+1:] + + return m.SetKey(k, v) +} + +func value2string(v ConfigValue) (ret string, errstr string) { + + errstr = "" + switch x := v.(type) { + case bool: + if x { + ret = "true" + } else { + ret = "false" + } + case int: + ret = fmt.Sprintf("%d", x) + case string: + ret = x + case types.Slice: + ret = "" + arr := v.([]ConfigValue) + for _, i := range arr { + temp, err := value2string(i) + if err != "" { + ret = "" + errstr = fmt.Sprintf("Invalid value type %T", v) + break + } + ret += temp + "," + } + if len(ret) != 0 { + ret = ret[:len(ret)-1] + } + case fmt.Stringer: + ret = x.String() + default: + ret = "" + errstr = fmt.Sprintf("Invalid value type %T", v) + } + + return ret, errstr +} + +// rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t +// into a common interface. +type rdkAnyconf interface { + set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t +} + +func anyconfSet(anyconf rdkAnyconf, key string, val ConfigValue) (err error) { + value, errstr := value2string(val) + if errstr != "" { + return newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s for key %s (expected string,bool,int,ConfigMap)", errstr, key)) + } + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + cVal := C.CString(value) + defer C.free(unsafe.Pointer(cVal)) + cErrstr := (*C.char)(C.malloc(C.size_t(128))) + defer C.free(unsafe.Pointer(cErrstr)) + + if anyconf.set(cKey, cVal, cErrstr, 128) != C.RD_KAFKA_CONF_OK { + return newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + return nil +} + +// we need these typedefs to workaround a crash in golint +// when parsing the set() methods below +type rdkConf C.rd_kafka_conf_t +type rdkTopicConf C.rd_kafka_topic_conf_t + +func (cConf *rdkConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t { + return C.rd_kafka_conf_set((*C.rd_kafka_conf_t)(cConf), cKey, cVal, cErrstr, C.size_t(errstrSize)) +} + +func (ctopicConf *rdkTopicConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t { + return C.rd_kafka_topic_conf_set((*C.rd_kafka_topic_conf_t)(ctopicConf), cKey, cVal, cErrstr, C.size_t(errstrSize)) +} + +func configConvertAnyconf(m ConfigMap, anyconf rdkAnyconf) (err error) { + // set plugins first, any plugin-specific configuration depends on + // the plugin to have already been set + pluginPaths, ok := m["plugin.library.paths"] + if ok { + err = anyconfSet(anyconf, "plugin.library.paths", pluginPaths) + if err != nil { + return err + } + } + for k, v := range m { + if k == "plugin.library.paths" { + continue + } + switch v.(type) { + case ConfigMap: + /* Special sub-ConfigMap, only used for default.topic.config */ + + if k != "default.topic.config" { + return newErrorFromString(ErrInvalidArg, fmt.Sprintf("Invalid type for key %s", k)) + } + + var cTopicConf = C.rd_kafka_topic_conf_new() + + err = configConvertAnyconf(v.(ConfigMap), + (*rdkTopicConf)(cTopicConf)) + if err != nil { + C.rd_kafka_topic_conf_destroy(cTopicConf) + return err + } + + C.rd_kafka_conf_set_default_topic_conf( + (*C.rd_kafka_conf_t)(anyconf.(*rdkConf)), + (*C.rd_kafka_topic_conf_t)((*rdkTopicConf)(cTopicConf))) + + default: + err = anyconfSet(anyconf, k, v) + if err != nil { + return err + } + } + } + + return nil +} + +// convert ConfigMap to C rd_kafka_conf_t * +func (m 
ConfigMap) convert() (cConf *C.rd_kafka_conf_t, err error) { + cConf = C.rd_kafka_conf_new() + + // Set the client.software.name and .version (use librdkafka version). + _, librdkafkaVersion := LibraryVersion() + anyconfSet((*rdkConf)(cConf), "client.software.name", "confluent-kafka-go") + anyconfSet((*rdkConf)(cConf), "client.software.version", librdkafkaVersion) + + err = configConvertAnyconf(m, (*rdkConf)(cConf)) + if err != nil { + C.rd_kafka_conf_destroy(cConf) + return nil, err + } + return cConf, nil +} + +// get finds key in the configmap and returns its value. +// If the key is not found defval is returned. +// If the key is found but the type is mismatched an error is returned. +func (m ConfigMap) get(key string, defval ConfigValue) (ConfigValue, error) { + if strings.HasPrefix(key, "{topic}.") { + defconfCv, found := m["default.topic.config"] + if !found { + return defval, nil + } + return defconfCv.(ConfigMap).get(strings.TrimPrefix(key, "{topic}."), defval) + } + + v, ok := m[key] + if !ok { + return defval, nil + } + + if defval != nil && reflect.TypeOf(defval) != reflect.TypeOf(v) { + return nil, newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s expects type %T, not %T", key, defval, v)) + } + + return v, nil +} + +// extract performs a get() and if found deletes the key. +func (m ConfigMap) extract(key string, defval ConfigValue) (ConfigValue, error) { + + v, err := m.get(key, defval) + if err != nil { + return nil, err + } + + delete(m, key) + + return v, nil +} + +// extractLogConfig extracts generic go.logs.* configuration properties. +func (m ConfigMap) extractLogConfig() (logsChanEnable bool, logsChan chan LogEvent, err error) { + v, err := m.extract("go.logs.channel.enable", false) + if err != nil { + return + } + + logsChanEnable = v.(bool) + + v, err = m.extract("go.logs.channel", nil) + if err != nil { + return + } + + if v != nil { + logsChan = v.(chan LogEvent) + } + + if logsChanEnable { + // Tell librdkafka to forward logs to the log queue + m.Set("log.queue=true") + } + + return +} + +func (m ConfigMap) clone() ConfigMap { + m2 := make(ConfigMap) + for k, v := range m { + m2[k] = v + } + return m2 +} + +// Get finds the given key in the ConfigMap and returns its value. +// If the key is not found `defval` is returned. +// If the key is found but the type does not match that of `defval` (unless nil) +// an ErrInvalidArg error is returned. +func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) { + return m.get(key, defval) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/consumer.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/consumer.go new file mode 100644 index 000000000..efc54d00e --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/consumer.go @@ -0,0 +1,1103 @@ +/** + * Copyright 2016-2020 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka + +import ( + "fmt" + "math" + "sync/atomic" + "time" + "unsafe" +) + +/* +#include +#include "select_rdkafka.h" + + +static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) { + return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL; +} +*/ +import "C" + +// RebalanceCb provides a per-Subscribe*() rebalance event callback. +// The passed Event will be either AssignedPartitions or RevokedPartitions +type RebalanceCb func(*Consumer, Event) error + +// Consumer implements a High-level Apache Kafka Consumer instance +type Consumer struct { + events chan Event + handle handle + eventsChanEnable bool + readerTermChan chan bool + rebalanceCb RebalanceCb + appReassigned bool + appRebalanceEnable bool // SerializerConfig setting + + isClosed uint32 + isClosing uint32 +} + +// IsClosed returns boolean representing if client is closed or not +func (c *Consumer) IsClosed() bool { + return atomic.LoadUint32(&c.isClosed) == 1 +} + +func (c *Consumer) verifyClient() error { + if c.IsClosed() { + return getOperationNotAllowedErrorForClosedClient() + } + return nil +} + +// Strings returns a human readable name for a Consumer instance +func (c *Consumer) String() string { + return c.handle.String() +} + +// getHandle implements the Handle interface +func (c *Consumer) gethandle() *handle { + return &c.handle +} + +// Subscribe to a single topic +// This replaces the current subscription +func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error { + err := c.verifyClient() + if err != nil { + return err + } + return c.SubscribeTopics([]string{topic}, rebalanceCb) +} + +// SubscribeTopics subscribes to the provided list of topics. +// This replaces the current subscription. +func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) { + err = c.verifyClient() + if err != nil { + return err + } + ctopics := C.rd_kafka_topic_partition_list_new(C.int(len(topics))) + defer C.rd_kafka_topic_partition_list_destroy(ctopics) + + for _, topic := range topics { + ctopic := C.CString(topic) + defer C.free(unsafe.Pointer(ctopic)) + C.rd_kafka_topic_partition_list_add(ctopics, ctopic, C.RD_KAFKA_PARTITION_UA) + } + + e := C.rd_kafka_subscribe(c.handle.rk, ctopics) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(e) + } + + c.rebalanceCb = rebalanceCb + + return nil +} + +// Unsubscribe from the current subscription, if any. +func (c *Consumer) Unsubscribe() (err error) { + err = c.verifyClient() + if err != nil { + return err + } + C.rd_kafka_unsubscribe(c.handle.rk) + return nil +} + +// Assign an atomic set of partitions to consume. +// +// The .Offset field of each TopicPartition must either be set to an absolute +// starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), +// but should typically be set to `kafka.OffsetStored` to have the consumer +// use the committed offset as a start position, with a fallback to +// `auto.offset.reset` if there is no committed offset. +// +// This replaces the current assignment. 
+func (c *Consumer) Assign(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } + c.appReassigned = true + + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + + e := C.rd_kafka_assign(c.handle.rk, cparts) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(e) + } + + return nil +} + +// Unassign the current set of partitions to consume. +func (c *Consumer) Unassign() (err error) { + err = c.verifyClient() + if err != nil { + return err + } + c.appReassigned = true + + e := C.rd_kafka_assign(c.handle.rk, nil) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(e) + } + + return nil +} + +// IncrementalAssign adds the specified partitions to the current set of +// partitions to consume. +// +// The .Offset field of each TopicPartition must either be set to an absolute +// starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), +// but should typically be set to `kafka.OffsetStored` to have the consumer +// use the committed offset as a start position, with a fallback to +// `auto.offset.reset` if there is no committed offset. +// +// The new partitions must not be part of the current assignment. +func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } + c.appReassigned = true + + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + + cError := C.rd_kafka_incremental_assign(c.handle.rk, cparts) + if cError != nil { + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// IncrementalUnassign removes the specified partitions from the current set of +// partitions to consume. +// +// The .Offset field of the TopicPartition is ignored. +// +// The removed partitions must be part of the current assignment. +func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } + c.appReassigned = true + + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + + cError := C.rd_kafka_incremental_unassign(c.handle.rk, cparts) + if cError != nil { + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// GetRebalanceProtocol returns the current consumer group rebalance protocol, +// which is either "EAGER" or "COOPERATIVE". +// If the rebalance protocol is not known in the current state an empty string +// is returned. +// Should typically only be called during rebalancing. +func (c *Consumer) GetRebalanceProtocol() string { + cStr := C.rd_kafka_rebalance_protocol(c.handle.rk) + if cStr == nil { + return "" + } + + return C.GoString(cStr) +} + +// AssignmentLost returns true if current partition assignment has been lost. +// This method is only applicable for use with a subscribing consumer when +// handling a rebalance event or callback. +// Partitions that have been lost may already be owned by other members in the +// group and therefore commiting offsets, for example, may fail. +func (c *Consumer) AssignmentLost() bool { + return cint2bool(C.rd_kafka_assignment_lost(c.handle.rk)) +} + +// commit offsets for specified offsets. +// If offsets is nil the currently assigned partitions' offsets are committed. +// This is a blocking call, caller will need to wrap in go-routine to +// get async or throw-away behaviour. 
+func (c *Consumer) commit(offsets []TopicPartition) (committedOffsets []TopicPartition, err error) { + var rkqu *C.rd_kafka_queue_t + + rkqu = C.rd_kafka_queue_new(c.handle.rk) + defer C.rd_kafka_queue_destroy(rkqu) + + var coffsets *C.rd_kafka_topic_partition_list_t + if offsets != nil { + coffsets = newCPartsFromTopicPartitions(offsets) + defer C.rd_kafka_topic_partition_list_destroy(coffsets) + } + + cErr := C.rd_kafka_commit_queue(c.handle.rk, coffsets, rkqu, nil, nil) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + + rkev := C.rd_kafka_queue_poll(rkqu, C.int(-1)) + if rkev == nil { + // shouldn't happen + return nil, newError(C.RD_KAFKA_RESP_ERR__DESTROY) + } + defer C.rd_kafka_event_destroy(rkev) + + if C.rd_kafka_event_type(rkev) != C.RD_KAFKA_EVENT_OFFSET_COMMIT { + panic(fmt.Sprintf("Expected OFFSET_COMMIT, got %s", + C.GoString(C.rd_kafka_event_name(rkev)))) + } + + cErr = C.rd_kafka_event_error(rkev) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)) + } + + cRetoffsets := C.rd_kafka_event_topic_partition_list(rkev) + if cRetoffsets == nil { + // no offsets, no error + return nil, nil + } + committedOffsets = newTopicPartitionsFromCparts(cRetoffsets) + + return committedOffsets, nil +} + +// Commit offsets for currently assigned partitions +// This is a blocking call. +// Returns the committed offsets on success. +func (c *Consumer) Commit() ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + return c.commit(nil) +} + +// CommitMessage commits offset based on the provided message. +// This is a blocking call. +// Returns the committed offsets on success. +func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + if m.TopicPartition.Error != nil { + return nil, newErrorFromString(ErrInvalidArg, "Can't commit errored message") + } + offsets := []TopicPartition{m.TopicPartition} + offsets[0].Offset++ + return c.commit(offsets) +} + +// CommitOffsets commits the provided list of offsets +// This is a blocking call. +// Returns the committed offsets on success. +func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + return c.commit(offsets) +} + +// StoreOffsets stores the provided list of offsets that will be committed +// to the offset store according to `auto.commit.interval.ms` or manual +// offset-less Commit(). +// +// Returns the stored offsets on success. If at least one offset couldn't be stored, +// an error and a list of offsets is returned. Each offset can be checked for +// specific errors via its `.Error` member. +func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } + coffsets := newCPartsFromTopicPartitions(offsets) + defer C.rd_kafka_topic_partition_list_destroy(coffsets) + + cErr := C.rd_kafka_offsets_store(c.handle.rk, coffsets) + + // coffsets might be annotated with an error + storedOffsets = newTopicPartitionsFromCparts(coffsets) + + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return storedOffsets, newError(cErr) + } + + return storedOffsets, nil +} + +// StoreMessage stores offset based on the provided message. +// This is a convenience method that uses StoreOffsets to do the actual work. 
+func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } + if m.TopicPartition.Error != nil { + return nil, newErrorFromString(ErrInvalidArg, "Can't store errored message") + } + if m.TopicPartition.Offset < 0 { + return nil, newErrorFromString(ErrInvalidArg, "Can't store message with offset less than 0") + } + + if m.TopicPartition.LeaderEpoch != nil && *m.TopicPartition.LeaderEpoch < 0 { + return nil, newErrorFromString(ErrInvalidArg, "Can't store message with leader epoch less than 0") + } + + offsets := []TopicPartition{m.TopicPartition} + offsets[0].Offset++ + return c.StoreOffsets(offsets) +} + +// Seek seeks the given topic partitions using the offset from the TopicPartition. +// +// The ignoredTimeoutMs parameter is ignored. Instead, this method blocks until +// the fetcher state is updated for the given partition with the new offset. +// This guarantees that no previously fetched messages for the old offset (or +// fetch position) will be passed to the application once this call returns. +// It will still take some time after the method returns until messages are +// fetched at the new offset. +// +// Seek() may only be used for partitions already being consumed +// (through Assign() or implicitly through a self-rebalanced Subscribe()). +// To set the starting offset it is preferred to use Assign() and provide +// a starting offset for each partition. +// +// Returns an error on failure or nil otherwise. +// Deprecated: Seek is deprecated in favour of SeekPartitions(). +func (c *Consumer) Seek(partition TopicPartition, ignoredTimeoutMs int) error { + err := c.verifyClient() + if err != nil { + return err + } + rkt := c.handle.getRkt(*partition.Topic) + cErr := C.rd_kafka_seek(rkt, + C.int32_t(partition.Partition), + C.int64_t(partition.Offset), + C.int(-1)) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cErr) + } + return nil +} + +// SeekPartitions seeks the given topic partitions to the per-partition offset +// stored in the .Offset field of each partition. +// +// The offset may be either absolute (>= 0) or a logical offset (e.g. OffsetEnd). +// +// SeekPartitions() may only be used for partitions already being consumed +// (through Assign() or implicitly through a self-rebalanced Subscribe()). +// To set the starting offset it is preferred to use Assign() in a +// kafka.AssignedPartitions handler and provide a starting offset for each +// partition. +// +// Returns an error on failure or nil otherwise. Individual partition errors +// should be checked in the per-partition .Error field. +func (c *Consumer) SeekPartitions(partitions []TopicPartition) ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + + cPartitions := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cPartitions) + + cErr := C.rd_kafka_seek_partitions( + c.handle.rk, cPartitions, -1 /* infinite timeout */) + if cErr != nil { + return nil, newErrorFromCErrorDestroy(cErr) + } + + return newTopicPartitionsFromCparts(cPartitions), nil +} + +// Poll the consumer for messages or events. 
+// +// # Will block for at most timeoutMs milliseconds +// +// The following callbacks may be triggered: +// +// Subscribe()'s rebalanceCb +// +// Returns nil on timeout, else an Event +func (c *Consumer) Poll(timeoutMs int) (event Event) { + ev, _ := c.handle.eventPoll(nil, timeoutMs, 1, nil) + return ev +} + +// Events returns the Events channel (if enabled) +// +// Deprecated: Events (channel based consumer) is deprecated in favour +// of Poll(). +func (c *Consumer) Events() chan Event { + return c.events +} + +// Logs returns the log channel if enabled, or nil otherwise. +func (c *Consumer) Logs() chan LogEvent { + return c.handle.logs +} + +// ReadMessage polls the consumer for a message. +// +// This is a convenience API that wraps Poll() and only returns +// messages or errors. All other event types are discarded. +// +// The call will block for at most `timeout` waiting for +// a new message or error. `timeout` may be set to -1 for +// indefinite wait. +// +// Timeout is returned as (nil, err) where `err.(kafka.Error).IsTimeout() == true`. +// +// Messages are returned as (msg, nil), +// while general errors are returned as (nil, err), +// and partition-specific errors are returned as (msg, err) where +// msg.TopicPartition provides partition-specific information (such as topic, partition and offset). +// +// All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded. +func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + + var absTimeout time.Time + var timeoutMs int + + if timeout > 0 { + absTimeout = time.Now().Add(timeout) + timeoutMs = (int)(timeout.Seconds() * 1000.0) + } else { + timeoutMs = (int)(timeout) + } + + for { + ev := c.Poll(timeoutMs) + + switch e := ev.(type) { + case *Message: + if e.TopicPartition.Error != nil { + return e, e.TopicPartition.Error + } + return e, nil + case Error: + return nil, e + default: + // Ignore other event types + } + + if timeout > 0 { + // Calculate remaining time + timeoutMs = int(math.Max(0.0, absTimeout.Sub(time.Now()).Seconds()*1000.0)) + } + + if timeoutMs == 0 && ev == nil { + return nil, newError(C.RD_KAFKA_RESP_ERR__TIMED_OUT) + } + + } + +} + +// Close Consumer instance. +// The object is no longer usable after this call. +func (c *Consumer) Close() (err error) { + // Check if the client is already closed. + err = c.verifyClient() + if err != nil { + return err + } + + // Client is in the process of closing. + if !atomic.CompareAndSwapUint32(&c.isClosing, 0, 1) { + return newErrorFromString(ErrState, "Consumer is already closing") + } + + // Wait for consumerReader() or pollLogEvents to terminate (by closing readerTermChan) + close(c.readerTermChan) + c.handle.waitGroup.Wait() + if c.eventsChanEnable { + close(c.events) + } + + C.rd_kafka_consumer_close_queue(c.handle.rk, c.handle.rkq) + + for C.rd_kafka_consumer_closed(c.handle.rk) != 1 { + c.Poll(100) + } + + // After this point, no more consumer methods may be called. + atomic.StoreUint32(&c.isClosed, 1) + + // Destroy our queue + C.rd_kafka_queue_destroy(c.handle.rkq) + c.handle.rkq = nil + + c.handle.cleanup() + + C.rd_kafka_destroy(c.handle.rk) + + return nil +} + +// NewConsumer creates a new high-level Consumer instance. +// +// conf is a *ConfigMap with standard librdkafka configuration properties. 
+// +// Supported special configuration properties: +// +// go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel. +// If set to true the app must handle the AssignedPartitions and +// RevokedPartitions events and call Assign() and Unassign() +// respectively. +// go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled. +// go.events.channel.size (int, 1000) - Events() channel size +// go.logs.channel.enable (bool, false) - Forward log to Logs() channel. +// go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true. +// +// WARNING: Due to the buffering nature of channels (and queues in general) the +// use of the events channel risks receiving outdated events and +// messages. Minimizing go.events.channel.size reduces the risk +// and number of outdated events and messages but does not eliminate +// the factor completely. With a channel size of 1 at most one +// event or message may be outdated. +func NewConsumer(conf *ConfigMap) (*Consumer, error) { + + err := versionCheck() + if err != nil { + return nil, err + } + + // before we do anything with the configuration, create a copy such that + // the original is not mutated. + confCopy := conf.clone() + + groupid, _ := confCopy.get("group.id", nil) + if groupid == nil { + // without a group.id the underlying cgrp subsystem in librdkafka wont get started + // and without it there is no way to consume assigned partitions. + // So for now require the group.id, this might change in the future. + return nil, newErrorFromString(ErrInvalidArg, "Required property group.id not set") + } + + c := &Consumer{} + c.isClosed = 0 + + v, err := confCopy.extract("go.application.rebalance.enable", false) + if err != nil { + return nil, err + } + c.appRebalanceEnable = v.(bool) + + v, err = confCopy.extract("go.events.channel.enable", false) + if err != nil { + return nil, err + } + c.eventsChanEnable = v.(bool) + + v, err = confCopy.extract("go.events.channel.size", 1000) + if err != nil { + return nil, err + } + eventsChanSize := v.(int) + + logsChanEnable, logsChan, err := confCopy.extractLogConfig() + if err != nil { + return nil, err + } + + cConf, err := confCopy.convert() + if err != nil { + return nil, err + } + cErrstr := (*C.char)(C.malloc(C.size_t(256))) + defer C.free(unsafe.Pointer(cErrstr)) + + C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_REBALANCE|C.RD_KAFKA_EVENT_OFFSET_COMMIT|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH) + + c.handle.rk = C.rd_kafka_new(C.RD_KAFKA_CONSUMER, cConf, cErrstr, 256) + if c.handle.rk == nil { + return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + C.rd_kafka_poll_set_consumer(c.handle.rk) + + c.handle.c = c + c.handle.setup() + c.readerTermChan = make(chan bool) + c.handle.rkq = C.rd_kafka_queue_get_consumer(c.handle.rk) + if c.handle.rkq == nil { + // no cgrp (no group.id configured), revert to main queue. 
+ c.handle.rkq = C.rd_kafka_queue_get_main(c.handle.rk) + } + + if logsChanEnable { + c.handle.setupLogQueue(logsChan, c.readerTermChan) + } + + if c.eventsChanEnable { + c.events = make(chan Event, eventsChanSize) + /* Start rdkafka consumer queue reader -> events writer goroutine */ + c.handle.waitGroup.Add(1) + go func() { + consumerReader(c, c.readerTermChan) + c.handle.waitGroup.Done() + }() + } + + return c, nil +} + +// consumerReader reads messages and events from the librdkafka consumer queue +// and posts them on the consumer channel. +// Runs until termChan closes +func consumerReader(c *Consumer, termChan chan bool) { + for { + select { + case _ = <-termChan: + return + default: + _, term := c.handle.eventPoll(c.events, 100, 1000, termChan) + if term { + return + } + + } + } +} + +// GetMetadata queries broker for cluster and topic metadata. +// If topic is non-nil only information about that topic is returned, else if +// allTopics is false only information about locally used topics is returned, +// else information about all topics is returned. +// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. +func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + return getMetadata(c, topic, allTopics, timeoutMs) +} + +// QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition. +func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) { + err = c.verifyClient() + if err != nil { + return -1, -1, err + } + return queryWatermarkOffsets(c, topic, partition, timeoutMs) +} + +// GetWatermarkOffsets returns the cached low and high offsets for the given topic +// and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets. +// The low offset is populated every statistics.interval.ms if that value is set. +// OffsetInvalid will be returned if there is no cached offset for either value. +func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error) { + err = c.verifyClient() + if err != nil { + return -1, -1, err + } + return getWatermarkOffsets(c, topic, partition) +} + +// OffsetsForTimes looks up offsets by timestamp for the given partitions. +// +// The returned offset for each partition is the earliest offset whose +// timestamp is greater than or equal to the given timestamp in the +// corresponding partition. If the provided timestamp exceeds that of the +// last message in the partition, a value of -1 will be returned. +// +// The timestamps to query are represented as `.Offset` in the `times` +// argument and the looked up offsets are represented as `.Offset` in the returned +// `offsets` list. +// +// The function will block for at most timeoutMs milliseconds. +// +// Duplicate Topic+Partitions are not supported. +// Per-partition errors may be returned in the `.Error` field. 
+func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } + return offsetsForTimes(c, times, timeoutMs) +} + +// Subscription returns the current subscription as set by Subscribe() +func (c *Consumer) Subscription() (topics []string, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } + var cTopics *C.rd_kafka_topic_partition_list_t + + cErr := C.rd_kafka_subscription(c.handle.rk, &cTopics) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + defer C.rd_kafka_topic_partition_list_destroy(cTopics) + + topicCnt := int(cTopics.cnt) + topics = make([]string, topicCnt) + for i := 0; i < topicCnt; i++ { + crktpar := C._c_rdkafka_topic_partition_list_entry(cTopics, + C.int(i)) + topics[i] = C.GoString(crktpar.topic) + } + + return topics, nil +} + +// Assignment returns the current partition assignments +func (c *Consumer) Assignment() (partitions []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } + var cParts *C.rd_kafka_topic_partition_list_t + + cErr := C.rd_kafka_assignment(c.handle.rk, &cParts) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + defer C.rd_kafka_topic_partition_list_destroy(cParts) + + partitions = newTopicPartitionsFromCparts(cParts) + + return partitions, nil +} + +// Committed retrieves committed offsets for the given set of partitions +func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_committed(c.handle.rk, cparts, C.int(timeoutMs)) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cerr) + } + + return newTopicPartitionsFromCparts(cparts), nil +} + +// Position returns the current consume position for the given partitions. +// Typical use is to call Assignment() to get the partition list +// and then pass it to Position() to get the current consume position for +// each of the assigned partitions. +// The consume position is the next message to read from the partition. +// i.e., the offset of the last message seen by the application + 1. +func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_position(c.handle.rk, cparts) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cerr) + } + + return newTopicPartitionsFromCparts(cparts), nil +} + +// Pause consumption for the provided list of partitions +// +// Note that messages already enqueued on the consumer's Event channel +// (if `go.events.channel.enable` has been set) will NOT be purged by +// this call, set `go.events.channel.size` accordingly. 
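An illustrative aside, not part of the vendored file: the warning in the Pause comment above about already-enqueued events is usually dealt with by pausing whatever is currently assigned and letting the application drain in-flight work before resuming. A small sketch under that assumption, with a hypothetical drain callback:

package kafkaexamples

import (
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// pauseWhileDraining pauses all currently assigned partitions, lets the
// caller drain whatever is already in flight, and then resumes fetching.
func pauseWhileDraining(c *kafka.Consumer, drain func()) error {
	assigned, err := c.Assignment()
	if err != nil {
		return err
	}

	// Fetching stops for the paused partitions; anything already queued on a
	// channel-based consumer's Events() channel is NOT purged by this call.
	if err := c.Pause(assigned); err != nil {
		return err
	}

	drain()
	time.Sleep(100 * time.Millisecond) // hypothetical settling delay

	return c.Resume(assigned)
}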
+func (c *Consumer) Pause(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_pause_partitions(c.handle.rk, cparts) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cerr) + } + return nil +} + +// Resume consumption for the provided list of partitions +func (c *Consumer) Resume(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_resume_partitions(c.handle.rk, cparts) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cerr) + } + return nil +} + +// SetOAuthBearerToken sets the the data to be transmitted +// to a broker during SASL/OAUTHBEARER authentication. It will return nil +// on success, otherwise an error if: +// 1) the token data is invalid (meaning an expiration time in the past +// or either a token value or an extension key or value that does not meet +// the regular expression requirements as per +// https://tools.ietf.org/html/rfc7628#section-3.1); +// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +// 3) SASL/OAUTHBEARER is supported but is not configured as the client's +// authentication mechanism. +func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { + err := c.verifyClient() + if err != nil { + return err + } + return c.handle.setOAuthBearerToken(oauthBearerToken) +} + +// SetOAuthBearerTokenFailure sets the error message describing why token +// retrieval/setting failed; it also schedules a new token refresh event for 10 +// seconds later so the attempt may be retried. It will return nil on +// success, otherwise an error if: +// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +// 2) SASL/OAUTHBEARER is supported but is not configured as the client's +// authentication mechanism. +func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error { + err := c.verifyClient() + if err != nil { + return err + } + return c.handle.setOAuthBearerTokenFailure(errstr) +} + +// ConsumerGroupMetadata reflects the current consumer group member metadata. +type ConsumerGroupMetadata struct { + serialized []byte +} + +// serializeConsumerGroupMetadata converts a C metadata object to its +// binary representation so we don't have to hold on to the C object, +// which would require an explicit .Close(). +func serializeConsumerGroupMetadata(cgmd *C.rd_kafka_consumer_group_metadata_t) ([]byte, error) { + var cBuffer *C.void + var cSize C.size_t + cError := C.rd_kafka_consumer_group_metadata_write(cgmd, + (*unsafe.Pointer)(unsafe.Pointer(&cBuffer)), &cSize) + if cError != nil { + return nil, newErrorFromCErrorDestroy(cError) + } + defer C.rd_kafka_mem_free(nil, unsafe.Pointer(cBuffer)) + + return C.GoBytes(unsafe.Pointer(cBuffer), C.int(cSize)), nil +} + +// deserializeConsumerGroupMetadata converts a serialized metadata object +// back to a C object. 
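Illustrative aside, not part of the vendored file: the group metadata is serialized above precisely so the Go-side object needs no explicit Close() and can simply be handed to a transactional producer. A hedged sketch of that hand-off, assuming a producer p on which InitTransactions() has already been called and a hypothetical consume-transform-produce step:

package kafkaexamples

import (
	"context"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// commitOffsetsInTransaction is a hypothetical consume-transform-produce step:
// the consumed offsets are committed as part of the producer's transaction.
func commitOffsetsInTransaction(ctx context.Context, c *kafka.Consumer, p *kafka.Producer, offsets []kafka.TopicPartition) error {
	cgmd, err := c.GetConsumerGroupMetadata()
	if err != nil {
		return err
	}

	if err := p.BeginTransaction(); err != nil {
		return err
	}

	// ... produce the transformed messages here ...

	if err := p.SendOffsetsToTransaction(ctx, offsets, cgmd); err != nil {
		// Abort on failure; a real implementation would also inspect
		// err.(kafka.Error).TxnRequiresAbort() and retriability.
		_ = p.AbortTransaction(ctx)
		return err
	}
	return p.CommitTransaction(ctx)
}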
+func deserializeConsumerGroupMetadata(serialized []byte) (*C.rd_kafka_consumer_group_metadata_t, error) { + var cgmd *C.rd_kafka_consumer_group_metadata_t + + cSerialized := C.CBytes(serialized) + defer C.free(cSerialized) + + cError := C.rd_kafka_consumer_group_metadata_read( + &cgmd, cSerialized, C.size_t(len(serialized))) + if cError != nil { + return nil, newErrorFromCErrorDestroy(cError) + } + + return cgmd, nil +} + +// GetConsumerGroupMetadata returns the consumer's current group metadata. +// This object should be passed to the transactional producer's +// SendOffsetsToTransaction() API. +func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + cgmd := C.rd_kafka_consumer_group_metadata(c.handle.rk) + if cgmd == nil { + return nil, NewError(ErrState, "Consumer group metadata not available", false) + } + defer C.rd_kafka_consumer_group_metadata_destroy(cgmd) + + serialized, err := serializeConsumerGroupMetadata(cgmd) + if err != nil { + return nil, err + } + + return &ConsumerGroupMetadata{serialized}, nil +} + +// NewTestConsumerGroupMetadata creates a new consumer group metadata instance +// mainly for testing use. +// Use GetConsumerGroupMetadata() to retrieve the real metadata. +func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error) { + cGroupID := C.CString(groupID) + defer C.free(unsafe.Pointer(cGroupID)) + + cgmd := C.rd_kafka_consumer_group_metadata_new(cGroupID) + if cgmd == nil { + return nil, NewError(ErrInvalidArg, "Failed to create metadata object", false) + } + + defer C.rd_kafka_consumer_group_metadata_destroy(cgmd) + serialized, err := serializeConsumerGroupMetadata(cgmd) + if err != nil { + return nil, err + } + + return &ConsumerGroupMetadata{serialized}, nil +} + +// cEventToRebalanceEvent returns an Event (AssignedPartitions or RevokedPartitions) +// based on the specified rkev. +func cEventToRebalanceEvent(rkev *C.rd_kafka_event_t) Event { + if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS { + var ev AssignedPartitions + ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev)) + return ev + } else if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS { + var ev RevokedPartitions + ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev)) + return ev + } else { + panic(fmt.Sprintf("Unable to create rebalance event from C type %s", + C.GoString(C.rd_kafka_err2name(C.rd_kafka_event_error(rkev))))) + } + +} + +// handleRebalanceEvent handles a assign/rebalance rebalance event. +// +// If the app provided a RebalanceCb to Subscribe*() or +// has go.application.rebalance.enable=true we create an event +// and forward it to the application thru the RebalanceCb or the +// Events channel respectively. +// Since librdkafka requires the rebalance event to be "acked" by +// the application (by calling *assign()) to synchronize state we keep track +// of if the application performed *Assign() or *Unassign(), but this only +// works for the non-channel case. For the channel case we assume the +// application calls *Assign() or *Unassign(). +// Failure to do so will "hang" the consumer, e.g., it wont start consuming +// and it wont close cleanly, so this error case should be visible +// immediately to the application developer. +// +// In the polling case (not channel based consumer) the rebalance event +// is returned in retval, else nil is returned. 
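Illustrative aside, not part of the vendored file: from the application side, the "must call *Assign()/*Unassign()" contract described above looks roughly like the sketch below when go.application.rebalance.enable is set on a channel-based consumer (the same contract applies when a RebalanceCb is used instead of the Events() channel). The broker address and topic name are placeholders:

package kafkaexamples

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// runChannelRebalance is a hypothetical channel-based consumer loop that
// takes over rebalance responsibility, as the NewConsumer and
// handleRebalanceEvent comments describe.
func runChannelRebalance() error {
	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers":               "localhost:9092", // assumed broker
		"group.id":                        "example-group",
		"go.events.channel.enable":        true, // deprecated upstream, used here for illustration
		"go.application.rebalance.enable": true,
	})
	if err != nil {
		return err
	}
	defer c.Close()

	if err := c.SubscribeTopics([]string{"jobs"}, nil); err != nil {
		return err
	}

	for ev := range c.Events() {
		switch e := ev.(type) {
		case kafka.AssignedPartitions:
			// The application "acks" the rebalance; skipping this hangs the consumer.
			c.Assign(e.Partitions)
		case kafka.RevokedPartitions:
			c.Unassign()
		case *kafka.Message:
			fmt.Printf("message on %s\n", e.TopicPartition)
		case kafka.Error:
			fmt.Printf("error: %v\n", e)
		}
	}
	return nil
}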
+ +func (c *Consumer) handleRebalanceEvent(channel chan Event, rkev *C.rd_kafka_event_t) (retval Event) { + + var ev Event + + if c.rebalanceCb != nil || c.appRebalanceEnable { + // Application has a rebalance callback or has enabled + // rebalances on the events channel, create the appropriate Event. + ev = cEventToRebalanceEvent(rkev) + + } + + if channel != nil && c.appRebalanceEnable && c.rebalanceCb == nil { + // Channel-based consumer with rebalancing enabled, + // return the rebalance event and rely on the application + // to call *Assign() / *Unassign(). + return ev + } + + // Call the application's rebalance callback, if any. + if c.rebalanceCb != nil { + // Mark .appReassigned as false to keep track of whether the + // application called *Assign() / *Unassign(). + c.appReassigned = false + + c.rebalanceCb(c, ev) + + if c.appReassigned { + // Rebalance event handled by application. + return nil + } + } + + // Either there was no rebalance callback, or the application + // did not call *Assign / *Unassign, so we need to do it. + + isCooperative := c.GetRebalanceProtocol() == "COOPERATIVE" + var cError *C.rd_kafka_error_t + var cErr C.rd_kafka_resp_err_t + + if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS { + // Assign partitions + if isCooperative { + cError = C.rd_kafka_incremental_assign( + c.handle.rk, + C.rd_kafka_event_topic_partition_list(rkev)) + } else { + cErr = C.rd_kafka_assign( + c.handle.rk, + C.rd_kafka_event_topic_partition_list(rkev)) + } + } else { + // Revoke partitions + + if isCooperative { + cError = C.rd_kafka_incremental_unassign( + c.handle.rk, + C.rd_kafka_event_topic_partition_list(rkev)) + } else { + cErr = C.rd_kafka_assign(c.handle.rk, nil) + } + } + + // If the *assign() call returned error, forward it to the + // the consumer's Events() channel for visibility. + if cError != nil { + c.events <- newErrorFromCErrorDestroy(cError) + } else if cErr != 0 { + c.events <- newError(cErr) + } + + return nil +} + +// SetSaslCredentials sets the SASL credentials used for this consumer. The new credentials +// will overwrite the old ones (which were set when creating the consumer or by a previous +// call to SetSaslCredentials). The new credentials will be used the next time the +// consumer needs to authenticate to a broker. This method will not disconnect +// existing broker connections that were established with the old credentials. +// This method applies only to the SASL PLAIN and SCRAM mechanisms. +func (c *Consumer) SetSaslCredentials(username, password string) error { + err := c.verifyClient() + if err != nil { + return err + } + return setSaslCredentials(c.handle.rk, username, password) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/context.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/context.go new file mode 100644 index 000000000..85709be0f --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/context.go @@ -0,0 +1,31 @@ +/** + * Copyright 2019 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "context" + "time" +) + +// Timeout returns the remaining time after which work done on behalf of this context should be +// canceled, or ok==false if no deadline/timeout is set. +func timeout(ctx context.Context) (timeout time.Duration, ok bool) { + if deadline, ok := ctx.Deadline(); ok { + return deadline.Sub(time.Now()), true + } + return 0, false +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error.go new file mode 100644 index 000000000..06c94bb4e --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error.go @@ -0,0 +1,169 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +// Automatically generate error codes from librdkafka +// See README for instructions +//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go + +/* +#include +#include "select_rdkafka.h" +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +// Error provides a Kafka-specific error container +type Error struct { + code ErrorCode + str string + fatal bool + retriable bool + txnRequiresAbort bool +} + +func newError(code C.rd_kafka_resp_err_t) (err Error) { + return Error{code: ErrorCode(code)} +} + +// NewError creates a new Error. +func NewError(code ErrorCode, str string, fatal bool) (err Error) { + return Error{code: code, str: str, fatal: fatal} +} + +func newErrorFromString(code ErrorCode, str string) (err Error) { + return Error{code: code, str: str} +} + +func newErrorFromCString(code C.rd_kafka_resp_err_t, cstr *C.char) (err Error) { + var str string + if cstr != nil { + str = C.GoString(cstr) + } else { + str = "" + } + return Error{code: ErrorCode(code), str: str} +} + +func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) { + return newErrorFromString(ErrorCode(code), str) +} + +// newErrorFromCError creates a new Error instance +func newErrorFromCError(cError *C.rd_kafka_error_t) Error { + return Error{ + code: ErrorCode(C.rd_kafka_error_code(cError)), + str: C.GoString(C.rd_kafka_error_string(cError)), + fatal: cint2bool(C.rd_kafka_error_is_fatal(cError)), + retriable: cint2bool(C.rd_kafka_error_is_retriable(cError)), + txnRequiresAbort: cint2bool(C.rd_kafka_error_txn_requires_abort(cError)), + } +} + +// newErrorFromCErrorDestroy creates a new Error instance and destroys +// the passed cError. 
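Illustrative aside, not part of the vendored file: application code typically recovers the Error container above with errors.As (or a plain type assertion) and branches on its accessors, which are defined a little further down in this file. A sketch of one such policy, where treating ErrAllBrokersDown as transient is purely an example assumption:

package kafkaexamples

import (
	"errors"
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// classify maps a client error onto a coarse retry decision using the
// accessors defined on kafka.Error.
func classify(err error) (retry bool) {
	var kerr kafka.Error
	if !errors.As(err, &kerr) {
		return false // not a Kafka error; treat as non-retriable
	}

	switch {
	case kerr.IsFatal():
		// The client instance is no longer usable; recreate it.
		fmt.Printf("fatal: %s (%s)\n", kerr.String(), kerr.Code())
		return false
	case kerr.IsTimeout(), kerr.IsRetriable():
		return true
	case kerr.Code() == kafka.ErrAllBrokersDown:
		return true // transient connectivity, by assumption
	default:
		return false
	}
}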
+func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error { + defer C.rd_kafka_error_destroy(cError) + return newErrorFromCError(cError) +} + +// Error returns a human readable representation of an Error +// Same as Error.String() +func (e Error) Error() string { + return e.String() +} + +// String returns a human readable representation of an Error +func (e Error) String() string { + var errstr string + if len(e.str) > 0 { + errstr = e.str + } else { + errstr = e.code.String() + } + + if e.IsFatal() { + return fmt.Sprintf("Fatal error: %s", errstr) + } + + return errstr +} + +// Code returns the ErrorCode of an Error +func (e Error) Code() ErrorCode { + return e.code +} + +// IsFatal returns true if the error is a fatal error. +// A fatal error indicates the client instance is no longer operable and +// should be terminated. Typical causes include non-recoverable +// idempotent producer errors. +func (e Error) IsFatal() bool { + return e.fatal +} + +// IsRetriable returns true if the operation that caused this error +// may be retried. +// This flag is currently only set by the Transactional producer API. +func (e Error) IsRetriable() bool { + return e.retriable +} + +// IsTimeout returns true if the error is a timeout error. +// A timeout error indicates that the operation timed out locally. +func (e Error) IsTimeout() bool { + return e.code == ErrTimedOut || e.code == ErrTimedOutQueue +} + +// TxnRequiresAbort returns true if the error is an abortable transaction error +// that requires the application to abort the current transaction with +// AbortTransaction() and start a new transaction with BeginTransaction() +// if it wishes to proceed with transactional operations. +// This flag is only set by the Transactional producer API. +func (e Error) TxnRequiresAbort() bool { + return e.txnRequiresAbort +} + +// getFatalError returns an Error object if the client instance has raised a fatal error, else nil. +func getFatalError(H Handle) error { + cErrstr := (*C.char)(C.malloc(C.size_t(512))) + defer C.free(unsafe.Pointer(cErrstr)) + + cErr := C.rd_kafka_fatal_error(H.gethandle().rk, cErrstr, 512) + if int(cErr) == 0 { + return nil + } + + err := newErrorFromCString(cErr, cErrstr) + err.fatal = true + + return err +} + +// testFatalError triggers a fatal error in the underlying client. +// This is to be used strictly for testing purposes. +func testFatalError(H Handle, code ErrorCode, str string) ErrorCode { + return ErrorCode(C.rd_kafka_test_fatal_error(H.gethandle().rk, C.rd_kafka_resp_err_t(code), C.CString(str))) +} + +func getOperationNotAllowedErrorForClosedClient() error { + return newErrorFromString(ErrState, "Operation not allowed on closed client") +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error_gen.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error_gen.go new file mode 100644 index 000000000..b90af61a0 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error_gen.go @@ -0,0 +1,112 @@ +/** + * Copyright 2020 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +// Automatically generate error codes from librdkafka +// See README for instructions +//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go + +/* +#include +#include "select_rdkafka.h" + +static const char *errdesc_to_string (const struct rd_kafka_err_desc *ed, int idx) { + return ed[idx].name; +} + +static const char *errdesc_to_desc (const struct rd_kafka_err_desc *ed, int idx) { + return ed[idx].desc; +} +*/ +import "C" + +import ( + "fmt" + "os" + "strings" + "time" +) + +// camelCase transforms a snake_case string to camelCase. +func camelCase(s string) string { + ret := "" + for _, v := range strings.Split(s, "_") { + if len(v) == 0 { + continue + } + ret += strings.ToUpper((string)(v[0])) + strings.ToLower(v[1:]) + } + return ret +} + +// WriteErrorCodes writes Go error code constants to file from the +// librdkafka error codes. +// This function is not intended for public use. +func WriteErrorCodes(f *os.File) { + f.WriteString("package kafka\n\n") + now := time.Now() + f.WriteString(fmt.Sprintf("// Copyright 2016-%d Confluent Inc.\n", now.Year())) + f.WriteString(fmt.Sprintf("// AUTOMATICALLY GENERATED ON %v USING librdkafka %s\n", + now, C.GoString(C.rd_kafka_version_str()))) + + var errdescs *C.struct_rd_kafka_err_desc + var csize C.size_t + C.rd_kafka_get_err_descs(&errdescs, &csize) + + f.WriteString(` +/* +#include "select_rdkafka.h" +*/ +import "C" + +// ErrorCode is the integer representation of local and broker error codes +type ErrorCode int + +// String returns a human readable representation of an error code +func (c ErrorCode) String() string { + return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c))) +} + +const ( +`) + + for i := 0; i < int(csize); i++ { + orig := C.GoString(C.errdesc_to_string(errdescs, C.int(i))) + if len(orig) == 0 { + continue + } + desc := C.GoString(C.errdesc_to_desc(errdescs, C.int(i))) + if len(desc) == 0 { + continue + } + + errname := "Err" + camelCase(orig) + + // Special handling to please golint + // Eof -> EOF + // Id -> ID + errname = strings.Replace(errname, "Eof", "EOF", -1) + errname = strings.Replace(errname, "Id", "ID", -1) + + f.WriteString(fmt.Sprintf("\t// %s %s\n", errname, desc)) + f.WriteString(fmt.Sprintf("\t%s ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_%s)\n", + errname, orig)) + } + + f.WriteString(")\n") + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/event.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/event.go new file mode 100644 index 000000000..aefa87bea --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/event.go @@ -0,0 +1,316 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka + +import ( + "fmt" + "os" + "unsafe" +) + +/* +#include +#include "select_rdkafka.h" +#include "glue_rdkafka.h" + + +void chdrs_to_tmphdrs (glue_msg_t *gMsg) { + size_t i = 0; + const char *name; + const void *val; + size_t size; + rd_kafka_headers_t *chdrs; + + if (rd_kafka_message_headers(gMsg->msg, &chdrs)) { + gMsg->tmphdrs = NULL; + gMsg->tmphdrsCnt = 0; + return; + } + + gMsg->tmphdrsCnt = rd_kafka_header_cnt(chdrs); + gMsg->tmphdrs = malloc(sizeof(*gMsg->tmphdrs) * gMsg->tmphdrsCnt); + + while (!rd_kafka_header_get_all(chdrs, i, + &gMsg->tmphdrs[i].key, + &gMsg->tmphdrs[i].val, + (size_t *)&gMsg->tmphdrs[i].size)) + i++; +} + +rd_kafka_event_t *_rk_queue_poll (rd_kafka_queue_t *rkq, int timeoutMs, + rd_kafka_event_type_t *evtype, + glue_msg_t *gMsg, + rd_kafka_event_t *prev_rkev) { + rd_kafka_event_t *rkev; + + if (prev_rkev) + rd_kafka_event_destroy(prev_rkev); + + rkev = rd_kafka_queue_poll(rkq, timeoutMs); + *evtype = rd_kafka_event_type(rkev); + + if (*evtype == RD_KAFKA_EVENT_FETCH) { + gMsg->msg = (rd_kafka_message_t *)rd_kafka_event_message_next(rkev); + gMsg->ts = rd_kafka_message_timestamp(gMsg->msg, &gMsg->tstype); + + if (gMsg->want_hdrs) + chdrs_to_tmphdrs(gMsg); + } + + return rkev; +} +*/ +import "C" + +func chdrsToTmphdrs(gMsg *C.glue_msg_t) { + C.chdrs_to_tmphdrs(gMsg) +} + +// Event generic interface +type Event interface { + // String returns a human-readable representation of the event + String() string +} + +// Specific event types + +// Stats statistics event +type Stats struct { + statsJSON string +} + +func (e Stats) String() string { + return e.statsJSON +} + +// AssignedPartitions consumer group rebalance event: assigned partition set +type AssignedPartitions struct { + Partitions []TopicPartition +} + +func (e AssignedPartitions) String() string { + return fmt.Sprintf("AssignedPartitions: %v", e.Partitions) +} + +// RevokedPartitions consumer group rebalance event: revoked partition set +type RevokedPartitions struct { + Partitions []TopicPartition +} + +func (e RevokedPartitions) String() string { + return fmt.Sprintf("RevokedPartitions: %v", e.Partitions) +} + +// PartitionEOF consumer reached end of partition +// Needs to be explicitly enabled by setting the `enable.partition.eof` +// configuration property to true. +type PartitionEOF TopicPartition + +func (p PartitionEOF) String() string { + return fmt.Sprintf("EOF at %s", TopicPartition(p)) +} + +// OffsetsCommitted reports committed offsets +type OffsetsCommitted struct { + Error error + Offsets []TopicPartition +} + +func (o OffsetsCommitted) String() string { + return fmt.Sprintf("OffsetsCommitted (%v, %v)", o.Error, o.Offsets) +} + +// OAuthBearerTokenRefresh indicates token refresh is required +type OAuthBearerTokenRefresh struct { + // Config is the value of the sasl.oauthbearer.config property + Config string +} + +func (o OAuthBearerTokenRefresh) String() string { + return "OAuthBearerTokenRefresh" +} + +// eventPoll polls an event from the handler's C rd_kafka_queue_t, +// translates it into an Event type and then sends on `channel` if non-nil, else returns the Event. +// term_chan is an optional channel to monitor along with producing to channel +// to indicate that `channel` is being terminated. +// returns (event Event, terminate Bool) tuple, where Terminate indicates +// if termChan received a termination event. 
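Illustrative aside, not part of the vendored file: the event types above are what a Poll()-based application actually switches on. A sketch of a single loop iteration, assuming enable.partition.eof and statistics.interval.ms have been configured so PartitionEOF and Stats events are emitted at all:

package kafkaexamples

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// pollOnce is one hypothetical iteration of a Poll()-based loop, showing the
// concrete Event types an application can expect to receive.
func pollOnce(c *kafka.Consumer) {
	switch e := c.Poll(100).(type) {
	case nil:
		// Poll timed out; nothing to do.
	case *kafka.Message:
		fmt.Printf("%s: %s\n", e.TopicPartition, string(e.Value))
	case kafka.PartitionEOF:
		fmt.Printf("reached end of partition: %s\n", e)
	case kafka.OffsetsCommitted:
		fmt.Printf("committed: %v (err: %v)\n", e.Offsets, e.Error)
	case kafka.Stats:
		fmt.Printf("stats: %d bytes of JSON\n", len(e.String()))
	case kafka.Error:
		fmt.Printf("error: %v\n", e)
	default:
		fmt.Printf("ignored event: %s\n", e)
	}
}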
+func (h *handle) eventPoll(channel chan Event, timeoutMs int, maxEvents int, termChan chan bool) (Event, bool) { + + var prevRkev *C.rd_kafka_event_t + term := false + + var retval Event + + if channel == nil { + maxEvents = 1 + } +out: + for evcnt := 0; evcnt < maxEvents; evcnt++ { + var evtype C.rd_kafka_event_type_t + var gMsg C.glue_msg_t + gMsg.want_hdrs = C.int8_t(bool2cint(h.msgFields.Headers)) + rkev := C._rk_queue_poll(h.rkq, C.int(timeoutMs), &evtype, &gMsg, prevRkev) + prevRkev = rkev + timeoutMs = 0 + + retval = nil + + switch evtype { + case C.RD_KAFKA_EVENT_FETCH: + // Consumer fetch event, new message. + // Extracted into temporary gMsg for optimization + retval = h.newMessageFromGlueMsg(&gMsg) + + case C.RD_KAFKA_EVENT_REBALANCE: + // Consumer rebalance event + retval = h.c.handleRebalanceEvent(channel, rkev) + + case C.RD_KAFKA_EVENT_ERROR: + // Error event + cErr := C.rd_kafka_event_error(rkev) + if cErr == C.RD_KAFKA_RESP_ERR__PARTITION_EOF { + crktpar := C.rd_kafka_event_topic_partition(rkev) + if crktpar == nil { + break + } + + defer C.rd_kafka_topic_partition_destroy(crktpar) + var peof PartitionEOF + setupTopicPartitionFromCrktpar((*TopicPartition)(&peof), crktpar) + + retval = peof + + } else if int(C.rd_kafka_event_error_is_fatal(rkev)) != 0 { + // A fatal error has been raised. + // Extract the actual error from the client + // instance and return a new Error with + // fatal set to true. + cFatalErrstrSize := C.size_t(512) + cFatalErrstr := (*C.char)(C.malloc(cFatalErrstrSize)) + defer C.free(unsafe.Pointer(cFatalErrstr)) + cFatalErr := C.rd_kafka_fatal_error(h.rk, cFatalErrstr, cFatalErrstrSize) + fatalErr := newErrorFromCString(cFatalErr, cFatalErrstr) + fatalErr.fatal = true + retval = fatalErr + + } else { + retval = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)) + } + + case C.RD_KAFKA_EVENT_STATS: + retval = &Stats{C.GoString(C.rd_kafka_event_stats(rkev))} + + case C.RD_KAFKA_EVENT_DR: + // Producer Delivery Report event + // Each such event contains delivery reports for all + // messages in the produced batch. + // Forward delivery reports to per-message's response channel + // or to the global Producer.Events channel, or none. 
+ rkmessages := make([]*C.rd_kafka_message_t, int(C.rd_kafka_event_message_count(rkev))) + + cnt := int(C.rd_kafka_event_message_array(rkev, (**C.rd_kafka_message_t)(unsafe.Pointer(&rkmessages[0])), C.size_t(len(rkmessages)))) + + for _, rkmessage := range rkmessages[:cnt] { + msg := h.newMessageFromC(rkmessage) + var ch *chan Event + + if rkmessage._private != nil { + // Find cgoif by id + cg, found := h.cgoGet((int)((uintptr)(rkmessage._private))) + if found { + cdr := cg.(cgoDr) + + if cdr.deliveryChan != nil { + ch = &cdr.deliveryChan + } + msg.Opaque = cdr.opaque + } + } + + if ch == nil && h.fwdDr { + ch = &channel + } + + if ch != nil { + select { + case *ch <- msg: + case <-termChan: + retval = nil + term = true + break out + } + + } else { + retval = msg + break out + } + } + + case C.RD_KAFKA_EVENT_OFFSET_COMMIT: + // Offsets committed + cErr := C.rd_kafka_event_error(rkev) + coffsets := C.rd_kafka_event_topic_partition_list(rkev) + var offsets []TopicPartition + if coffsets != nil { + offsets = newTopicPartitionsFromCparts(coffsets) + } + + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + retval = OffsetsCommitted{newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)), offsets} + } else { + retval = OffsetsCommitted{nil, offsets} + } + + case C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: + ev := OAuthBearerTokenRefresh{C.GoString(C.rd_kafka_event_config_string(rkev))} + retval = ev + + case C.RD_KAFKA_EVENT_NONE: + // poll timed out: no events available + break out + + default: + if rkev != nil { + fmt.Fprintf(os.Stderr, "Ignored event %s\n", + C.GoString(C.rd_kafka_event_name(rkev))) + } + + } + + if retval != nil { + if channel != nil { + select { + case channel <- retval: + case <-termChan: + retval = nil + term = true + break out + } + } else { + break out + } + } + } + + if prevRkev != nil { + C.rd_kafka_event_destroy(prevRkev) + } + + return retval, term +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/generated_errors.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/generated_errors.go new file mode 100644 index 000000000..619e6ae9d --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/generated_errors.go @@ -0,0 +1,340 @@ +package kafka + +// Copyright 2016-2023 Confluent Inc. 
+// AUTOMATICALLY GENERATED ON 2023-10-25 15:32:05.267754826 +0200 CEST m=+0.000622161 USING librdkafka 2.3.0 + +/* +#include "select_rdkafka.h" +*/ +import "C" + +// ErrorCode is the integer representation of local and broker error codes +type ErrorCode int + +// String returns a human readable representation of an error code +func (c ErrorCode) String() string { + return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c))) +} + +const ( + // ErrBadMsg Local: Bad message format + ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG) + // ErrBadCompression Local: Invalid compressed data + ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION) + // ErrDestroy Local: Broker handle destroyed + ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY) + // ErrFail Local: Communication failure with broker + ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL) + // ErrTransport Local: Broker transport failure + ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT) + // ErrCritSysResource Local: Critical system resource failure + ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE) + // ErrResolve Local: Host resolution failure + ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE) + // ErrMsgTimedOut Local: Message timed out + ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT) + // ErrPartitionEOF Broker: No more messages + ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF) + // ErrUnknownPartition Local: Unknown partition + ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + // ErrFs Local: File or filesystem error + ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS) + // ErrUnknownTopic Local: Unknown topic + ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + // ErrAllBrokersDown Local: All broker connections are down + ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) + // ErrInvalidArg Local: Invalid argument or configuration + ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG) + // ErrTimedOut Local: Timed out + ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT) + // ErrQueueFull Local: Queue full + ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL) + // ErrIsrInsuff Local: ISR count insufficient + ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF) + // ErrNodeUpdate Local: Broker node update + ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE) + // ErrSsl Local: SSL error + ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL) + // ErrWaitCoord Local: Waiting for coordinator + ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD) + // ErrUnknownGroup Local: Unknown group + ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP) + // ErrInProgress Local: Operation in progress + ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS) + // ErrPrevInProgress Local: Previous operation in progress + ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS) + // ErrExistingSubscription Local: Existing subscription + ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION) + // ErrAssignPartitions Local: Assign partitions + ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) + // ErrRevokePartitions Local: Revoke partitions + ErrRevokePartitions ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) + // ErrConflict Local: Conflicting use + ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT) + // ErrState Local: Erroneous state + ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE) + // ErrUnknownProtocol Local: Unknown protocol + ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL) + // ErrNotImplemented Local: Not implemented + ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) + // ErrAuthentication Local: Authentication failure + ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION) + // ErrNoOffset Local: No offset stored + ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET) + // ErrOutdated Local: Outdated + ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED) + // ErrTimedOutQueue Local: Timed out in queue + ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) + // ErrUnsupportedFeature Local: Required feature not supported by broker + ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE) + // ErrWaitCache Local: Awaiting cache update + ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE) + // ErrIntr Local: Operation interrupted + ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR) + // ErrKeySerialization Local: Key serialization error + ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION) + // ErrValueSerialization Local: Value serialization error + ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION) + // ErrKeyDeserialization Local: Key deserialization error + ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION) + // ErrValueDeserialization Local: Value deserialization error + ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION) + // ErrPartial Local: Partial response + ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL) + // ErrReadOnly Local: Read-only object + ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY) + // ErrNoent Local: No such entry + ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT) + // ErrUnderflow Local: Read underflow + ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW) + // ErrInvalidType Local: Invalid type + ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE) + // ErrRetry Local: Retry operation + ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY) + // ErrPurgeQueue Local: Purged in queue + ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE) + // ErrPurgeInflight Local: Purged in flight + ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT) + // ErrFatal Local: Fatal error + ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL) + // ErrInconsistent Local: Inconsistent state + ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT) + // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding + ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE) + // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded + ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) + // ErrUnknownBroker Local: Unknown broker + ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER) + // ErrNotConfigured Local: Functionality 
not configured + ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED) + // ErrFenced Local: This instance has been fenced by a newer instance + ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED) + // ErrApplication Local: Application generated error + ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION) + // ErrAssignmentLost Local: Group partition assignment lost + ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST) + // ErrNoop Local: No operation performed + ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP) + // ErrAutoOffsetReset Local: No offset to automatically reset to + ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) + // ErrLogTruncation Local: Partition log truncation detected + ErrLogTruncation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__LOG_TRUNCATION) + // ErrUnknown Unknown broker error + ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN) + // ErrNoError Success + ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR) + // ErrOffsetOutOfRange Broker: Offset out of range + ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE) + // ErrInvalidMsg Broker: Invalid message + ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG) + // ErrUnknownTopicOrPart Broker: Unknown topic or partition + ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + // ErrInvalidMsgSize Broker: Invalid message size + ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE) + // ErrLeaderNotAvailable Broker: Leader not available + ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) + // ErrNotLeaderForPartition Broker: Not leader for partition + ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION) + // ErrRequestTimedOut Broker: Request timed out + ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT) + // ErrBrokerNotAvailable Broker: Broker not available + ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE) + // ErrReplicaNotAvailable Broker: Replica not available + ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE) + // ErrMsgSizeTooLarge Broker: Message size too large + ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) + // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode + ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH) + // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large + ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE) + // ErrNetworkException Broker: Broker disconnected before response received + ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION) + // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress + ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS) + // ErrCoordinatorNotAvailable Broker: Coordinator not available + ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE) + // ErrNotCoordinator Broker: Not coordinator + ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR) + // ErrTopicException Broker: Invalid topic + ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION) + // 
ErrRecordListTooLarge Broker: Message batch larger than configured server segment size + ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE) + // ErrNotEnoughReplicas Broker: Not enough in-sync replicas + ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS) + // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas + ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND) + // ErrInvalidRequiredAcks Broker: Invalid required acks value + ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS) + // ErrIllegalGeneration Broker: Specified group generation id is not valid + ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) + // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol + ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL) + // ErrInvalidGroupID Broker: Invalid group.id + ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID) + // ErrUnknownMemberID Broker: Unknown member + ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) + // ErrInvalidSessionTimeout Broker: Invalid session timeout + ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT) + // ErrRebalanceInProgress Broker: Group rebalance in progress + ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS) + // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid + ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE) + // ErrTopicAuthorizationFailed Broker: Topic authorization failed + ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) + // ErrGroupAuthorizationFailed Broker: Group authorization failed + ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED) + // ErrClusterAuthorizationFailed Broker: Cluster authorization failed + ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED) + // ErrInvalidTimestamp Broker: Invalid timestamp + ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP) + // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism + ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM) + // ErrIllegalSaslState Broker: Request not valid in current SASL state + ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE) + // ErrUnsupportedVersion Broker: API version not supported + ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) + // ErrTopicAlreadyExists Broker: Topic already exists + ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS) + // ErrInvalidPartitions Broker: Invalid number of partitions + ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS) + // ErrInvalidReplicationFactor Broker: Invalid replication factor + ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR) + // ErrInvalidReplicaAssignment Broker: Invalid replica assignment + ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT) + // ErrInvalidConfig Broker: 
Configuration is invalid + ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG) + // ErrNotController Broker: Not controller for cluster + ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER) + // ErrInvalidRequest Broker: Invalid request + ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST) + // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request + ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT) + // ErrPolicyViolation Broker: Policy violation + ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION) + // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number + ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER) + // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number + ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER) + // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch + ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) + // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state + ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE) + // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id + ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING) + // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms + ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT) + // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing + ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS) + // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer + ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED) + // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed + ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED) + // ErrSecurityDisabled Broker: Security features are disabled + ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED) + // ErrOperationNotAttempted Broker: Operation not attempted + ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED) + // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk + ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR) + // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config + ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND) + // ErrSaslAuthenticationFailed Broker: SASL Authentication failed + ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED) + // ErrUnknownProducerID Broker: Unknown Producer Id + ErrUnknownProducerID ErrorCode 
= ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID) + // ErrReassignmentInProgress Broker: Partition reassignment is in progress + ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS) + // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled + ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED) + // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server + ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND) + // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer + ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH) + // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection + ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED) + // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed + ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED) + // ErrDelegationTokenExpired Broker: Delegation Token is expired + ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED) + // ErrInvalidPrincipalType Broker: Supplied principalType is not supported + ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE) + // ErrNonEmptyGroup Broker: The group is not empty + ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP) + // ErrGroupIDNotFound Broker: The group id does not exist + ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND) + // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found + ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND) + // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid + ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH) + // ErrListenerNotFound Broker: No matching listener + ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND) + // ErrTopicDeletionDisabled Broker: Topic deletion is disabled + ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED) + // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch + ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH) + // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch + ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) + // ErrUnsupportedCompressionType Broker: Unsupported compression type + ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) + // ErrStaleBrokerEpoch Broker: Broker epoch has changed + ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH) + // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up + ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) + // ErrMemberIDRequired Broker: Group member needs a valid member ID + ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) + // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available + 
ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE) + // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size + ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED) + // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id + ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) + // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available + ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE) + // ErrElectionNotNeeded Broker: Leader election not needed for topic partition + ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED) + // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress + ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS) + // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it + ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC) + // ErrInvalidRecord Broker: Broker failed to validate record + ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD) + // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared + ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) + // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded + ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED) + // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one + ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED) + // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist + ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND) + // ErrDuplicateResource Broker: Request illegally referred to the same resource twice + ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE) + // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability + ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL) + // ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters + ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET) + // ErrInvalidUpdateVersion Broker: Invalid update version + ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION) + // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error + ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED) + // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding + ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE) +) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/glue_rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/glue_rdkafka.h new file mode 100644 index 000000000..bbff50752 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/glue_rdkafka.h @@ -0,0 
+1,48 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + + +/** + * Glue between Go, Cgo and librdkafka + */ + + +/** + * Temporary C to Go header representation + */ +typedef struct tmphdr_s { + const char *key; + const void *val; // producer: malloc()ed by Go code if size > 0 + // consumer: owned by librdkafka + ssize_t size; +} tmphdr_t; + + + +/** + * @struct This is a glue struct used by the C code in this client to + * effectively map fields from a librdkafka rd_kafka_message_t + * to something usable in Go with as few CGo calls as possible. + */ +typedef struct glue_msg_s { + rd_kafka_message_t *msg; + rd_kafka_timestamp_type_t tstype; + int64_t ts; + tmphdr_t *tmphdrs; + size_t tmphdrsCnt; + int8_t want_hdrs; /**< If true, copy headers */ +} glue_msg_t; diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/handle.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/handle.go new file mode 100644 index 000000000..643d80bc6 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/handle.go @@ -0,0 +1,385 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "strings" + "sync" + "time" + "unsafe" +) + +/* +#include "select_rdkafka.h" +#include +*/ +import "C" + +// OAuthBearerToken represents the data to be transmitted +// to a broker during SASL/OAUTHBEARER authentication. +type OAuthBearerToken struct { + // Token value, often (but not necessarily) a JWS compact serialization + // as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet + // the regular expression for a SASL/OAUTHBEARER value defined at + // https://tools.ietf.org/html/rfc7628#section-3.1 + TokenValue string + // Metadata about the token indicating when it expires (local time); + // it must represent a time in the future + Expiration time.Time + // Metadata about the token indicating the Kafka principal name + // to which it applies (for example, "admin") + Principal string + // SASL extensions, if any, to be communicated to the broker during + // authentication (all keys and values of which must meet the regular + // expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1, + // and it must not contain the reserved "auth" key) + Extensions map[string]string +} + +// Handle represents a generic client handle containing common parts for +// both Producer and Consumer. 
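Illustrative aside, not part of the vendored file: an application normally fills in the OAuthBearerToken struct above in response to an OAuthBearerTokenRefresh event seen on Poll()/Events(). A sketch, where fetchToken stands in for whatever token endpoint the application uses and the principal is a placeholder:

package kafkaexamples

import (
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// handleTokenRefresh is a hypothetical handler for an OAuthBearerTokenRefresh
// event; fetchToken is whatever the application uses to obtain a
// SASL/OAUTHBEARER token value and its expiry.
func handleTokenRefresh(c *kafka.Consumer, fetchToken func() (value string, expiry time.Time, err error)) {
	value, expiry, err := fetchToken()
	if err != nil {
		// Reports the failure and schedules another refresh attempt ~10s later.
		_ = c.SetOAuthBearerTokenFailure(err.Error())
		return
	}

	_ = c.SetOAuthBearerToken(kafka.OAuthBearerToken{
		TokenValue: value,
		Expiration: expiry,
		Principal:  "admin", // example principal
		Extensions: map[string]string{},
	})
}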
+type Handle interface { + // SetOAuthBearerToken sets the the data to be transmitted + // to a broker during SASL/OAUTHBEARER authentication. It will return nil + // on success, otherwise an error if: + // 1) the token data is invalid (meaning an expiration time in the past + // or either a token value or an extension key or value that does not meet + // the regular expression requirements as per + // https://tools.ietf.org/html/rfc7628#section-3.1); + // 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; + // 3) SASL/OAUTHBEARER is supported but is not configured as the client's + // authentication mechanism. + SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error + + // SetOAuthBearerTokenFailure sets the error message describing why token + // retrieval/setting failed; it also schedules a new token refresh event for 10 + // seconds later so the attempt may be retried. It will return nil on + // success, otherwise an error if: + // 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; + // 2) SASL/OAUTHBEARER is supported but is not configured as the client's + // authentication mechanism. + SetOAuthBearerTokenFailure(errstr string) error + + // gethandle() returns the internal handle struct pointer + gethandle() *handle + + // verifyClient() returns the validity of client + verifyClient() error + + // IsClosed() returns the bool to check if the client is closed + IsClosed() bool +} + +// Common instance handle for both Producer and Consumer +type handle struct { + rk *C.rd_kafka_t + rkq *C.rd_kafka_queue_t + + // Forward logs from librdkafka log queue to logs channel. + logs chan LogEvent + logq *C.rd_kafka_queue_t + closeLogsChan bool + + // Topic <-> rkt caches + rktCacheLock sync.Mutex + // topic name -> rkt cache + rktCache map[string]*C.rd_kafka_topic_t + // rkt -> topic name cache + rktNameCache map[*C.rd_kafka_topic_t]string + + // Cached instance name to avoid CGo call in String() + name string + + // + // cgo map + // Maps C callbacks based on cgoid back to its Go object + cgoLock sync.Mutex + cgoidNext uintptr + cgomap map[int]cgoif + + // + // producer + // + p *Producer + + // Forward delivery reports on Producer.Events channel + fwdDr bool + + // Enabled message fields for delivery reports and consumed messages. + msgFields *messageFields + + // + // consumer + // + c *Consumer + + // WaitGroup to wait for spawned go-routines to finish. 
+ waitGroup sync.WaitGroup +} + +func (h *handle) String() string { + return h.name +} + +func (h *handle) setup() { + h.rktCache = make(map[string]*C.rd_kafka_topic_t) + h.rktNameCache = make(map[*C.rd_kafka_topic_t]string) + h.cgomap = make(map[int]cgoif) + h.name = C.GoString(C.rd_kafka_name(h.rk)) + if h.msgFields == nil { + h.msgFields = newMessageFields() + } +} + +func (h *handle) cleanup() { + if h.logs != nil { + C.rd_kafka_queue_destroy(h.logq) + if h.closeLogsChan { + close(h.logs) + } + } + + for _, crkt := range h.rktCache { + C.rd_kafka_topic_destroy(crkt) + } + + if h.rkq != nil { + C.rd_kafka_queue_destroy(h.rkq) + } +} + +func (h *handle) setupLogQueue(logsChan chan LogEvent, termChan chan bool) { + if logsChan == nil { + logsChan = make(chan LogEvent, 10000) + h.closeLogsChan = true + } + + h.logs = logsChan + + // Let librdkafka forward logs to our log queue instead of the main queue + h.logq = C.rd_kafka_queue_new(h.rk) + C.rd_kafka_set_log_queue(h.rk, h.logq) + + // Start a polling goroutine to consume the log queue + h.waitGroup.Add(1) + go func() { + h.pollLogEvents(h.logs, 100, termChan) + h.waitGroup.Done() + }() + +} + +// getRkt0 finds or creates and returns a C topic_t object from the local cache. +func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) { + if doLock { + h.rktCacheLock.Lock() + defer h.rktCacheLock.Unlock() + } + crkt, ok := h.rktCache[topic] + if ok { + return crkt + } + + if ctopic == nil { + ctopic = C.CString(topic) + defer C.free(unsafe.Pointer(ctopic)) + } + + crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil) + if crkt == nil { + panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s", + topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error())))) + } + + h.rktCache[topic] = crkt + h.rktNameCache[crkt] = topic + + return crkt +} + +// getRkt finds or creates and returns a C topic_t object from the local cache. +func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) { + return h.getRkt0(topic, nil, true) +} + +// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably +// using the local cache to avoid a cgo call. +func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) { + h.rktCacheLock.Lock() + defer h.rktCacheLock.Unlock() + + topic, ok := h.rktNameCache[crkt] + if ok { + return topic + } + + // we need our own copy/refcount of the crkt + ctopic := C.rd_kafka_topic_name(crkt) + topic = C.GoString(ctopic) + + crkt = h.getRkt0(topic, ctopic, false /* dont lock */) + + return topic +} + +// cgoif is a generic interface for holding Go state passed as opaque +// value to the C code. +// Since pointers to complex Go types cannot be passed to C we instead create +// a cgoif object, generate a unique id that is added to the cgomap, +// and then pass that id to the C code. When the C code callback is called we +// use the id to look up the cgoif object in the cgomap. +type cgoif interface{} + +// delivery report cgoif container +type cgoDr struct { + deliveryChan chan Event + opaque interface{} +} + +// cgoPut adds object cg to the handle's cgo map and returns a +// unique id for the added entry. +// Thread-safe. +// FIXME: the uniquity of the id is questionable over time. 
+func (h *handle) cgoPut(cg cgoif) (cgoid int) { + h.cgoLock.Lock() + defer h.cgoLock.Unlock() + + h.cgoidNext++ + if h.cgoidNext == 0 { + h.cgoidNext++ + } + cgoid = (int)(h.cgoidNext) + h.cgomap[cgoid] = cg + return cgoid +} + +// cgoGet looks up cgoid in the cgo map, deletes the reference from the map +// and returns the object, if found. Else returns nil, false. +// Thread-safe. +func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) { + if cgoid == 0 { + return nil, false + } + + h.cgoLock.Lock() + defer h.cgoLock.Unlock() + cg, found = h.cgomap[cgoid] + if found { + delete(h.cgomap, cgoid) + } + + return cg, found +} + +// setOauthBearerToken - see rd_kafka_oauthbearer_set_token() +func (h *handle) setOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { + cTokenValue := C.CString(oauthBearerToken.TokenValue) + defer C.free(unsafe.Pointer(cTokenValue)) + + cPrincipal := C.CString(oauthBearerToken.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cExtensions := make([]*C.char, 2*len(oauthBearerToken.Extensions)) + extensionSize := 0 + for key, value := range oauthBearerToken.Extensions { + cExtensions[extensionSize] = C.CString(key) + defer C.free(unsafe.Pointer(cExtensions[extensionSize])) + extensionSize++ + cExtensions[extensionSize] = C.CString(value) + defer C.free(unsafe.Pointer(cExtensions[extensionSize])) + extensionSize++ + } + + var cExtensionsToUse **C.char + if extensionSize > 0 { + cExtensionsToUse = (**C.char)(unsafe.Pointer(&cExtensions[0])) + } + + cErr := C.rd_kafka_oauthbearer_set_token(h.rk, cTokenValue, + C.int64_t(oauthBearerToken.Expiration.UnixNano()/(1000*1000)), cPrincipal, + cExtensionsToUse, C.size_t(extensionSize), cErrstr, cErrstrSize) + if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil + } + return newErrorFromCString(cErr, cErrstr) +} + +// setOauthBearerTokenFailure - see rd_kafka_oauthbearer_set_token_failure() +func (h *handle) setOAuthBearerTokenFailure(errstr string) error { + cerrstr := C.CString(errstr) + defer C.free(unsafe.Pointer(cerrstr)) + cErr := C.rd_kafka_oauthbearer_set_token_failure(h.rk, cerrstr) + if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil + } + return newError(cErr) +} + +// messageFields controls which fields are made available for producer delivery reports & consumed messages. 
+// true values indicate that the field should be included +type messageFields struct { + Key bool + Value bool + Headers bool +} + +// disableAll disable all fields +func (mf *messageFields) disableAll() { + mf.Key = false + mf.Value = false + mf.Headers = false +} + +// newMessageFields returns a new messageFields with all fields enabled +func newMessageFields() *messageFields { + return &messageFields{ + Key: true, + Value: true, + Headers: true, + } +} + +// newMessageFieldsFrom constructs a new messageFields from the given configuration value +func newMessageFieldsFrom(v ConfigValue) (*messageFields, error) { + msgFields := newMessageFields() + switch v { + case "all": + // nothing to do + case "", "none": + msgFields.disableAll() + default: + msgFields.disableAll() + for _, value := range strings.Split(v.(string), ",") { + switch value { + case "key": + msgFields.Key = true + case "value": + msgFields.Value = true + case "headers": + msgFields.Headers = true + default: + return nil, fmt.Errorf("unknown message field: %s", value) + } + } + } + return msgFields, nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/header.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/header.go new file mode 100644 index 000000000..6baa3812a --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/header.go @@ -0,0 +1,67 @@ +/** + * Copyright 2018 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "strconv" +) + +/* +#include +#include "select_rdkafka.h" +#include "glue_rdkafka.h" +*/ +import "C" + +// Header represents a single Kafka message header. +// +// Message headers are made up of a list of Header elements, retaining their original insert +// order and allowing for duplicate Keys. +// +// Key is a human readable string identifying the header. +// Value is the key's binary value, Kafka does not put any restrictions on the format of +// of the Value but it should be made relatively compact. +// The value may be a byte array, empty, or nil. +// +// NOTE: Message headers are not available on producer delivery report messages. +type Header struct { + Key string // Header name (utf-8 string) + Value []byte // Header value (nil, empty, or binary) +} + +// String returns the Header Key and data in a human representable possibly truncated form +// suitable for displaying to the user. 
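Since message headers surface in both delivery reports and consumed messages, a minimal sketch of how a producer might attach them follows; the topic name, payload and producer handle are assumptions for illustration only.

package main

import (
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// produceWithHeaders attaches Header values to a produced message; duplicate
// keys are allowed and insertion order is preserved, as described above.
func produceWithHeaders(p *kafka.Producer) error {
	topic := "example-topic" // assumed topic name
	return p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("payload"),
		Headers: []kafka.Header{
			{Key: "trace-id", Value: []byte("abc123")},
			{Key: "trace-id", Value: []byte("def456")}, // duplicate keys are legal
			{Key: "empty", Value: nil},                 // nil/empty values are legal
		},
	}, nil) // nil delivery channel: reports arrive on p.Events()
}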
+func (h Header) String() string { + if h.Value == nil { + return fmt.Sprintf("%s=nil", h.Key) + } + + valueLen := len(h.Value) + if valueLen == 0 { + return fmt.Sprintf("%s=", h.Key) + } + + truncSize := valueLen + trunc := "" + if valueLen > 50+15 { + truncSize = 50 + trunc = fmt.Sprintf("(%d more bytes)", valueLen-truncSize) + } + + return fmt.Sprintf("%s=%s%s", h.Key, strconv.Quote(string(h.Value[:truncSize])), trunc) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/kafka.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/kafka.go new file mode 100644 index 000000000..d349f0e86 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/kafka.go @@ -0,0 +1,483 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package kafka provides high-level Apache Kafka producer and consumers +// using bindings on-top of the librdkafka C library. +// +// # High-level Consumer +// +// * Decide if you want to read messages and events by calling `.Poll()` or +// the deprecated option of using the `.Events()` channel. (If you want to use +// `.Events()` channel then set `"go.events.channel.enable": true`). +// +// * Create a Consumer with `kafka.NewConsumer()` providing at +// least the `bootstrap.servers` and `group.id` configuration properties. +// +// * Call `.Subscribe()` or (`.SubscribeTopics()` to subscribe to multiple topics) +// to join the group with the specified subscription set. +// Subscriptions are atomic, calling `.Subscribe*()` again will leave +// the group and rejoin with the new set of topics. +// +// * Start reading events and messages from either the `.Events` channel +// or by calling `.Poll()`. +// +// * When the group has rebalanced each client member is assigned a +// (sub-)set of topic+partitions. +// By default the consumer will start fetching messages for its assigned +// partitions at this point, but your application may enable rebalance +// events to get an insight into what the assigned partitions where +// as well as set the initial offsets. To do this you need to pass +// `"go.application.rebalance.enable": true` to the `NewConsumer()` call +// mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event +// with the assigned partition set. You can optionally modify the initial +// offsets (they'll default to stored offsets and if there are no previously stored +// offsets it will fall back to `"auto.offset.reset"` +// which defaults to the `latest` message) and then call `.Assign(partitions)` +// to start consuming. If you don't need to modify the initial offsets you will +// not need to call `.Assign()`, the client will do so automatically for you if +// you dont, unless you are using the channel-based consumer in which case +// you MUST call `.Assign()` when receiving the `AssignedPartitions` and +// `RevokedPartitions` events. 
+// +// * As messages are fetched they will be made available on either the +// `.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`. +// +// * Handle messages, events and errors to your liking. +// +// * When you are done consuming call `.Close()` to commit final offsets +// and leave the consumer group. +// +// # Producer +// +// * Create a Producer with `kafka.NewProducer()` providing at least +// the `bootstrap.servers` configuration properties. +// +// * Messages may now be produced either by sending a `*kafka.Message` +// on the `.ProduceChannel` or by calling `.Produce()`. +// +// * Producing is an asynchronous operation so the client notifies the application +// of per-message produce success or failure through something called delivery reports. +// Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message` +// and you should check `msg.TopicPartition.Error` for `nil` to find out if the message +// was succesfully delivered or not. +// It is also possible to direct delivery reports to alternate channels +// by providing a non-nil `chan Event` channel to `.Produce()`. +// If no delivery reports are wanted they can be completely disabled by +// setting configuration property `"go.delivery.reports": false`. +// +// * When you are done producing messages you will need to make sure all messages +// are indeed delivered to the broker (or failed), remember that this is +// an asynchronous client so some of your messages may be lingering in internal +// channels or tranmission queues. +// To do this you can either keep track of the messages you've produced +// and wait for their corresponding delivery reports, or call the convenience +// function `.Flush()` that will block until all message deliveries are done +// or the provided timeout elapses. +// +// * Finally call `.Close()` to decommission the producer. +// +// # Transactional producer API +// +// The transactional producer operates on top of the idempotent producer, +// and provides full exactly-once semantics (EOS) for Apache Kafka when used +// with the transaction aware consumer (`isolation.level=read_committed`). +// +// A producer instance is configured for transactions by setting the +// `transactional.id` to an identifier unique for the application. This +// id will be used to fence stale transactions from previous instances of +// the application, typically following an outage or crash. +// +// After creating the transactional producer instance using `NewProducer()` +// the transactional state must be initialized by calling +// `InitTransactions()`. This is a blocking call that will +// acquire a runtime producer id from the transaction coordinator broker +// as well as abort any stale transactions and fence any still running producer +// instances with the same `transactional.id`. +// +// Once transactions are initialized the application may begin a new +// transaction by calling `BeginTransaction()`. +// A producer instance may only have one single on-going transaction. +// +// Any messages produced after the transaction has been started will +// belong to the ongoing transaction and will be committed or aborted +// atomically. +// It is not permitted to produce messages outside a transaction +// boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`, +// `AbortTransaction()` or if the current transaction has failed. 
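To make the transactional sequence above concrete, here is a condensed, non-authoritative sketch of one produce-and-commit attempt; the broker address, transactional.id, message contents and the one-second back-off are assumptions for illustration, not recommendations from this library.

package main

import (
	"context"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// produceInTransaction initializes transactions, produces a batch inside one
// transaction and commits it, handling abortable and retriable errors as the
// documentation above suggests.
func produceInTransaction(ctx context.Context, msgs []*kafka.Message) error {
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092", // assumed broker
		"transactional.id":  "example-txn-id", // assumed, unique per app instance
	})
	if err != nil {
		return err
	}
	defer p.Close()

	if err = p.InitTransactions(ctx); err != nil { // fences stale instances
		return err
	}
	if err = p.BeginTransaction(); err != nil {
		return err
	}
	for _, m := range msgs {
		if err = p.Produce(m, nil); err != nil {
			_ = p.AbortTransaction(ctx)
			return err
		}
	}
	for {
		err = p.CommitTransaction(ctx)
		if err == nil {
			return nil
		}
		ke, ok := err.(kafka.Error)
		if ok && ke.TxnRequiresAbort() {
			// Abort and let the caller retry with freshly consumed input.
			return p.AbortTransaction(ctx)
		}
		if ok && ke.IsRetriable() {
			time.Sleep(time.Second) // brief grace period, then retry the commit
			continue
		}
		return err // treat remaining errors as fatal, per the guidance above
	}
}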
+// +// If consumed messages are used as input to the transaction, the consumer +// instance must be configured with `enable.auto.commit` set to `false`. +// To commit the consumed offsets along with the transaction pass the +// list of consumed partitions and the last offset processed + 1 to +// `SendOffsetsToTransaction()` prior to committing the transaction. +// This allows an aborted transaction to be restarted using the previously +// committed offsets. +// +// To commit the produced messages, and any consumed offsets, to the +// current transaction, call `CommitTransaction()`. +// This call will block until the transaction has been fully committed or +// failed (typically due to fencing by a newer producer instance). +// +// Alternatively, if processing fails, or an abortable transaction error is +// raised, the transaction needs to be aborted by calling +// `AbortTransaction()` which marks any produced messages and +// offset commits as aborted. +// +// After the current transaction has been committed or aborted a new +// transaction may be started by calling `BeginTransaction()` again. +// +// Retriable errors: +// Some error cases allow the attempted operation to be retried, this is +// indicated by the error object having the retriable flag set which can +// be detected by calling `err.(kafka.Error).IsRetriable()`. +// When this flag is set the application may retry the operation immediately +// or preferably after a shorter grace period (to avoid busy-looping). +// Retriable errors include timeouts, broker transport failures, etc. +// +// Abortable errors: +// An ongoing transaction may fail permanently due to various errors, +// such as transaction coordinator becoming unavailable, write failures to the +// Apache Kafka log, under-replicated partitions, etc. +// At this point the producer application must abort the current transaction +// using `AbortTransaction()` and optionally start a new transaction +// by calling `BeginTransaction()`. +// Whether an error is abortable or not is detected by calling +// `err.(kafka.Error).TxnRequiresAbort()` on the returned error object. +// +// Fatal errors: +// While the underlying idempotent producer will typically only raise +// fatal errors for unrecoverable cluster errors where the idempotency +// guarantees can't be maintained, most of these are treated as abortable by +// the transactional producer since transactions may be aborted and retried +// in their entirety; +// The transactional producer on the other hand introduces a set of additional +// fatal errors which the application needs to handle by shutting down the +// producer and terminate. There is no way for a producer instance to recover +// from fatal errors. +// Whether an error is fatal or not is detected by calling +// `err.(kafka.Error).IsFatal()` on the returned error object or by checking +// the global `GetFatalError()`. +// +// Handling of other errors: +// For errors that have neither retriable, abortable or the fatal flag set +// it is not always obvious how to handle them. While some of these errors +// may be indicative of bugs in the application code, such as when +// an invalid parameter is passed to a method, other errors might originate +// from the broker and be passed thru as-is to the application. +// The general recommendation is to treat these errors, that have +// neither the retriable or abortable flags set, as fatal. +// +// Error handling example: +// +// retry: +// +// err := producer.CommitTransaction(...) 
+// if err == nil { +// return nil +// } else if err.(kafka.Error).TxnRequiresAbort() { +// do_abort_transaction_and_reset_inputs() +// } else if err.(kafka.Error).IsRetriable() { +// goto retry +// } else { // treat all other errors as fatal errors +// panic(err) +// } +// +// # Events +// +// Apart from emitting messages and delivery reports the client also communicates +// with the application through a number of different event types. +// An application may choose to handle or ignore these events. +// +// # Consumer events +// +// * `*kafka.Message` - a fetched message. +// +// * `AssignedPartitions` - The assigned partition set for this client following a rebalance. +// Requires `go.application.rebalance.enable` +// +// * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance. +// `AssignedPartitions` and `RevokedPartitions` are symmetrical. +// Requires `go.application.rebalance.enable` +// +// * `PartitionEOF` - Consumer has reached the end of a partition. +// NOTE: The consumer will keep trying to fetch new messages for the partition. +// +// * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled). +// +// # Producer events +// +// * `*kafka.Message` - delivery report for produced message. +// Check `.TopicPartition.Error` for delivery result. +// +// # Generic events for both Consumer and Producer +// +// * `KafkaError` - client (error codes are prefixed with _) or broker error. +// These errors are normally just informational since the +// client will try its best to automatically recover (eventually). +// +// * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required. +// This event only occurs with sasl.mechanism=OAUTHBEARER. +// Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient +// instance when a successful token retrieval is completed, otherwise be sure to +// invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or +// if setting the token failed, which could happen if an extension doesn't meet +// the required regular expression); invoking SetOAuthBearerTokenFailure() will +// schedule a new event for 10 seconds later so another retrieval can be attempted. +// +// Hint: If your application registers a signal notification +// (signal.Notify) makes sure the signals channel is buffered to avoid +// possible complications with blocking Poll() calls. +// +// Note: The Confluent Kafka Go client is safe for concurrent use. +package kafka + +import ( + "fmt" + "unsafe" + + // Make sure librdkafka_vendor/ sub-directory is included in vendor pulls. + _ "github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor" +) + +/* +#include +#include +#include "select_rdkafka.h" + +static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) { + return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL; +} + +static const rd_kafka_group_result_t * +group_result_by_idx (const rd_kafka_group_result_t **groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return groups[idx]; +} +*/ +import "C" + +// PartitionAny represents any partition (for partitioning), +// or unspecified value (for all other cases) +const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA) + +// TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset. 
+type TopicPartition struct { + Topic *string + Partition int32 + Offset Offset + Metadata *string + Error error + LeaderEpoch *int32 // LeaderEpoch or nil if not available +} + +func (p TopicPartition) String() string { + topic := "" + if p.Topic != nil { + topic = *p.Topic + } + if p.Error != nil { + return fmt.Sprintf("%s[%d]@%s(%s)", + topic, p.Partition, p.Offset, p.Error) + } + return fmt.Sprintf("%s[%d]@%s", + topic, p.Partition, p.Offset) +} + +// TopicPartitions is a slice of TopicPartitions that also implements +// the sort interface +type TopicPartitions []TopicPartition + +func (tps TopicPartitions) Len() int { + return len(tps) +} + +func (tps TopicPartitions) Less(i, j int) bool { + if *tps[i].Topic < *tps[j].Topic { + return true + } else if *tps[i].Topic > *tps[j].Topic { + return false + } + return tps[i].Partition < tps[j].Partition +} + +func (tps TopicPartitions) Swap(i, j int) { + tps[i], tps[j] = tps[j], tps[i] +} + +// Node represents a Kafka broker. +type Node struct { + // Node id. + ID int + // Node host. + Host string + // Node port. + Port int + // Node rack (may be nil) + Rack *string +} + +func (n Node) String() string { + return fmt.Sprintf("[%s:%d]/%d", n.Host, n.Port, n.ID) +} + +// UUID Kafka UUID representation +type UUID struct { + // Most Significant Bits. + mostSignificantBits int64 + // Least Significant Bits. + leastSignificantBits int64 + // Base64 representation + base64str string +} + +// Base64 string representation of the UUID +func (uuid UUID) String() string { + return uuid.base64str +} + +// GetMostSignificantBits returns Most Significant 64 bits of the 128 bits UUID +func (uuid UUID) GetMostSignificantBits() int64 { + return uuid.mostSignificantBits +} + +// GetLeastSignificantBits returns Least Significant 64 bits of the 128 bits UUID +func (uuid UUID) GetLeastSignificantBits() int64 { + return uuid.leastSignificantBits +} + +// ConsumerGroupTopicPartitions represents a consumer group's TopicPartitions. +type ConsumerGroupTopicPartitions struct { + // Group name + Group string + // Partitions list + Partitions []TopicPartition +} + +func (gtp ConsumerGroupTopicPartitions) String() string { + res := gtp.Group + res += "[ " + for _, tp := range gtp.Partitions { + res += tp.String() + " " + } + res += "]" + return res +} + +// new_cparts_from_TopicPartitions creates a new C rd_kafka_topic_partition_list_t +// from a TopicPartition array. 
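Because TopicPartitions implements sort.Interface (ordering by topic, then partition), callers can order assignment or offset lists with the standard library directly. A tiny sketch, with assumed topic names:

package main

import (
	"fmt"
	"sort"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	a, b := "topic-a", "topic-b" // assumed topic names
	parts := kafka.TopicPartitions{
		{Topic: &b, Partition: 0},
		{Topic: &a, Partition: 2},
		{Topic: &a, Partition: 0, Offset: kafka.OffsetBeginning},
	}
	sort.Sort(parts) // topic-a[0], topic-a[2], topic-b[0]
	for _, tp := range parts {
		fmt.Println(tp) // e.g. "topic-a[0]@beginning"
	}
}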
+func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) { + cparts = C.rd_kafka_topic_partition_list_new(C.int(len(partitions))) + for _, part := range partitions { + ctopic := C.CString(*part.Topic) + defer C.free(unsafe.Pointer(ctopic)) + rktpar := C.rd_kafka_topic_partition_list_add(cparts, ctopic, C.int32_t(part.Partition)) + rktpar.offset = C.int64_t(part.Offset) + + if part.Metadata != nil { + cmetadata := C.CString(*part.Metadata) + rktpar.metadata = unsafe.Pointer(cmetadata) + rktpar.metadata_size = C.size_t(len(*part.Metadata)) + } + + if part.LeaderEpoch != nil { + cLeaderEpoch := C.int32_t(*part.LeaderEpoch) + C.rd_kafka_topic_partition_set_leader_epoch(rktpar, cLeaderEpoch) + } + } + + return cparts +} + +func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kafka_topic_partition_t) { + + topic := C.GoString(crktpar.topic) + partition.Topic = &topic + partition.Partition = int32(crktpar.partition) + partition.Offset = Offset(crktpar.offset) + if crktpar.metadata_size > 0 { + size := C.int(crktpar.metadata_size) + cstr := (*C.char)(unsafe.Pointer(crktpar.metadata)) + metadata := C.GoStringN(cstr, size) + partition.Metadata = &metadata + } + if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR { + partition.Error = newError(crktpar.err) + } + + cLeaderEpoch := int32(C.rd_kafka_topic_partition_get_leader_epoch(crktpar)) + if cLeaderEpoch >= 0 { + partition.LeaderEpoch = &cLeaderEpoch + } +} + +func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) { + + partcnt := int(cparts.cnt) + + partitions = make([]TopicPartition, partcnt) + for i := 0; i < partcnt; i++ { + crktpar := C._c_rdkafka_topic_partition_list_entry(cparts, C.int(i)) + setupTopicPartitionFromCrktpar(&partitions[i], crktpar) + } + + return partitions +} + +// cToConsumerGroupTopicPartitions converts a C rd_kafka_group_result_t array to a +// ConsumerGroupTopicPartitions slice. +func (a *AdminClient) cToConsumerGroupTopicPartitions( + cGroupResults **C.rd_kafka_group_result_t, + cGroupCount C.size_t) (result []ConsumerGroupTopicPartitions) { + result = make([]ConsumerGroupTopicPartitions, uint(cGroupCount)) + + for i := uint(0); i < uint(cGroupCount); i++ { + cGroupResult := C.group_result_by_idx(cGroupResults, cGroupCount, C.size_t(i)) + cGroupPartitions := C.rd_kafka_group_result_partitions(cGroupResult) + result[i] = ConsumerGroupTopicPartitions{ + Group: C.GoString(C.rd_kafka_group_result_name(cGroupResult)), + Partitions: newTopicPartitionsFromCparts(cGroupPartitions), + } + } + return +} + +// LibraryVersion returns the underlying librdkafka library version as a +// (version_int, version_str) tuple. +func LibraryVersion() (int, string) { + ver := (int)(C.rd_kafka_version()) + verstr := C.GoString(C.rd_kafka_version_str()) + return ver, verstr +} + +// setSaslCredentials sets the SASL credentials used for the specified Kafka client. +// The new credentials will overwrite the old ones (which were set when creating the +// client or by a previous call to setSaslCredentials). The new credentials will be +// used the next time the client needs to establish a connection to the broker. This +// function will *not* break existing broker connections that were established with the +// old credentials. This method applies only to the SASL PLAIN and SCRAM mechanisms. 
+func setSaslCredentials(rk *C.rd_kafka_t, username, password string) error { + cUsername := C.CString(username) + defer C.free(unsafe.Pointer(cUsername)) + cPassword := C.CString(password) + defer C.free(unsafe.Pointer(cPassword)) + + if err := C.rd_kafka_sasl_set_credentials(rk, cUsername, cPassword); err != nil { + return newErrorFromCErrorDestroy(err) + } + + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/.gitignore b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/.gitignore new file mode 100644 index 000000000..a2f65a763 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/.gitignore @@ -0,0 +1,3 @@ +*.tar.gz +*.tgz +tmp* diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/LICENSES.txt b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/LICENSES.txt new file mode 100644 index 000000000..ed8921491 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/LICENSES.txt @@ -0,0 +1,393 @@ +LICENSE +-------------------------------------------------------------- +librdkafka - Apache Kafka C driver library + +Copyright (c) 2012-2022, Magnus Edenhill + 2023, Confluent Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +LICENSE.cjson +-------------------------------------------------------------- +For cJSON.c and cJSON.h: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +LICENSE.crc32c +-------------------------------------------------------------- +# For src/crc32c.c copied (with modifications) from +# http://stackoverflow.com/a/17646775/1821055 + +/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction + * Copyright (C) 2013 Mark Adler + * Version 1.1 1 Aug 2013 Mark Adler + */ + +/* + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Mark Adler + madler@alumni.caltech.edu + */ + + +LICENSE.fnv1a +-------------------------------------------------------------- +parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c + + +Please do not copyright this code. This code is in the public domain. + +LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO +EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + +By: + chongo /\oo/\ + http://www.isthe.com/chongo/ + +Share and Enjoy! :-) + + +LICENSE.hdrhistogram +-------------------------------------------------------------- +This license covers src/rdhdrhistogram.c which is a C port of +Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram +at revision 3a0bb77429bd3a61596f5e8a3172445844342120 + +----------------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2014 Coda Hale + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE + + +LICENSE.lz4 +-------------------------------------------------------------- +src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3 + +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +LICENSE.murmur2 +-------------------------------------------------------------- +parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git + + +MurMurHash2 Library +//----------------------------------------------------------------------------- +// MurmurHash2 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +LICENSE.pycrc +-------------------------------------------------------------- +The following license applies to the files rdcrc32.c and rdcrc32.h which +have been generated by the pycrc tool. 
+============================================================================ + +Copyright (c) 2006-2012, Thomas Pircher + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +LICENSE.queue +-------------------------------------------------------------- +For sys/queue.h: + + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + * $FreeBSD$ + +LICENSE.regexp +-------------------------------------------------------------- +regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684 + +" +These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution. 
+" + + +LICENSE.snappy +-------------------------------------------------------------- +###################################################################### +# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h # +# originally retrieved from http://github.com/andikleen/snappy-c # +# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219 # +###################################################################### + +The snappy-c code is under the same license as the original snappy source + +Copyright 2011 Intel Corporation All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Intel Corporation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +LICENSE.tinycthread +-------------------------------------------------------------- +From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9 + +License +------- + +Copyright (c) 2012 Marcus Geelnard + 2013-2014 Evan Nemerson + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + + 3. This notice may not be removed or altered from any source + distribution. + + +LICENSE.wingetopt +-------------------------------------------------------------- +For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt + +/* + * Copyright (c) 2002 Todd C. 
Miller + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Sponsored in part by the Defense Advanced Research Projects + * Agency (DARPA) and Air Force Research Laboratory, Air Force + * Materiel Command, USAF, under agreement number F39502-99-1-0512. + */ +/*- + * Copyright (c) 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Dieter Baron and Thomas Klausner. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/README.md new file mode 100644 index 000000000..cdbaf533e --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/README.md @@ -0,0 +1,25 @@ +# Bundling prebuilt librdkafka + +confluent-kafka-go bundles prebuilt statically linked +versions of librdkafka for the following platforms: + + * MacOSX x64, arm64 (aka Darwin) + * Linux glibc x64, arm64 (Ubuntu, CentOS, etc) + * Linux musl x64, arm64 (Alpine) + * Windows x64 + +## Import static librdkafka bundle + +First create the static librdkafka bundle following the instructions in +librdkafka's packaging/nuget/README.md. + +Then import the new version by using the import.sh script here, this script +will create a branch, import the bundle, create a commit and push the +branch to Github for PR review. 
This PR must be manually opened, reviewed +and then finally merged (make sure to merge it, DO NOT squash or rebase). + + $ ./import.sh ~/path/to/librdkafka-static-bundle-v1.4.0.tgz + +This will copy the static library and the rdkafka.h header file +to this directory, as well as generate a new ../build_..go file +for this platform + variant. diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/bundle-import.sh b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/bundle-import.sh new file mode 100644 index 000000000..065c6f875 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/bundle-import.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# +# Updates the bundled prebuilt librdkafka libraries to specified version. +# + +set -e + + +usage() { + echo "Usage: $0 librdkafka-static-bundle-.tgz" + echo "" + echo "This tool must be run from the TOPDIR/kafka/librdkafka_vendor directory" + exit 1 +} + + +# Parse dynamic libraries from linker command line. +# Will print a list matching -lfoo and -framework X.. +parse_dynlibs() { + local libs= + while [[ $# -gt 0 ]]; do + if [[ $1 == -l* ]]; then + libs="${libs} $1" + elif [[ $1 == -framework ]]; then + libs="${libs} $1 $2" + shift # remove one (extra) arg + fi + shift # remove one arg + done + + echo "$libs" +} + +# Parse dynamic library dependecies from pkg-config file and print +# them to stdout. +parse_pc_dynlibs() { + local pc=$1 + parse_dynlibs $(sed -n 's/^Libs: \(..*\)/\1/p' "$pc") +} + +setup_build() { + # Copies static library from the temp directory into final location, + # extracts dynamic lib list from the pkg-config file, + # and generates the build_..go file + local btype=$1 + local apath=$2 + local pc=$3 + local srcinfo=$4 + local build_tag= + local gpath="../build_${btype}.go" + local dpath="librdkafka_${btype}.a" + + if [[ $btype =~ ^glibc_linux.*$ ]]; then + build_tag="// +build !musl" + elif [[ $btype =~ ^musl_linux.*$ ]]; then + build_tag="// +build musl" + fi + + local dynlibs=$(parse_pc_dynlibs $pc) + + echo "Copying $apath to $dpath" + cp "$apath" "$dpath" + + echo "Generating $gpath (extra build tag: $build_tag)" + + cat >$gpath <.tgz" + echo "" + echo "This tool must be run from the TOPDIR/kafka/librdkafka directory" + echo "" + echo "Options:" + echo " --devel - Development use: No branch checks and does not push to github" + exit 1 +} + +error_cleanup() { + echo "Error occurred, cleaning up" + git checkout $currbranch + git branch -D $import_branch + exit 1 +} + +devel=0 +if [[ $1 == --devel ]]; then + devel=1 + shift +fi + +bundle="$1" +[[ -f $bundle ]] || usage + +# Parse the librdkafka version from the bundle +bundlename=$(basename $bundle) +version=${bundlename#librdkafka-static-bundle-} +version=${version%.tgz} + +if [[ -z $version ]]; then + echo "Error: Could not parse version from bundle $bundle" + exit 1 +fi + +# Verify branch state +curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-) +uncommitted=$(git status --untracked-files=no --porcelain) + +if [[ ! -z $uncommitted ]]; then + echo "Error: This script must be run on a clean branch with no uncommitted changes" + echo "Uncommitted files:" + echo "$uncommitted" + exit 1 +fi + +if [[ $devel != 1 ]] && [[ $curr_branch != master ]] ; then + echo "Error: This script must be run on an up-to-date, clean, master branch" + exit 1 +fi + + +# Create import branch, import bundle, commit. 
+import_branch="import_$version" + +exists=$(git branch -rlq | grep "/$import_branch\$" || true) +if [[ ! -z $exists ]]; then + echo "Error: This version branch already seems to exist: $exists: already imorted?" + [[ $devel != 1 ]] && exit 1 +fi + +echo "Checking for existing commits that match this version (should be none)" +git log --oneline | grep "^librdkafka static bundle $version\$" && exit 1 + + +echo "Creating import branch $import_branch" +git checkout -b $import_branch + +echo "Importing bundle $bundle" +./bundle-import.sh "$bundle" || error_cleanup + +echo "Committing $version" +git commit -a -m "librdkafka static bundle $version" || error_cleanup + +echo "Updating error codes and docs" +pushd ../../ +make -f mk/Makefile docs || error_cleanup +git commit -a -m "Documentation and error code update for librdkafka $version" \ + || error_cleanup +popd + +if [[ $devel != 1 ]]; then + echo "Pushing branch" + git push origin $import_branch || error_cleanup +fi + +git checkout $curr_branch + +if [[ $devel != 1 ]]; then + git branch -D $import_branch +fi + +echo "" +echo "############## IMPORT OF $version COMPLETE ##############" +if [[ $devel != 1 ]]; then + echo "Branch $import_branch has been pushed." + echo "Create a PR, have it reviewed and then merge it (do NOT squash or rebase)." +fi + diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka.go new file mode 100644 index 000000000..52e1ad769 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka.go @@ -0,0 +1,21 @@ +/** + * Copyright 2020 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package librdkafka + +// LibrdkafkaGoSubdir is a dummy variable needed to export something so the +// file is not empty. 
+var LibrdkafkaGoSubdir = true diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a new file mode 100644 index 000000000..6ef4ca70d Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a new file mode 100644 index 000000000..1056fd40d Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_amd64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_amd64.a new file mode 100644 index 000000000..60cadd825 Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_amd64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_arm64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_arm64.a new file mode 100644 index 000000000..cbf26d0f4 Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_arm64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_amd64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_amd64.a new file mode 100644 index 000000000..1e5b3002c Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_amd64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_arm64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_arm64.a new file mode 100644 index 000000000..4e2c03ead Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_arm64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_windows.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_windows.a new file mode 100644 index 000000000..41dc68ddd Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_windows.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka.h new file mode 100644 index 000000000..de620284f --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka.h @@ -0,0 +1,10337 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file rdkafka.h + * @brief Apache Kafka C/C++ consumer and producer client library. + * + * rdkafka.h contains the public API for librdkafka. + * The API is documented in this file as comments prefixing the function, type, + * enum, define, etc. + * + * @sa For the C++ interface see rdkafkacpp.h + * + * @tableofcontents + */ + + +/* @cond NO_DOC */ +#ifndef _RDKAFKA_H_ +#define _RDKAFKA_H_ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#if 0 +} /* Restore indent */ +#endif +#endif + +#ifdef _WIN32 +#include +#ifndef WIN32_MEAN_AND_LEAN +#define WIN32_MEAN_AND_LEAN +#endif +#include /* for sockaddr, .. */ +#ifndef _SSIZE_T_DEFINED +#define _SSIZE_T_DEFINED +typedef SSIZE_T ssize_t; +#endif +#define RD_UNUSED +#define RD_INLINE __inline +#define RD_DEPRECATED __declspec(deprecated) +#define RD_FORMAT(...) +#undef RD_EXPORT +#ifdef LIBRDKAFKA_STATICLIB +#define RD_EXPORT +#else +#ifdef LIBRDKAFKA_EXPORTS +#define RD_EXPORT __declspec(dllexport) +#else +#define RD_EXPORT __declspec(dllimport) +#endif +#ifndef LIBRDKAFKA_TYPECHECKS +#define LIBRDKAFKA_TYPECHECKS 0 +#endif +#endif + +#else +#include /* for sockaddr, .. */ + +#define RD_UNUSED __attribute__((unused)) +#define RD_INLINE inline +#define RD_EXPORT +#define RD_DEPRECATED __attribute__((deprecated)) + +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) +#define RD_HAS_STATEMENT_EXPRESSIONS +#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__))) +#else +#define RD_FORMAT(...) +#endif + +#ifndef LIBRDKAFKA_TYPECHECKS +#define LIBRDKAFKA_TYPECHECKS 1 +#endif +#endif + + +/** + * @brief Type-checking macros + * Compile-time checking that \p ARG is of type \p TYPE. 
+ * @returns \p RET + */ +#if LIBRDKAFKA_TYPECHECKS +#define _LRK_TYPECHECK(RET, TYPE, ARG) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + TYPE3 __t3 RD_UNUSED = (ARG3); \ + } \ + RET; \ + }) +#else +#define _LRK_TYPECHECK(RET, TYPE, ARG) (RET) +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) (RET) +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) (RET) +#endif + +/* @endcond */ + + +/** + * @name librdkafka version + * @{ + * + * + */ + +/** + * @brief librdkafka version + * + * Interpreted as hex \c MM.mm.rr.xx: + * - MM = Major + * - mm = minor + * - rr = revision + * - xx = pre-release id (0xff is the final release) + * + * E.g.: \c 0x000801ff = 0.8.1 + * + * @remark This value should only be used during compile time, + * for runtime checks of version use rd_kafka_version() + */ +#define RD_KAFKA_VERSION 0x020300ff + +/** + * @brief Returns the librdkafka version as integer. + * + * @returns Version integer. + * + * @sa See RD_KAFKA_VERSION for how to parse the integer format. + * @sa Use rd_kafka_version_str() to retreive the version as a string. + */ +RD_EXPORT +int rd_kafka_version(void); + +/** + * @brief Returns the librdkafka version as string. + * + * @returns Version string + */ +RD_EXPORT +const char *rd_kafka_version_str(void); + +/**@}*/ + + +/** + * @name Constants, errors, types + * @{ + * + * + */ + + +/** + * @enum rd_kafka_type_t + * + * @brief rd_kafka_t handle type. + * + * @sa rd_kafka_new() + */ +typedef enum rd_kafka_type_t { + RD_KAFKA_PRODUCER, /**< Producer client */ + RD_KAFKA_CONSUMER /**< Consumer client */ +} rd_kafka_type_t; + + +/*! + * Timestamp types + * + * @sa rd_kafka_message_timestamp() + */ +typedef enum rd_kafka_timestamp_type_t { + RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ + RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ + RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ +} rd_kafka_timestamp_type_t; + + + +/** + * @brief Retrieve supported debug contexts for use with the \c \"debug\" + * configuration property. (runtime) + * + * @returns Comma-separated list of available debugging contexts. + */ +RD_EXPORT +const char *rd_kafka_get_debug_contexts(void); + +/** + * @brief Supported debug contexts. (compile time) + * + * @deprecated This compile time value may be outdated at runtime due to + * linking another version of the library. + * Use rd_kafka_get_debug_contexts() instead. 
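A minimal sketch of how the version macro and runtime accessors documented above are typically combined to detect a header/library skew; the include path, function name and message text are assumptions, not part of the vendored sources:

    #include <stdio.h>
    #include "rdkafka.h"

    /* Warn when the librdkafka linked at runtime is older than the header
     * this translation unit was compiled against. */
    static void warn_on_version_skew(void) {
        if (rd_kafka_version() < RD_KAFKA_VERSION)
            fprintf(stderr, "librdkafka runtime %s < compile-time 0x%08x\n",
                    rd_kafka_version_str(), (unsigned)RD_KAFKA_VERSION);
    }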
+ */ +#define RD_KAFKA_DEBUG_CONTEXTS \ + "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp," \ + "security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor," \ + "conf" + + +/* @cond NO_DOC */ +/* Private types to provide ABI compatibility */ +typedef struct rd_kafka_s rd_kafka_t; +typedef struct rd_kafka_topic_s rd_kafka_topic_t; +typedef struct rd_kafka_conf_s rd_kafka_conf_t; +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t; +typedef struct rd_kafka_queue_s rd_kafka_queue_t; +typedef struct rd_kafka_op_s rd_kafka_event_t; +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t; +typedef struct rd_kafka_consumer_group_metadata_s + rd_kafka_consumer_group_metadata_t; +typedef struct rd_kafka_error_s rd_kafka_error_t; +typedef struct rd_kafka_headers_s rd_kafka_headers_t; +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t; +typedef struct rd_kafka_Uuid_s rd_kafka_Uuid_t; +/* @endcond */ + + +/** + * @enum rd_kafka_resp_err_t + * @brief Error codes. + * + * The negative error codes delimited by two underscores + * (\c RD_KAFKA_RESP_ERR__..) denotes errors internal to librdkafka and are + * displayed as \c \"Local: \\", while the error codes + * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker + * errors and are displayed as \c \"Broker: \\". + * + * @sa Use rd_kafka_err2str() to translate an error code a human readable string + */ +typedef enum { + /* Internal errors to rdkafka: */ + /** Begin internal error codes */ + RD_KAFKA_RESP_ERR__BEGIN = -200, + /** Received message is incorrect */ + RD_KAFKA_RESP_ERR__BAD_MSG = -199, + /** Bad/unknown compression */ + RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, + /** Broker is going away */ + RD_KAFKA_RESP_ERR__DESTROY = -197, + /** Generic failure */ + RD_KAFKA_RESP_ERR__FAIL = -196, + /** Broker transport failure */ + RD_KAFKA_RESP_ERR__TRANSPORT = -195, + /** Critical system resource */ + RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, + /** Failed to resolve broker */ + RD_KAFKA_RESP_ERR__RESOLVE = -193, + /** Produced message timed out*/ + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, + /** Reached the end of the topic+partition queue on + * the broker. Not really an error. + * This event is disabled by default, + * see the `enable.partition.eof` configuration property. */ + RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, + /** Permanent: Partition does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, + /** File or filesystem error */ + RD_KAFKA_RESP_ERR__FS = -189, + /** Permanent: Topic does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, + /** All broker connections are down. */ + RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, + /** Invalid argument, or invalid configuration */ + RD_KAFKA_RESP_ERR__INVALID_ARG = -186, + /** Operation timed out */ + RD_KAFKA_RESP_ERR__TIMED_OUT = -185, + /** Queue is full */ + RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, + /** ISR count < required.acks */ + RD_KAFKA_RESP_ERR__ISR_INSUFF = -183, + /** Broker node update */ + RD_KAFKA_RESP_ERR__NODE_UPDATE = -182, + /** SSL error */ + RD_KAFKA_RESP_ERR__SSL = -181, + /** Waiting for coordinator to become available. */ + RD_KAFKA_RESP_ERR__WAIT_COORD = -180, + /** Unknown client group */ + RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179, + /** Operation in progress */ + RD_KAFKA_RESP_ERR__IN_PROGRESS = -178, + /** Previous operation in progress, wait for it to finish. 
*/ + RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177, + /** This operation would interfere with an existing subscription */ + RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176, + /** Assigned partitions (rebalance_cb) */ + RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175, + /** Revoked partitions (rebalance_cb) */ + RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174, + /** Conflicting use */ + RD_KAFKA_RESP_ERR__CONFLICT = -173, + /** Wrong state */ + RD_KAFKA_RESP_ERR__STATE = -172, + /** Unknown protocol */ + RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171, + /** Not implemented */ + RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170, + /** Authentication failure*/ + RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, + /** No stored offset */ + RD_KAFKA_RESP_ERR__NO_OFFSET = -168, + /** Outdated */ + RD_KAFKA_RESP_ERR__OUTDATED = -167, + /** Timed out in queue */ + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, + /** Feature not supported by broker */ + RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165, + /** Awaiting cache update */ + RD_KAFKA_RESP_ERR__WAIT_CACHE = -164, + /** Operation interrupted (e.g., due to yield)) */ + RD_KAFKA_RESP_ERR__INTR = -163, + /** Key serialization error */ + RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162, + /** Value serialization error */ + RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161, + /** Key deserialization error */ + RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160, + /** Value deserialization error */ + RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159, + /** Partial response */ + RD_KAFKA_RESP_ERR__PARTIAL = -158, + /** Modification attempted on read-only object */ + RD_KAFKA_RESP_ERR__READ_ONLY = -157, + /** No such entry / item not found */ + RD_KAFKA_RESP_ERR__NOENT = -156, + /** Read underflow */ + RD_KAFKA_RESP_ERR__UNDERFLOW = -155, + /** Invalid type */ + RD_KAFKA_RESP_ERR__INVALID_TYPE = -154, + /** Retry operation */ + RD_KAFKA_RESP_ERR__RETRY = -153, + /** Purged in queue */ + RD_KAFKA_RESP_ERR__PURGE_QUEUE = -152, + /** Purged in flight */ + RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = -151, + /** Fatal error: see rd_kafka_fatal_error() */ + RD_KAFKA_RESP_ERR__FATAL = -150, + /** Inconsistent state */ + RD_KAFKA_RESP_ERR__INCONSISTENT = -149, + /** Gap-less ordering would not be guaranteed if proceeding */ + RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148, + /** Maximum poll interval exceeded */ + RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147, + /** Unknown broker */ + RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146, + /** Functionality not configured */ + RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145, + /** Instance has been fenced */ + RD_KAFKA_RESP_ERR__FENCED = -144, + /** Application generated error */ + RD_KAFKA_RESP_ERR__APPLICATION = -143, + /** Assignment lost */ + RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142, + /** No operation performed */ + RD_KAFKA_RESP_ERR__NOOP = -141, + /** No offset to automatically reset to */ + RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140, + /** Partition log truncation detected */ + RD_KAFKA_RESP_ERR__LOG_TRUNCATION = -139, + + /** End internal error codes */ + RD_KAFKA_RESP_ERR__END = -100, + + /* Kafka broker errors: */ + /** Unknown broker error */ + RD_KAFKA_RESP_ERR_UNKNOWN = -1, + /** Success */ + RD_KAFKA_RESP_ERR_NO_ERROR = 0, + /** Offset out of range */ + RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, + /** Invalid message */ + RD_KAFKA_RESP_ERR_INVALID_MSG = 2, + /** Unknown topic or partition */ + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, + /** Invalid message size */ + RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, + /** Leader not available */ + 
RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, +/** Not leader for partition */ +#define RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER \ + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, + /** Request timed out */ + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, + /** Broker not available */ + RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, + /** Replica not available */ + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, + /** Message size too large */ + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, + /** StaleControllerEpochCode */ + RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, + /** Offset metadata string too large */ + RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, + /** Broker disconnected before response received */ + RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, + /** Coordinator load in progress */ + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, +/** Group coordinator load in progress */ +#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \ + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS + /** Coordinator not available */ + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15, +/** Group coordinator not available */ +#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \ + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE + /** Not coordinator */ + RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16, +/** Not coordinator for group */ +#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \ + RD_KAFKA_RESP_ERR_NOT_COORDINATOR + /** Invalid topic */ + RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17, + /** Message batch larger than configured server segment size */ + RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18, + /** Not enough in-sync replicas */ + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19, + /** Message(s) written to insufficient number of in-sync replicas */ + RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, + /** Invalid required acks value */ + RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21, + /** Specified group generation id is not valid */ + RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22, + /** Inconsistent group protocol */ + RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23, + /** Invalid group.id */ + RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, + /** Unknown member */ + RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25, + /** Invalid session timeout */ + RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26, + /** Group rebalance in progress */ + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, + /** Commit offset data size is not valid */ + RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28, + /** Topic authorization failed */ + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29, + /** Group authorization failed */ + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, + /** Cluster authorization failed */ + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, + /** Invalid timestamp */ + RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, + /** Unsupported SASL mechanism */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, + /** Illegal SASL state */ + RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, + /** Unuspported version */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, + /** Topic already exists */ + RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, + /** Invalid number of partitions */ + RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, + /** Invalid replication factor */ + RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, + /** Invalid replica assignment */ + RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, + /** Invalid config */ + RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, + /** Not controller for cluster */ + 
RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, + /** Invalid request */ + RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, + /** Message format on broker does not support request */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, + /** Policy violation */ + RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44, + /** Broker received an out of order sequence number */ + RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45, + /** Broker received a duplicate sequence number */ + RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46, + /** Producer attempted an operation with an old epoch */ + RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47, + /** Producer attempted a transactional operation in an invalid state */ + RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48, + /** Producer attempted to use a producer id which is not + * currently assigned to its transactional id */ + RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49, + /** Transaction timeout is larger than the maximum + * value allowed by the broker's max.transaction.timeout.ms */ + RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50, + /** Producer attempted to update a transaction while another + * concurrent operation on the same transaction was ongoing */ + RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51, + /** Indicates that the transaction coordinator sending a + * WriteTxnMarker is no longer the current coordinator for a + * given producer */ + RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52, + /** Transactional Id authorization failed */ + RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53, + /** Security features are disabled */ + RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54, + /** Operation not attempted */ + RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55, + /** Disk error when trying to access log file on the disk */ + RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56, + /** The user-specified log directory is not found in the broker config + */ + RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57, + /** SASL Authentication failed */ + RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58, + /** Unknown Producer Id */ + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59, + /** Partition reassignment is in progress */ + RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60, + /** Delegation Token feature is not enabled */ + RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61, + /** Delegation Token is not found on server */ + RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62, + /** Specified Principal is not valid Owner/Renewer */ + RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63, + /** Delegation Token requests are not allowed on this connection */ + RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64, + /** Delegation Token authorization failed */ + RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65, + /** Delegation Token is expired */ + RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66, + /** Supplied principalType is not supported */ + RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67, + /** The group is not empty */ + RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68, + /** The group id does not exist */ + RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69, + /** The fetch session ID was not found */ + RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70, + /** The fetch session epoch is invalid */ + RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71, + /** No matching listener */ + RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72, + /** Topic deletion is disabled */ + RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73, + /** Leader epoch is older than broker epoch */ + 
RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74, + /** Leader epoch is newer than broker epoch */ + RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75, + /** Unsupported compression type */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76, + /** Broker epoch has changed */ + RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77, + /** Leader high watermark is not caught up */ + RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78, + /** Group member needs a valid member ID */ + RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79, + /** Preferred leader was not available */ + RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, + /** Consumer group has reached maximum size */ + RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81, + /** Static consumer fenced by other consumer with same + * group.instance.id. */ + RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82, + /** Eligible partition leaders are not available */ + RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83, + /** Leader election not needed for topic partition */ + RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84, + /** No partition reassignment is in progress */ + RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85, + /** Deleting offsets of a topic while the consumer group is + * subscribed to it */ + RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86, + /** Broker failed to validate record */ + RD_KAFKA_RESP_ERR_INVALID_RECORD = 87, + /** There are unstable offsets that need to be cleared */ + RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88, + /** Throttling quota has been exceeded */ + RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89, + /** There is a newer producer with the same transactionalId + * which fences the current one */ + RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90, + /** Request illegally referred to resource that does not exist */ + RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91, + /** Request illegally referred to the same resource twice */ + RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92, + /** Requested credential would not meet criteria for acceptability */ + RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93, + /** Indicates that the either the sender or recipient of a + * voter-only request is not one of the expected voters */ + RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94, + /** Invalid update version */ + RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95, + /** Unable to update finalized features due to server error */ + RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96, + /** Request principal deserialization failed during forwarding */ + RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97, + + RD_KAFKA_RESP_ERR_END_ALL, +} rd_kafka_resp_err_t; + + +/** + * @brief Error code value, name and description. + * Typically for use with language bindings to automatically expose + * the full set of librdkafka error codes. + */ +struct rd_kafka_err_desc { + rd_kafka_resp_err_t code; /**< Error code */ + const char *name; /**< Error name, same as code enum sans prefix */ + const char *desc; /**< Human readable error description. */ +}; + + +/** + * @brief Returns the full list of error codes. + */ +RD_EXPORT +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, + size_t *cntp); + + + +/** + * @brief Returns a human readable representation of a kafka error. + * + * @param err Error code to translate + */ +RD_EXPORT +const char *rd_kafka_err2str(rd_kafka_resp_err_t err); + + + +/** + * @brief Returns the error code name (enum name). 
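For reference, a small sketch of how the translation helpers above are commonly wrapped for logging; the wrapper name and the use of stderr are assumptions, and <stdio.h> plus the header itself are assumed to be included:

    /* Log any rd_kafka_resp_err_t with both its enum name and a human
     * readable description. */
    static void log_kafka_error(const char *what, rd_kafka_resp_err_t err) {
        fprintf(stderr, "%s failed: %s (%s)\n",
                what, rd_kafka_err2str(err), rd_kafka_err2name(err));
    }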
+ * + * @param err Error code to translate + */ +RD_EXPORT +const char *rd_kafka_err2name(rd_kafka_resp_err_t err); + + +/** + * @brief Returns the last error code generated by a legacy API call + * in the current thread. + * + * The legacy APIs are the ones using errno to propagate error value, namely: + * - rd_kafka_topic_new() + * - rd_kafka_consume_start() + * - rd_kafka_consume_stop() + * - rd_kafka_consume() + * - rd_kafka_consume_batch() + * - rd_kafka_consume_callback() + * - rd_kafka_consume_queue() + * - rd_kafka_produce() + * + * The main use for this function is to avoid converting system \p errno + * values to rd_kafka_resp_err_t codes for legacy APIs. + * + * @remark The last error is stored per-thread, if multiple rd_kafka_t handles + * are used in the same application thread the developer needs to + * make sure rd_kafka_last_error() is called immediately after + * a failed API call. + * + * @remark errno propagation from librdkafka is not safe on Windows + * and should not be used, use rd_kafka_last_error() instead. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_last_error(void); + + +/** + * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t + * error code upon failure from the following functions: + * - rd_kafka_topic_new() + * - rd_kafka_consume_start() + * - rd_kafka_consume_stop() + * - rd_kafka_consume() + * - rd_kafka_consume_batch() + * - rd_kafka_consume_callback() + * - rd_kafka_consume_queue() + * - rd_kafka_produce() + * + * @param errnox System errno value to convert + * + * @returns Appropriate error code for \p errnox + * + * @remark A better alternative is to call rd_kafka_last_error() immediately + * after any of the above functions return -1 or NULL. + * + * @deprecated Use rd_kafka_last_error() to retrieve the last error code + * set by the legacy librdkafka APIs. + * + * @sa rd_kafka_last_error() + */ +RD_EXPORT RD_DEPRECATED rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); + + +/** + * @brief Returns the thread-local system errno + * + * On most platforms this is the same as \p errno but in case of different + * runtimes between library and application (e.g., Windows static DLLs) + * this provides a means for exposing the errno librdkafka uses. + * + * @remark The value is local to the current calling thread. + * + * @deprecated Use rd_kafka_last_error() to retrieve the last error code + * set by the legacy librdkafka APIs. + */ +RD_EXPORT RD_DEPRECATED int rd_kafka_errno(void); + + + +/** + * @brief Returns the first fatal error set on this client instance, + * or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred. + * + * This function is to be used with the Idempotent Producer and \c error_cb + * to detect fatal errors. + * + * Generally all errors raised by \c error_cb are to be considered + * informational and temporary, the client will try to recover from all + * errors in a graceful fashion (by retrying, etc). + * + * However, some errors should logically be considered fatal to retain + * consistency; in particular a set of errors that may occur when using the + * Idempotent Producer and the in-order or exactly-once producer guarantees + * can't be satisfied. + * + * @param rk Client instance. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written to if there is a fatal error. + * @param errstr_size Writable size in \p errstr. 
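A sketch of the recommended pattern for the legacy APIs listed above, fetching the reason with rd_kafka_last_error() instead of inspecting errno; `rkt`, `payload` and `len` are placeholders, and RD_KAFKA_PARTITION_UA / RD_KAFKA_MSG_F_COPY are defined elsewhere in the full header:

    if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                         payload, len, NULL, 0, NULL) == -1)
        /* -1 signals failure; the error is stored per-thread. */
        fprintf(stderr, "produce failed: %s\n",
                rd_kafka_err2str(rd_kafka_last_error()));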
+ * + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else + * any other error code. + */ +RD_EXPORT +rd_kafka_resp_err_t +rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size); + + +/** + * @brief Trigger a fatal error for testing purposes. + * + * Since there is no practical way to trigger real fatal errors in the + * idempotent producer, this method allows an application to trigger + * fabricated fatal errors in tests to check its error handling code. + * + * @param rk Client instance. + * @param err The underlying error code. + * @param reason A human readable error reason. + * Will be prefixed with "test_fatal_error: " to differentiate + * from real fatal errors. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or + * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error + * has already been triggered. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); + + +/** + * @returns the error code for \p error or RD_KAFKA_RESP_ERR_NO_ERROR if + * \p error is NULL. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error); + +/** + * @returns the error code name for \p error, e.g, "ERR_UNKNOWN_MEMBER_ID", + * or an empty string if \p error is NULL. + * + * @remark The lifetime of the returned pointer is the same as the error object. + * + * @sa rd_kafka_err2name() + */ +RD_EXPORT +const char *rd_kafka_error_name(const rd_kafka_error_t *error); + +/** + * @returns a human readable error string for \p error, + * or an empty string if \p error is NULL. + * + * @remark The lifetime of the returned pointer is the same as the error object. + */ +RD_EXPORT +const char *rd_kafka_error_string(const rd_kafka_error_t *error); + + +/** + * @returns 1 if the error is a fatal error, indicating that the client + * instance is no longer usable, else 0 (also if \p error is NULL). + */ +RD_EXPORT +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error); + + +/** + * @returns 1 if the operation may be retried, + * else 0 (also if \p error is NULL). + */ +RD_EXPORT +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error); + + +/** + * @returns 1 if the error is an abortable transaction error in which case + * the application must call rd_kafka_abort_transaction() and + * start a new transaction with rd_kafka_begin_transaction() if it + * wishes to proceed with transactions. + * Else returns 0 (also if \p error is NULL). + * + * @remark The return value of this method is only valid for errors returned + * by the transactional API. + */ +RD_EXPORT +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error); + +/** + * @brief Free and destroy an error object. + * + * @remark As a conveniance it is permitted to pass a NULL \p error. + */ +RD_EXPORT +void rd_kafka_error_destroy(rd_kafka_error_t *error); + + +/** + * @brief Create a new error object with error \p code and optional + * human readable error string in \p fmt. + * + * This method is mainly to be used for mocking errors in application test code. + * + * The returned object must be destroyed with rd_kafka_error_destroy(). + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, + const char *fmt, + ...) 
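As a rough illustration of the error-object accessors above (the helper name is an assumption; a NULL error is treated as success, as documented):

    static void report_and_destroy(rd_kafka_error_t *error) {
        if (!error)
            return; /* NULL means the operation succeeded */
        fprintf(stderr, "%s: %s%s%s\n",
                rd_kafka_error_name(error),
                rd_kafka_error_string(error),
                rd_kafka_error_is_retriable(error) ? " (retriable)" : "",
                rd_kafka_error_is_fatal(error) ? " (fatal)" : "");
        rd_kafka_error_destroy(error);
    }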
RD_FORMAT(printf, 2, 3); + + +/** + * @brief Topic+Partition place holder + * + * Generic place holder for a Topic+Partition and its related information + * used for multiple purposes: + * - consumer offset (see rd_kafka_commit(), et.al.) + * - group rebalancing callback (rd_kafka_conf_set_rebalance_cb()) + * - offset commit result callback (rd_kafka_conf_set_offset_commit_cb()) + */ + +/** + * @brief Generic place holder for a specific Topic+Partition. + * + * @sa rd_kafka_topic_partition_list_new() + */ +typedef struct rd_kafka_topic_partition_s { + char *topic; /**< Topic name */ + int32_t partition; /**< Partition */ + int64_t offset; /**< Offset */ + void *metadata; /**< Metadata */ + size_t metadata_size; /**< Metadata size */ + void *opaque; /**< Opaque value for application use */ + rd_kafka_resp_err_t err; /**< Error code, depending on use. */ + void *_private; /**< INTERNAL USE ONLY, + * INITIALIZE TO ZERO, DO NOT TOUCH, + * DO NOT COPY, DO NOT SHARE WITH OTHER + * rd_kafka_t INSTANCES. */ +} rd_kafka_topic_partition_t; + +/** + * @brief Destroy a rd_kafka_topic_partition_t. + * @remark This must not be called for elements in a topic partition list. + */ +RD_EXPORT +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar); + + +/** + * @brief Sets the offset leader epoch (use -1 to clear). + * + * @param rktpar Partition object. + * @param leader_epoch Offset leader epoch, use -1 to reset. + * + * @remark See KIP-320 for more information. + */ +RD_EXPORT +void rd_kafka_topic_partition_set_leader_epoch( + rd_kafka_topic_partition_t *rktpar, + int32_t leader_epoch); + +/** + * @returns the offset leader epoch, if relevant and known, + * else -1. + * + * @param rktpar Partition object. + * + * @remark See KIP-320 for more information. + */ +RD_EXPORT +int32_t rd_kafka_topic_partition_get_leader_epoch( + const rd_kafka_topic_partition_t *rktpar); + +/** + * @brief A growable list of Topic+Partitions. + * + */ +typedef struct rd_kafka_topic_partition_list_s { + int cnt; /**< Current number of elements */ + int size; /**< Current allocated size */ + rd_kafka_topic_partition_t *elems; /**< Element array[] */ +} rd_kafka_topic_partition_list_t; + +/** + * @brief Create a new list/vector Topic+Partition container. + * + * @param size Initial allocated size used when the expected number of + * elements is known or can be estimated. + * Avoids reallocation and possibly relocation of the + * elems array. + * + * @returns A newly allocated Topic+Partition list. + * + * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources + * in use by a list and the list itself. + * @sa rd_kafka_topic_partition_list_add() + */ +RD_EXPORT +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size); + +/** + * @brief Free all resources used by the list and the list itself. + */ +RD_EXPORT +void rd_kafka_topic_partition_list_destroy( + rd_kafka_topic_partition_list_t *rkparlist); + +/** + * @brief Add topic+partition to list + * + * @param rktparlist List to extend + * @param topic Topic name (copied) + * @param partition Partition id + * + * @returns The object which can be used to fill in additionals fields. + */ +RD_EXPORT +rd_kafka_topic_partition_t * +rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); + + +/** + * @brief Add range of partitions from \p start to \p stop inclusive. 
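A brief sketch of building and releasing a partition list with the functions above; the topic name, partition number and offset are placeholders:

    rd_kafka_topic_partition_list_t *parts =
        rd_kafka_topic_partition_list_new(1);
    rd_kafka_topic_partition_t *rktpar =
        rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
    rktpar->offset = 1234; /* e.g. an offset to commit or seek to */
    /* ... pass `parts` to an offset/assignment API ... */
    rd_kafka_topic_partition_list_destroy(parts);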
+ * + * @param rktparlist List to extend + * @param topic Topic name (copied) + * @param start Start partition of range + * @param stop Last partition of range (inclusive) + */ +RD_EXPORT +void rd_kafka_topic_partition_list_add_range( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t start, + int32_t stop); + + + +/** + * @brief Delete partition from list. + * + * @param rktparlist List to modify + * @param topic Topic name to match + * @param partition Partition to match + * + * @returns 1 if partition was found (and removed), else 0. + * + * @remark Any held indices to elems[] are unusable after this call returns 1. + */ +RD_EXPORT +int rd_kafka_topic_partition_list_del( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); + + +/** + * @brief Delete partition from list by elems[] index. + * + * @returns 1 if partition was found (and removed), else 0. + * + * @sa rd_kafka_topic_partition_list_del() + */ +RD_EXPORT +int rd_kafka_topic_partition_list_del_by_idx( + rd_kafka_topic_partition_list_t *rktparlist, + int idx); + + +/** + * @brief Make a copy of an existing list. + * + * @param src The existing list to copy. + * + * @returns A new list fully populated to be identical to \p src + */ +RD_EXPORT +rd_kafka_topic_partition_list_t * +rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src); + + + +/** + * @brief Set offset to \p offset for \p topic and \p partition + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or + * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found + * in the list. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int64_t offset); + + + +/** + * @brief Find element by \p topic and \p partition. + * + * @returns a pointer to the first matching element, or NULL if not found. + */ +RD_EXPORT +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); + + +/** + * @brief Sort list using comparator \p cmp. + * + * If \p cmp is NULL the default comparator will be used that + * sorts by ascending topic name and partition. + * + * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp. + * + */ +RD_EXPORT void rd_kafka_topic_partition_list_sort( + rd_kafka_topic_partition_list_t *rktparlist, + int (*cmp)(const void *a, const void *b, void *cmp_opaque), + void *cmp_opaque); + + +/**@}*/ + + + +/** + * @name Var-arg tag types + * @{ + * + */ + +/** + * @enum rd_kafka_vtype_t + * + * @brief Var-arg tag types + * + * @sa rd_kafka_producev() + */ +typedef enum rd_kafka_vtype_t { + RD_KAFKA_VTYPE_END, /**< va-arg sentinel */ + RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */ + RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */ + RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */ + RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ + RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ + RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque + * value. This is the same as + * the _private field in + * rd_kafka_message_t, also known + * as the msg_opaque. */ + RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. 
flags */ + RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */ + RD_KAFKA_VTYPE_HEADER, /**< (const char *, const void *, ssize_t) + * Message Header */ + RD_KAFKA_VTYPE_HEADERS, /**< (rd_kafka_headers_t *) Headers list */ +} rd_kafka_vtype_t; + + +/** + * @brief VTYPE + argument container for use with rd_kafka_produce_va() + * + * See RD_KAFKA_V_..() macros below for which union field corresponds + * to which RD_KAFKA_VTYPE_... + */ +typedef struct rd_kafka_vu_s { + rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */ + /** Value union, see RD_KAFKA_V_.. macros for which field to use. */ + union { + const char *cstr; + rd_kafka_topic_t *rkt; + int i; + int32_t i32; + int64_t i64; + struct { + void *ptr; + size_t size; + } mem; + struct { + const char *name; + const void *val; + ssize_t size; + } header; + rd_kafka_headers_t *headers; + void *ptr; + char _pad[64]; /**< Padding size for future-proofness */ + } u; +} rd_kafka_vu_t; + +/** + * @brief Convenience macros for rd_kafka_vtype_t that takes the + * correct arguments for each vtype. + */ + +/*! + * va-arg end sentinel used to terminate the variable argument list + */ +#define RD_KAFKA_V_END RD_KAFKA_VTYPE_END + +/*! + * Topic name (const char *) + * + * rd_kafka_vu_t field: u.cstr + */ +#define RD_KAFKA_V_TOPIC(topic) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ + (const char *)topic +/*! + * Topic object (rd_kafka_topic_t *) + * + * rd_kafka_vu_t field: u.rkt + */ +#define RD_KAFKA_V_RKT(rkt) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ + (rd_kafka_topic_t *)rkt +/*! + * Partition (int32_t) + * + * rd_kafka_vu_t field: u.i32 + */ +#define RD_KAFKA_V_PARTITION(partition) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ + (int32_t)partition +/*! + * Message value/payload pointer and length (void *, size_t) + * + * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size + */ +#define RD_KAFKA_V_VALUE(VALUE, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ + (void *)VALUE, (size_t)LEN +/*! + * Message key pointer and length (const void *, size_t) + * + * rd_kafka_vu_t field: u.mem.ptr, rd_kafka_vu.t.u.mem.size + */ +#define RD_KAFKA_V_KEY(KEY, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ + (void *)KEY, (size_t)LEN +/*! + * Message opaque pointer (void *) + * Same as \c msg_opaque, \c produce(.., msg_opaque), + * and \c rkmessage->_private . + * + * rd_kafka_vu_t field: u.ptr + */ +#define RD_KAFKA_V_OPAQUE(msg_opaque) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \ + (void *)msg_opaque +/*! + * Message flags (int) + * @sa RD_KAFKA_MSG_F_COPY, et.al. + * + * rd_kafka_vu_t field: u.i + */ +#define RD_KAFKA_V_MSGFLAGS(msgflags) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), (int)msgflags +/*! + * Timestamp in milliseconds since epoch UTC (int64_t). + * A value of 0 will use the current wall-clock time. + * + * rd_kafka_vu_t field: u.i64 + */ +#define RD_KAFKA_V_TIMESTAMP(timestamp) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ + (int64_t)timestamp +/*! + * Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN). + * @sa rd_kafka_header_add() + * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed + * in the same call to producev(). 
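A minimal sketch of a producev() call built from the convenience macros above; `rk` and the topic name are placeholders, and rd_kafka_producev() itself is declared later in the full header:

    rd_kafka_resp_err_t err = rd_kafka_producev(
        rk,
        RD_KAFKA_V_TOPIC("mytopic"),
        RD_KAFKA_V_VALUE("hello", 5),
        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
        RD_KAFKA_V_END); /* the var-arg list must end with RD_KAFKA_V_END */
    if (err)
        fprintf(stderr, "producev: %s\n", rd_kafka_err2str(err));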
+ * + * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size + */ +#define RD_KAFKA_V_HEADER(NAME, VALUE, LEN) \ + _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ + const void *, VALUE, ssize_t, LEN), \ + (const char *)NAME, (const void *)VALUE, (ssize_t)LEN + +/*! + * Message Headers list (rd_kafka_headers_t *). + * The message object will assume ownership of the headers (unless producev() + * fails). + * Any existing headers will be replaced. + * @sa rd_kafka_message_set_headers() + * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed + * in the same call to producev(). + * + * rd_kafka_vu_t fields: u.headers + */ +#define RD_KAFKA_V_HEADERS(HDRS) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ + (rd_kafka_headers_t *)HDRS + + +/**@}*/ + + +/** + * @name Message headers + * @{ + * + * @brief Message headers consist of a list of (string key, binary value) pairs. + * Duplicate keys are supported and the order in which keys were + * added are retained. + * + * Header values are considered binary and may have three types of + * value: + * - proper value with size > 0 and a valid pointer + * - empty value with size = 0 and any non-NULL pointer + * - null value with size = 0 and a NULL pointer + * + * Headers require Apache Kafka broker version v0.11.0.0 or later. + * + * Header operations are O(n). + */ + + +/** + * @brief Create a new headers list. + * + * @param initial_count Preallocate space for this number of headers. + * Any number of headers may be added, updated and + * removed regardless of the initial count. + */ +RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count); + +/** + * @brief Destroy the headers list. The object and any returned value pointers + * are not usable after this call. + */ +RD_EXPORT void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs); + +/** + * @brief Make a copy of headers list \p src. + */ +RD_EXPORT rd_kafka_headers_t * +rd_kafka_headers_copy(const rd_kafka_headers_t *src); + +/** + * @brief Add header with name \p name and value \p val (copied) of size + * \p size (not including null-terminator). + * + * @param hdrs Headers list. + * @param name Header name. + * @param name_size Header name size (not including the null-terminator). + * If -1 the \p name length is automatically acquired using + * strlen(). + * @param value Pointer to header value, or NULL (set size to 0 or -1). + * @param value_size Size of header value. If -1 the \p value is assumed to be a + * null-terminated string and the length is automatically + * acquired using strlen(). + * + * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, + * else RD_KAFKA_RESP_ERR_NO_ERROR. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, + const char *name, + ssize_t name_size, + const void *value, + ssize_t value_size); + +/** + * @brief Remove all headers for the given key (if any). + * + * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, + * RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, + * else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, + const char *name); + + +/** + * @brief Find last header in list \p hdrs matching \p name. + * + * @param hdrs Headers list. + * @param name Header to find (last match). + * @param valuep (out) Set to a (null-terminated) const pointer to the value + * (may be NULL). 
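A short sketch of the ownership rule stated above for RD_KAFKA_V_HEADERS(): the application only destroys the header list if the produce call fails. The header name and values here are placeholders:

    rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
    rd_kafka_header_add(hdrs, "trace-id", -1, "abc123", -1);
    rd_kafka_resp_err_t err = rd_kafka_producev(
        rk,
        RD_KAFKA_V_TOPIC("mytopic"),
        RD_KAFKA_V_VALUE("hello", 5),
        RD_KAFKA_V_HEADERS(hdrs), /* message takes ownership on success */
        RD_KAFKA_V_END);
    if (err)
        rd_kafka_headers_destroy(hdrs);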
+ * @param sizep (out) Set to the value's size (not including null-terminator). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else + * RD_KAFKA_RESP_ERR__NOENT. + * + * @remark The returned pointer in \p valuep includes a trailing null-terminator + * that is not accounted for in \p sizep. + * @remark The returned pointer is only valid as long as the headers list and + * the header item is valid. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, + const char *name, + const void **valuep, + size_t *sizep); + +/** + * @brief Iterator for headers matching \p name. + * + * Same semantics as rd_kafka_header_get_last() + * + * @param hdrs Headers to iterate. + * @param idx Iterator index, start at 0 and increment by one for each call + * as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned. + * @param name Header name to match. + * @param valuep (out) Set to a (null-terminated) const pointer to the value + * (may be NULL). + * @param sizep (out) Set to the value's size (not including null-terminator). + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_header_get(const rd_kafka_headers_t *hdrs, + size_t idx, + const char *name, + const void **valuep, + size_t *sizep); + + +/** + * @brief Iterator for all headers. + * + * Same semantics as rd_kafka_header_get() + * + * @sa rd_kafka_header_get() + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, + size_t idx, + const char **namep, + const void **valuep, + size_t *sizep); + + + +/**@}*/ + + + +/** + * @name Kafka messages + * @{ + * + */ + + + +// FIXME: This doesn't show up in docs for some reason +// "Compound rd_kafka_message_t is not documented." + +/** + * @brief A Kafka message as returned by the \c rd_kafka_consume*() family + * of functions as well as provided to the Producer \c dr_msg_cb(). + * + * For the consumer this object has two purposes: + * - provide the application with a consumed message. (\c err == 0) + * - report per-topic+partition consumer errors (\c err != 0) + * + * The application must check \c err to decide what action to take. + * + * When the application is finished with a message it must call + * rd_kafka_message_destroy() unless otherwise noted. + */ +typedef struct rd_kafka_message_s { + rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ + rd_kafka_topic_t *rkt; /**< Topic */ + int32_t partition; /**< Partition */ + void *payload; /**< Producer: original message payload. + * Consumer: Depends on the value of \c err : + * - \c err==0: Message payload. + * - \c err!=0: Error string */ + size_t len; /**< Depends on the value of \c err : + * - \c err==0: Message payload length + * - \c err!=0: Error string length */ + void *key; /**< Depends on the value of \c err : + * - \c err==0: Optional message key */ + size_t key_len; /**< Depends on the value of \c err : + * - \c err==0: Optional message key length*/ + int64_t offset; /**< Consumer: + * - Message offset (or offset for error + * if \c err!=0 if applicable). + * Producer, dr_msg_cb: + * Message offset assigned by broker. + * May be RD_KAFKA_OFFSET_INVALID + * for retried messages when + * idempotence is enabled. */ + void *_private; /**< Consumer: + * - rdkafka private pointer: + * DO NOT MODIFY, DO NOT COPY. + * Producer: + * - dr_msg_cb: + * msg_opaque from produce() call or + * RD_KAFKA_V_OPAQUE from producev(). */ +} rd_kafka_message_t; + + +/** + * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka. 
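Roughly how the message object documented above is handled on the consumer side; `rk` is assumed to be an already subscribed consumer, and rd_kafka_consumer_poll() is declared later in the full header:

    static void poll_one(rd_kafka_t *rk) {
        rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100 /* ms */);
        if (!rkm)
            return;
        if (rkm->err) /* per-partition error or event */
            fprintf(stderr, "consume: %s\n", rd_kafka_message_errstr(rkm));
        else
            printf("%.*s\n", (int)rkm->len, (const char *)rkm->payload);
        rd_kafka_message_destroy(rkm); /* always hand the message back */
    }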
+ */ +RD_EXPORT +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); + + + +/** + * @brief Returns the error string for an errored rd_kafka_message_t or NULL if + * there was no error. + * + * @remark This function MUST NOT be used with the producer. + */ +RD_EXPORT +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage); + + +/** + * @brief Returns the message timestamp for a consumed message. + * + * The timestamp is the number of milliseconds since the epoch (UTC). + * + * \p tstype (if not NULL) is updated to indicate the type of timestamp. + * + * @returns message timestamp, or -1 if not available. + * + * @remark Message timestamps require broker version 0.10.0 or later. + */ +RD_EXPORT +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, + rd_kafka_timestamp_type_t *tstype); + + + +/** + * @brief Returns the latency for a produced message measured from + * the produce() call. + * + * @returns the latency in microseconds, or -1 if not available. + */ +RD_EXPORT +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage); + + +/** + * @brief Returns the broker id of the broker the message was produced to + * or fetched from. + * + * @returns a broker id if known, else -1. + */ +RD_EXPORT +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage); + + +/** + * @brief Get the message header list. + * + * The returned pointer in \p *hdrsp is associated with the \p rkmessage and + * must not be used after destruction of the message object or the header + * list is replaced with rd_kafka_message_set_headers(). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned, + * RD_KAFKA_RESP_ERR__NOENT if the message has no headers, + * or another error code if the headers could not be parsed. + * + * @remark Headers require broker version 0.11.0.0 or later. + * + * @remark As an optimization the raw protocol headers are parsed on + * the first call to this function. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); + +/** + * @brief Get the message header list and detach the list from the message + * making the application the owner of the headers. + * The application must eventually destroy the headers using + * rd_kafka_headers_destroy(). + * The message's headers will be set to NULL. + * + * Otherwise same semantics as rd_kafka_message_headers() + * + * @sa rd_kafka_message_headers + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); + + +/** + * @brief Replace the message's current headers with a new list. + * + * @param rkmessage The message to set headers. + * @param hdrs New header list. The message object assumes ownership of + * the list, the list will be destroyed automatically with + * the message object. + * The new headers list may be updated until the message object + * is passed or returned to librdkafka. + * + * @remark The existing headers object, if any, will be destroyed. + */ +RD_EXPORT +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t *hdrs); + + +/** + * @brief Returns the number of header key/value pairs + * + * @param hdrs Headers to count + */ +RD_EXPORT size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs); + + +/** + * @enum rd_kafka_msg_status_t + * @brief Message persistence status can be used by the application to + * find out if a produced message was persisted in the topic log. 
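A compact sketch of the header accessors above applied to a consumed message `rkm`; the header name is a placeholder, and 0 (RD_KAFKA_RESP_ERR_NO_ERROR) is treated as success:

    rd_kafka_headers_t *mhdrs;
    if (!rd_kafka_message_headers(rkm, &mhdrs)) {
        const void *val;
        size_t size;
        /* the list stays owned by the message; do not destroy it here */
        if (!rd_kafka_header_get_last(mhdrs, "trace-id", &val, &size))
            printf("trace-id=%.*s\n", (int)size, (const char *)val);
    }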
+ */ +typedef enum { + /** Message was never transmitted to the broker, or failed with + * an error indicating it was not written to the log. + * Application retry risks ordering, but not duplication. */ + RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0, + + /** Message was transmitted to broker, but no acknowledgement was + * received. + * Application retry risks ordering and duplication. */ + RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1, + + /** Message was written to the log and acknowledged by the broker. + * No reason for application to retry. + * Note: this value should only be trusted with \c acks=all. */ + RD_KAFKA_MSG_STATUS_PERSISTED = 2 +} rd_kafka_msg_status_t; + + +/** + * @brief Returns the message's persistence status in the topic log. + * + * @remark The message status is not available in on_acknowledgement + * interceptors. + */ +RD_EXPORT rd_kafka_msg_status_t +rd_kafka_message_status(const rd_kafka_message_t *rkmessage); + + +/** + * @returns the message's partition leader epoch at the time the message was + * fetched and if known, else -1. + * + * @remark This API must only be used on consumed messages without error. + * @remark Requires broker version >= 2.10 (KIP-320). + */ +RD_EXPORT int32_t +rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage); + + +/**@}*/ + + +/** + * @name UUID + * @{ + * + */ + +/** + * @brief Computes base64 encoding for the given uuid string. + * @param uuid UUID for which base64 encoding is required. + * + * @return base64 encoded string for the given UUID or NULL in case of some + * issue with the conversion or the conversion is not supported. + */ +RD_EXPORT const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid); + +/** + * @brief Gets least significant 64 bits for the given UUID. + * + * @param uuid UUID + * + * @return least significant 64 bits for the given UUID. + */ +RD_EXPORT int64_t +rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid); + + +/** + * @brief Gets most significant 64 bits for the given UUID. + * + * @param uuid UUID + * + * @return most significant 64 bits for the given UUID. + */ +RD_EXPORT int64_t +rd_kafka_Uuid_most_significant_bits(const rd_kafka_Uuid_t *uuid); + + +/** + * @brief Creates a new UUID. + * + * @param most_significant_bits most significant 64 bits of the 128 bits UUID. + * @param least_significant_bits least significant 64 bits of the 128 bits UUID. + * + * @return A newly allocated UUID. + * @remark Must be freed after use using rd_kafka_Uuid_destroy() + */ +RD_EXPORT rd_kafka_Uuid_t *rd_kafka_Uuid_new(int64_t most_significant_bits, + int64_t least_significant_bits); + +/** + * @brief Copies the given UUID. + * + * @param uuid UUID to be copied. + * + * @return A newly allocated copy of the provided UUID. + * @remark Must be freed after use using rd_kafka_Uuid_destroy() + */ +RD_EXPORT rd_kafka_Uuid_t *rd_kafka_Uuid_copy(const rd_kafka_Uuid_t *uuid); + +/** + * @brief Destroy the provided uuid. + * + * @param uuid UUID + */ +RD_EXPORT void rd_kafka_Uuid_destroy(rd_kafka_Uuid_t *uuid); + +/**@}*/ + + +/** + * @name Configuration interface + * @{ + * + * @brief Main/global configuration property interface + * + */ + +/** + * @enum rd_kafka_conf_res_t + * @brief Configuration result type + */ +typedef enum { + RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ + RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or + * property or value not supported in + * this build. 
*/ + RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ +} rd_kafka_conf_res_t; + + +/** + * @brief Create configuration object. + * + * When providing your own configuration to the \c rd_kafka_*_new_*() calls + * the rd_kafka_conf_t objects needs to be created with this function + * which will set up the defaults. + * I.e.: + * @code + * rd_kafka_conf_t *myconf; + * rd_kafka_conf_res_t res; + * + * myconf = rd_kafka_conf_new(); + * res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600", + * errstr, sizeof(errstr)); + * if (res != RD_KAFKA_CONF_OK) + * die("%s\n", errstr); + * + * rk = rd_kafka_new(..., myconf); + * @endcode + * + * Please see CONFIGURATION.md for the default settings or use + * rd_kafka_conf_properties_show() to provide the information at runtime. + * + * The properties are identical to the Apache Kafka configuration properties + * whenever possible. + * + * @remark A successful call to rd_kafka_new() will assume ownership of + * the conf object and rd_kafka_conf_destroy() must not be called. + * + * @returns A new rd_kafka_conf_t object with defaults set. + * + * @sa rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy() + */ +RD_EXPORT +rd_kafka_conf_t *rd_kafka_conf_new(void); + + +/** + * @brief Destroys a conf object. + */ +RD_EXPORT +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf); + + +/** + * @brief Creates a copy/duplicate of configuration object \p conf + * + * @remark Interceptors are NOT copied to the new configuration object. + * @sa rd_kafka_interceptor_f_on_conf_dup + */ +RD_EXPORT +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf); + + +/** + * @brief Same as rd_kafka_conf_dup() but with an array of property name + * prefixes to filter out (ignore) when copying. + */ +RD_EXPORT +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, + size_t filter_cnt, + const char **filter); + + + +/** + * @returns the configuration object used by an rd_kafka_t instance. + * For use with rd_kafka_conf_get(), et.al., to extract configuration + * properties from a running client. + * + * @remark the returned object is read-only and its lifetime is the same + * as the rd_kafka_t object. + */ +RD_EXPORT +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk); + + +/** + * @brief Sets a configuration property. + * + * \p conf must have been previously created with rd_kafka_conf_new(). + * + * Fallthrough: + * Topic-level configuration properties may be set using this interface + * in which case they are applied on the \c default_topic_conf. + * If no \c default_topic_conf has been set one will be created. + * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will + * replace the current default topic configuration. + * + * @returns \c rd_kafka_conf_res_t to indicate success or failure. + * In case of failure \p errstr is updated to contain a human readable + * error string. + * + * @remark Setting properties or values that were disabled at build time due to + * missing dependencies will return RD_KAFKA_CONF_INVALID. + */ +RD_EXPORT +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *value, + char *errstr, + size_t errstr_size); + + +/** + * @brief Enable event sourcing. + * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable + * for consumption by `rd_kafka_queue_poll()`. 
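+ *
+ * A minimal sketch (illustrative only): enable delivery report and error
+ * events so they can be served through the event API rather than callbacks.
+ * The \c RD_KAFKA_EVENT_* constants are defined in the Event interface
+ * section of this header.
+ * @code
+ * rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ *
+ * rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR | RD_KAFKA_EVENT_ERROR);
+ * @endcode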
+ */ +RD_EXPORT +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events); + + +/** + * @brief Generic event callback to be used with the event API to trigger + * callbacks for \c rd_kafka_event_t objects from a background + * thread serving the background queue. + * + * How to use: + * 1. First set the event callback on the configuration object with this + * function, followed by creating an rd_kafka_t instance + * with rd_kafka_new(). + * 2. Get the instance's background queue with rd_kafka_queue_get_background() + * and pass it as the reply/response queue to an API that takes an + * event queue, such as rd_kafka_CreateTopics(). + * 3. As the response event is ready and enqueued on the background queue the + * event callback will be triggered from the background thread. + * 4. Prior to destroying the client instance, loose your reference to the + * background queue by calling rd_kafka_queue_destroy(). + * + * The application must destroy the \c rkev passed to \p event cb using + * rd_kafka_event_destroy(). + * + * The \p event_cb \c opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * @remark This callback is a specialized alternative to the poll-based + * event API described in the Event interface section. + * + * @remark The \p event_cb will be called spontaneously from a background + * thread completely managed by librdkafka. + * Take care to perform proper locking of application objects. + * + * @warning The application MUST NOT call rd_kafka_destroy() from the + * event callback. + * + * @sa rd_kafka_queue_get_background + */ +RD_EXPORT void rd_kafka_conf_set_background_event_cb( + rd_kafka_conf_t *conf, + void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)); + + +/** + * @deprecated See rd_kafka_conf_set_dr_msg_cb() + */ +RD_EXPORT +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque)); + +/** + * @brief \b Producer: Set delivery report callback in provided \p conf object. + * + * The delivery report callback will be called once for each message + * accepted by rd_kafka_produce() (et.al) with \p err set to indicate + * the result of the produce request. + * + * The callback is called when a message is succesfully produced or + * if librdkafka encountered a permanent failure. + * Delivery errors occur when the retry count is exceeded, when the + * message.timeout.ms timeout is exceeded or there is a permanent error + * like RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART. + * + * An application must call rd_kafka_poll() at regular intervals to + * serve queued delivery report callbacks. + * + * The broker-assigned offset can be retrieved with \c rkmessage->offset + * and the timestamp can be retrieved using rd_kafka_message_timestamp(). + * + * The \p dr_msg_cb \c opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * The per-message msg_opaque value is available in + * \c rd_kafka_message_t._private. + * + * @remark The Idempotent Producer may return invalid timestamp + * (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE), and + * and offset (RD_KAFKA_OFFSET_INVALID) for retried messages + * that were previously successfully delivered but not properly + * acknowledged. 
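+ *
+ * Illustrative registration sketch (not upstream documentation); the
+ * \c dr_msg_cb function is assumed to be defined by the application:
+ * @code
+ * char errstr[512];
+ * rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ *
+ * rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+ *
+ * rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
+ *                               errstr, sizeof(errstr));
+ *
+ * // ... produce messages ...
+ *
+ * rd_kafka_poll(rk, 100); // call at regular intervals to serve dr_msg_cb
+ * @endcode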
+ */ +RD_EXPORT +void rd_kafka_conf_set_dr_msg_cb( + rd_kafka_conf_t *conf, + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque)); + + +/** + * @brief \b Consumer: Set consume callback for use with + * rd_kafka_consumer_poll() + * + * The \p consume_cb \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + */ +RD_EXPORT +void rd_kafka_conf_set_consume_cb( + rd_kafka_conf_t *conf, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)); + +/** + * @brief \b Consumer: Set rebalance callback for use with + * coordinated consumer group balancing. + * + * The \p err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS + * or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions' + * contains the full partition set that was either assigned or revoked. + * + * Registering a \p rebalance_cb turns off librdkafka's automatic + * partition assignment/revocation and instead delegates that responsibility + * to the application's \p rebalance_cb. + * + * The rebalance callback is responsible for updating librdkafka's + * assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS + * and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle + * arbitrary rebalancing failures where \p err is neither of those. + * @remark In this latter case (arbitrary error), the application must + * call rd_kafka_assign(rk, NULL) to synchronize state. + * + * For eager/non-cooperative `partition.assignment.strategy` assignors, + * such as `range` and `roundrobin`, the application must use + * rd_kafka_assign() to set or clear the entire assignment. + * For the cooperative assignors, such as `cooperative-sticky`, the application + * must use rd_kafka_incremental_assign() for + * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign() + * for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. + * + * Without a rebalance callback this is done automatically by librdkafka + * but registering a rebalance callback gives the application flexibility + * in performing other operations along with the assigning/revocation, + * such as fetching offsets from an alternate location (on assign) + * or manually committing offsets (on revoke). + * + * rebalance_cb is always triggered exactly once when a rebalance completes + * with a new assignment, even if that assignment is empty. If an + * eager/non-cooperative assignor is configured, there will eventually be + * exactly one corresponding call to rebalance_cb to revoke these partitions + * (even if empty), whether this is due to a group rebalance or lost + * partitions. In the cooperative case, rebalance_cb will never be called if + * the set of partitions being revoked is empty (whether or not lost). + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * @remark The \p partitions list is destroyed by librdkafka on return + * return from the rebalance_cb and must not be freed or + * saved by the application. + * + * @remark Be careful when modifying the \p partitions list. + * Changing this list should only be done to change the initial + * offsets for each partition. + * But a function like `rd_kafka_position()` might have unexpected + * effects for instance when a consumer gets assigned a partition + * it used to consume at an earlier rebalance. In this case, the + * list of partitions will be updated with the old offset for that + * partition. 
In this case, it is generally better to pass a copy + * of the list (see `rd_kafka_topic_partition_list_copy()`). + * The result of `rd_kafka_position()` is typically outdated in + * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. + * + * @sa rd_kafka_assign() + * @sa rd_kafka_incremental_assign() + * @sa rd_kafka_incremental_unassign() + * @sa rd_kafka_assignment_lost() + * @sa rd_kafka_rebalance_protocol() + * + * The following example shows the application's responsibilities: + * @code + * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err, + * rd_kafka_topic_partition_list_t *partitions, + * void *opaque) { + * + * switch (err) + * { + * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + * // application may load offets from arbitrary external + * // storage here and update \p partitions + * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) + * rd_kafka_incremental_assign(rk, partitions); + * else // EAGER + * rd_kafka_assign(rk, partitions); + * break; + * + * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + * if (manual_commits) // Optional explicit manual commit + * rd_kafka_commit(rk, partitions, 0); // sync commit + * + * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) + * rd_kafka_incremental_unassign(rk, partitions); + * else // EAGER + * rd_kafka_assign(rk, NULL); + * break; + * + * default: + * handle_unlikely_error(err); + * rd_kafka_assign(rk, NULL); // sync state + * break; + * } + * } + * @endcode + * + * @remark The above example lacks error handling for assign calls, see + * the examples/ directory. + */ +RD_EXPORT +void rd_kafka_conf_set_rebalance_cb( + rd_kafka_conf_t *conf, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque)); + + + +/** + * @brief \b Consumer: Set offset commit callback for use with consumer groups. + * + * The results of automatic or manual offset commits will be scheduled + * for this callback and is served by rd_kafka_consumer_poll(). + * + * If no partitions had valid offsets to commit this callback will be called + * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered + * an error. + * + * The \p offsets list contains per-partition information: + * - \c offset: committed offset (attempted) + * - \c err: commit error + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + */ +RD_EXPORT +void rd_kafka_conf_set_offset_commit_cb( + rd_kafka_conf_t *conf, + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque)); + + +/** + * @brief Set error callback in provided conf object. + * + * The error callback is used by librdkafka to signal warnings and errors + * back to the application. + * + * These errors should generally be considered informational and non-permanent, + * the client will try to recover automatically from all type of errors. + * Given that the client and cluster configuration is correct the + * application should treat these as temporary errors. + * + * \p error_cb will be triggered with \c err set to RD_KAFKA_RESP_ERR__FATAL + * if a fatal error has been raised; in this case use rd_kafka_fatal_error() to + * retrieve the fatal error code and error string, and then begin terminating + * the client instance. + * + * If no \p error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set + * with rd_kafka_conf_set_events, then the errors will be logged instead. 
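+ *
+ * An illustrative sketch (not upstream documentation) of an error callback
+ * that logs transient errors and extracts the underlying fatal error when
+ * \c RD_KAFKA_RESP_ERR__FATAL is raised:
+ * @code
+ * static void error_cb (rd_kafka_t *rk, int err, const char *reason,
+ *                       void *opaque) {
+ *         if (err == RD_KAFKA_RESP_ERR__FATAL) {
+ *                 char fatal_errstr[512];
+ *                 rd_kafka_resp_err_t fatal_err =
+ *                         rd_kafka_fatal_error(rk, fatal_errstr,
+ *                                              sizeof(fatal_errstr));
+ *                 fprintf(stderr, "FATAL %s: %s\n",
+ *                         rd_kafka_err2name(fatal_err), fatal_errstr);
+ *                 // begin terminating the client instance
+ *         } else {
+ *                 fprintf(stderr, "Error: %s: %s\n",
+ *                         rd_kafka_err2str((rd_kafka_resp_err_t)err), reason);
+ *         }
+ * }
+ * @endcode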
+ * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + */ +RD_EXPORT +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque)); + +/** + * @brief Set throttle callback. + * + * The throttle callback is used to forward broker throttle times to the + * application for Produce and Fetch (consume) requests. + * + * Callbacks are triggered whenever a non-zero throttle time is returned by + * the broker, or when the throttle time drops back to zero. + * + * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at + * regular intervals to serve queued callbacks. + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * @remark Requires broker version 0.9.0 or later. + */ +RD_EXPORT +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque)); + + +/** + * @brief Set logger callback. + * + * The default is to print to stderr, but a syslog logger is also available, + * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives. + * Alternatively the application may provide its own logger callback. + * Or pass \p func as NULL to disable logging. + * + * This is the configuration alternative to the deprecated rd_kafka_set_logger() + * + * @remark The log_cb will be called spontaneously from librdkafka's internal + * threads unless logs have been forwarded to a poll queue through + * \c rd_kafka_set_log_queue(). + * An application MUST NOT call any librdkafka APIs or do any prolonged + * work in a non-forwarded \c log_cb. + */ +RD_EXPORT +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); + + +/** + * @brief Set statistics callback in provided conf object. + * + * The statistics callback is triggered from rd_kafka_poll() every + * \c statistics.interval.ms (needs to be configured separately). + * Function arguments: + * - \p rk - Kafka handle + * - \p json - String containing the statistics data in JSON format + * - \p json_len - Length of \p json string. + * - \p opaque - Application-provided opaque as set by + * rd_kafka_conf_set_opaque(). + * + * For more information on the format of \p json, see + * https://github.com/confluentinc/librdkafka/wiki/Statistics + * + * If the application wishes to hold on to the \p json pointer and free + * it at a later time it must return 1 from the \p stats_cb. + * If the application returns 0 from the \p stats_cb then librdkafka + * will immediately free the \p json pointer. + * + * See STATISTICS.md for a full definition of the JSON object. + */ +RD_EXPORT +void rd_kafka_conf_set_stats_cb( + rd_kafka_conf_t *conf, + int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)); + +/** + * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object. + * + * @param conf the configuration to mutate. + * @param oauthbearer_token_refresh_cb the callback to set; callback function + * arguments:
+ * \p rk - Kafka handle
+ * \p oauthbearer_config - Value of configuration property + * sasl.oauthbearer.config. + * \p opaque - Application-provided opaque set via + * rd_kafka_conf_set_opaque() + * + * The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll() + * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, + * typically based on the configuration defined in \c sasl.oauthbearer.config. + * + * The callback should invoke rd_kafka_oauthbearer_set_token() + * or rd_kafka_oauthbearer_set_token_failure() to indicate success + * or failure, respectively. + * + * The refresh operation is eventable and may be received via + * rd_kafka_queue_poll() with an event type of + * \c RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH. + * + * Note that before any SASL/OAUTHBEARER broker connection can succeed the + * application must call rd_kafka_oauthbearer_set_token() once -- either + * directly or, more typically, by invoking either rd_kafka_poll(), + * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause + * retrieval of an initial token to occur. + * + * Alternatively, the application can enable the SASL queue by calling + * rd_kafka_conf_enable_sasl_queue() on the configuration object prior to + * creating the client instance, get the SASL queue with + * rd_kafka_queue_get_sasl(), and either serve the queue manually by calling + * rd_kafka_queue_poll(), or redirecting the queue to the background thread to + * have the queue served automatically. For the latter case the SASL queue + * must be forwarded to the background queue with rd_kafka_queue_forward(). + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). + * + * An unsecured JWT refresh handler is provided by librdkafka for development + * and testing purposes, it is enabled by setting + * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is + * mutually exclusive to using a refresh callback. + * + * @sa rd_kafka_sasl_background_callbacks_enable() + * @sa rd_kafka_queue_get_sasl() + */ +RD_EXPORT +void rd_kafka_conf_set_oauthbearer_token_refresh_cb( + rd_kafka_conf_t *conf, + void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque)); + +/** + * @brief Enable/disable creation of a queue specific to SASL events + * and callbacks. + * + * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this + * configuration API allows an application to get a dedicated + * queue for the SASL events/callbacks. After enabling the queue with this API + * the application can retrieve the queue by calling + * rd_kafka_queue_get_sasl() on the client instance. + * This queue may then be served directly by the application + * (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as + * the background queue. + * + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). + * + * By default (\p enable = 0) the main queue (as served by rd_kafka_poll(), + * et.al.) is used for SASL callbacks. + * + * @remark The SASL queue is currently only used by the SASL OAUTHBEARER + * mechanism's token_refresh_cb(). 
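+ *
+ * A possible usage sketch (illustrative only; \c conf, \c rk and \c errstr
+ * as in the rd_kafka_conf_new() example, error handling abbreviated):
+ * @code
+ * rd_kafka_conf_enable_sasl_queue(conf, 1);
+ *
+ * rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ *
+ * rd_kafka_error_t *error = rd_kafka_sasl_background_callbacks_enable(rk);
+ * if (error) {
+ *         fprintf(stderr, "%s\n", rd_kafka_error_string(error));
+ *         rd_kafka_error_destroy(error);
+ * }
+ * @endcode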
+ * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_sasl_background_callbacks_enable() + */ + +RD_EXPORT +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable); + + +/** + * @brief Set socket callback. + * + * The socket callback is responsible for opening a socket + * according to the supplied \p domain, \p type and \p protocol. + * The socket shall be created with \c CLOEXEC set in a racefree fashion, if + * possible. + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * Default: + * - on linux: racefree CLOEXEC + * - others : non-racefree CLOEXEC + * + * @remark The callback will be called from an internal librdkafka thread. + */ +RD_EXPORT +void rd_kafka_conf_set_socket_cb( + rd_kafka_conf_t *conf, + int (*socket_cb)(int domain, int type, int protocol, void *opaque)); + + + +/** + * @brief Set connect callback. + * + * The connect callback is responsible for connecting socket \p sockfd + * to peer address \p addr. + * The \p id field contains the broker identifier. + * + * \p connect_cb shall return 0 on success (socket connected) or an error + * number (errno) on error. + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * @remark The callback will be called from an internal librdkafka thread. + */ +RD_EXPORT void +rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, + int (*connect_cb)(int sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque)); + +/** + * @brief Set close socket callback. + * + * Close a socket (optionally opened with socket_cb()). + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * @remark The callback will be called from an internal librdkafka thread. + */ +RD_EXPORT void rd_kafka_conf_set_closesocket_cb( + rd_kafka_conf_t *conf, + int (*closesocket_cb)(int sockfd, void *opaque)); + + + +#ifndef _WIN32 +/** + * @brief Set open callback. + * + * The open callback is responsible for opening the file specified by + * pathname, flags and mode. + * The file shall be opened with \c CLOEXEC set in a racefree fashion, if + * possible. + * + * Default: + * - on linux: racefree CLOEXEC + * - others : non-racefree CLOEXEC + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * @remark The callback will be called from an internal librdkafka thread. + */ +RD_EXPORT +void rd_kafka_conf_set_open_cb( + rd_kafka_conf_t *conf, + int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque)); +#endif + +/** Forward declaration to avoid netdb.h or winsock includes */ +struct addrinfo; + +/** + * @brief Set address resolution callback. + * + * The callback is responsible for resolving the hostname \p node and the + * service \p service into a list of socket addresses as \c getaddrinfo(3) + * would. The \p hints and \p res parameters function as they do for + * \c getaddrinfo(3). The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * If the callback is invoked with a NULL \p node, \p service, and \p hints, the + * callback should instead free the addrinfo struct specified in \p res. In this + * case the callback must succeed; the return value will not be checked by the + * caller. + * + * The callback's return value is interpreted as the return value of \p + * \c getaddrinfo(3). + * + * @remark The callback will be called from an internal librdkafka thread. 
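+ *
+ * Illustrative sketch (not upstream documentation) of a resolve callback
+ * that simply delegates to the system resolver, including the special
+ * "free the result" invocation described above:
+ * @code
+ * static int resolve_cb (const char *node, const char *service,
+ *                        const struct addrinfo *hints, struct addrinfo **res,
+ *                        void *opaque) {
+ *         if (!node && !service && !hints) {
+ *                 freeaddrinfo(*res); // librdkafka asks us to free a result
+ *                 return 0;
+ *         }
+ *         return getaddrinfo(node, service, hints, res);
+ * }
+ * @endcode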
+ */ +RD_EXPORT void +rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque)); + +/** + * @brief Sets the verification callback of the broker certificate + * + * The verification callback is triggered from internal librdkafka threads + * upon connecting to a broker. On each connection attempt the callback + * will be called for each certificate in the broker's certificate chain, + * starting at the root certification, as long as the application callback + * returns 1 (valid certificate). + * \c broker_name and \c broker_id correspond to the broker the connection + * is being made to. + * The \c x509_error argument indicates if OpenSSL's verification of + * the certificate succeed (0) or failed (an OpenSSL error code). + * The application may set the SSL context error code by returning 0 + * from the verify callback and providing a non-zero SSL context error code + * in \c x509_error. + * If the verify callback sets \c x509_error to 0, returns 1, and the + * original \c x509_error was non-zero, the error on the SSL context will + * be cleared. + * \c x509_error is always a valid pointer to an int. + * + * \c depth is the depth of the current certificate in the chain, starting + * at the root certificate. + * + * The certificate itself is passed in binary DER format in \c buf of + * size \c size. + * + * The callback must return 1 if verification succeeds, or + * 0 if verification fails and then write a human-readable error message + * to \c errstr (limited to \c errstr_size bytes, including nul-term). + * + * The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * @returns RD_KAFKA_CONF_OK if SSL is supported in this build, else + * RD_KAFKA_CONF_INVALID. + * + * @warning This callback will be called from internal librdkafka threads. + * + * @remark See in the OpenSSL source distribution + * for a list of \p x509_error codes. + */ +RD_EXPORT +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb( + rd_kafka_conf_t *conf, + int (*ssl_cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque)); + + +/** + * @enum rd_kafka_cert_type_t + * + * @brief SSL certificate type + * + * @sa rd_kafka_conf_set_ssl_cert + */ +typedef enum rd_kafka_cert_type_t { + RD_KAFKA_CERT_PUBLIC_KEY, /**< Client's public key */ + RD_KAFKA_CERT_PRIVATE_KEY, /**< Client's private key */ + RD_KAFKA_CERT_CA, /**< CA certificate */ + RD_KAFKA_CERT__CNT, +} rd_kafka_cert_type_t; + +/** + * @enum rd_kafka_cert_enc_t + * + * @brief SSL certificate encoding + * + * @sa rd_kafka_conf_set_ssl_cert + */ +typedef enum rd_kafka_cert_enc_t { + RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ + RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ + RD_KAFKA_CERT_ENC_PEM, /**< PEM */ + RD_KAFKA_CERT_ENC__CNT, +} rd_kafka_cert_enc_t; + + +/** + * @brief Set certificate/key \p cert_type from the \p cert_enc encoded + * memory at \p buffer of \p size bytes. + * + * @param conf Configuration object. + * @param cert_type Certificate or key type to configure. + * @param cert_enc Buffer \p encoding type. + * @param buffer Memory pointer to encoded certificate or key. + * The memory is not referenced after this function returns. + * @param size Size of memory at \p buffer. 
+ * @param errstr Memory were a human-readable error string will be written + * on failure. + * @param errstr_size Size of \p errstr, including space for nul-terminator. + * + * @returns RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the + * memory in \p buffer is of incorrect encoding, or if librdkafka + * was not built with SSL support. + * + * @remark Calling this method multiple times with the same \p cert_type + * will replace the previous value. + * + * @remark Calling this method with \p buffer set to NULL will clear the + * configuration for \p cert_type. + * + * @remark The private key may require a password, which must be specified + * with the `ssl.key.password` configuration property prior to + * calling this function. + * + * @remark Private and public keys in PEM format may also be set with the + * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. + * + * @remark CA certificate in PEM format may also be set with the + * `ssl.ca.pem` configuration property. + * + * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is + * encoded using an obsolete cipher, it might be necessary to set up + * an OpenSSL configuration file to load the "legacy" provider and + * set the OPENSSL_CONF environment variable. + * See + * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more + * information. + */ +RD_EXPORT rd_kafka_conf_res_t +rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, + rd_kafka_cert_type_t cert_type, + rd_kafka_cert_enc_t cert_enc, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size); + + +/** + * @brief Set callback_data for OpenSSL engine. + * + * @param conf Configuration object. + * @param callback_data passed to engine callbacks, + * e.g. \c ENGINE_load_ssl_client_cert. + * + * @remark The \c ssl.engine.location configuration must be set for this + * to have affect. + * + * @remark The memory pointed to by \p value must remain valid for the + * lifetime of the configuration object and any Kafka clients that + * use it. + */ +RD_EXPORT +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, + void *callback_data); + + +/** + * @brief Sets the application's opaque pointer that will be passed to callbacks + * + * @sa rd_kafka_opaque() + */ +RD_EXPORT +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque); + +/** + * @brief Retrieves the opaque pointer previously set + * with rd_kafka_conf_set_opaque() + */ +RD_EXPORT +void *rd_kafka_opaque(const rd_kafka_t *rk); + + + +/** + * @brief Sets the default topic configuration to use for automatically + * subscribed topics (e.g., through pattern-matched topics). + * The topic config object is not usable after this call. + * + * @warning Any topic configuration settings that have been set on the + * global rd_kafka_conf_t object will be overwritten by this call + * since the implicitly created default topic config object is + * replaced by the user-supplied one. + * + * @deprecated Set default topic level configuration on the + * global rd_kafka_conf_t object instead. + */ +RD_EXPORT +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf); + +/** + * @brief Gets the default topic configuration as previously set with + * rd_kafka_conf_set_default_topic_conf() or that was implicitly created + * by configuring a topic-level property on the global \p conf object. + * + * @returns the \p conf's default topic configuration (if any), or NULL. 
+ * + * @warning The returned topic configuration object is owned by the \p conf + * object. It may be modified but not destroyed and its lifetime is + * the same as the \p conf object or the next call to + * rd_kafka_conf_set_default_topic_conf(). + */ +RD_EXPORT rd_kafka_topic_conf_t * +rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf); + + +/** + * @brief Retrieve configuration value for property \p name. + * + * If \p dest is non-NULL the value will be written to \p dest with at + * most \p dest_size. + * + * \p *dest_size is updated to the full length of the value, thus if + * \p *dest_size initially is smaller than the full length the application + * may reallocate \p dest to fit the returned \p *dest_size and try again. + * + * If \p dest is NULL only the full length of the value is returned. + * + * Fallthrough: + * Topic-level configuration properties from the \c default_topic_conf + * may be retrieved using this interface. + * + * @returns \p RD_KAFKA_CONF_OK if the property name matched, else + * \p RD_KAFKA_CONF_UNKNOWN. + */ +RD_EXPORT +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); + + +/** + * @brief Retrieve topic configuration value for property \p name. + * + * @sa rd_kafka_conf_get() + */ +RD_EXPORT +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); + + +/** + * @brief Dump the configuration properties and values of \p conf to an array + * with \"key\", \"value\" pairs. + * + * The number of entries in the array is returned in \p *cntp. + * + * The dump must be freed with `rd_kafka_conf_dump_free()`. + */ +RD_EXPORT +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp); + + +/** + * @brief Dump the topic configuration properties and values of \p conf + * to an array with \"key\", \"value\" pairs. + * + * The number of entries in the array is returned in \p *cntp. + * + * The dump must be freed with `rd_kafka_conf_dump_free()`. + */ +RD_EXPORT +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, + size_t *cntp); + +/** + * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or + * `rd_kafka_topic_conf_dump(). + */ +RD_EXPORT +void rd_kafka_conf_dump_free(const char **arr, size_t cnt); + +/** + * @brief Prints a table to \p fp of all supported configuration properties, + * their default values as well as a description. + * + * @remark All properties and properties and values are shown, even those + * that have been disabled at build time due to missing dependencies. + */ +RD_EXPORT +void rd_kafka_conf_properties_show(FILE *fp); + +/**@}*/ + + +/** + * @name Topic configuration + * @brief Topic configuration property interface + * @{ + * + */ + + +/** + * @brief Create topic configuration object + * + * @sa Same semantics as for rd_kafka_conf_new(). + */ +RD_EXPORT +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void); + + +/** + * @brief Creates a copy/duplicate of topic configuration object \p conf. + */ +RD_EXPORT +rd_kafka_topic_conf_t * +rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf); + +/** + * @brief Creates a copy/duplicate of \p rk 's default topic configuration + * object. + */ +RD_EXPORT +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk); + + +/** + * @brief Destroys a topic conf object. 
+ */ +RD_EXPORT +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf); + + +/** + * @brief Sets a single rd_kafka_topic_conf_t value by property name. + * + * \p topic_conf should have been previously set up + * with `rd_kafka_topic_conf_new()`. + * + * @returns rd_kafka_conf_res_t to indicate success or failure. + */ +RD_EXPORT +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, + const char *name, + const char *value, + char *errstr, + size_t errstr_size); + +/** + * @brief Sets the application's opaque pointer that will be passed to all topic + * callbacks as the \c rkt_opaque argument. + * + * @sa rd_kafka_topic_opaque() + */ +RD_EXPORT +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, + void *rkt_opaque); + + +/** + * @brief \b Producer: Set partitioner callback in provided topic conf object. + * + * The partitioner may be called in any thread at any time, + * it may be called multiple times for the same message/key. + * + * The callback's \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The callback's \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * Partitioner function constraints: + * - MUST NOT call any rd_kafka_*() functions except: + * rd_kafka_topic_partition_available() + * - MUST NOT block or execute for prolonged periods of time. + * - MUST return a value between 0 and partition_cnt-1, or the + * special \c RD_KAFKA_PARTITION_UA value if partitioning + * could not be performed. + */ +RD_EXPORT +void rd_kafka_topic_conf_set_partitioner_cb( + rd_kafka_topic_conf_t *topic_conf, + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque)); + + +/** + * @brief \b Producer: Set message queueing order comparator callback. + * + * The callback may be called in any thread at any time, + * it may be called multiple times for the same message. + * + * Ordering comparator function constraints: + * - MUST be stable sort (same input gives same output). + * - MUST NOT call any rd_kafka_*() functions. + * - MUST NOT block or execute for prolonged periods of time. + * + * The comparator shall compare the two messages and return: + * - < 0 if message \p a should be inserted before message \p b. + * - >=0 if message \p a should be inserted after message \p b. + * + * @remark Insert sorting will be used to enqueue the message in the + * correct queue position, this comes at a cost of O(n). + * + * @remark If `queuing.strategy=fifo` new messages are enqueued to the + * tail of the queue regardless of msg_order_cmp, but retried messages + * are still affected by msg_order_cmp. + * + * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, + * DO NOT USE IN PRODUCTION. + */ +RD_EXPORT void rd_kafka_topic_conf_set_msg_order_cmp( + rd_kafka_topic_conf_t *topic_conf, + int (*msg_order_cmp)(const rd_kafka_message_t *a, + const rd_kafka_message_t *b)); + + +/** + * @brief Check if partition is available (has a leader broker). + * + * @returns 1 if the partition is available, else 0. 
+ * + * @warning This function must only be called from inside a partitioner function + */ +RD_EXPORT +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, + int32_t partition); + + +/******************************************************************* + * * + * Partitioners provided by rdkafka * + * * + *******************************************************************/ + +/** + * @brief Random partitioner. + * + * Will try not to return unavailable partitions. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a random partition between 0 and \p partition_cnt - 1. + * + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + +/** + * @brief Consistent partitioner. + * + * Uses consistent hashing to map identical keys onto identical partitions. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on + * the CRC value of the key + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + +/** + * @brief Consistent-Random partitioner. + * + * This is the default partitioner. + * Uses consistent hashing to map identical keys onto identical partitions, and + * messages without keys will be assigned via the random partitioner. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on + * the CRC value of the key (if provided) + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + + +/** + * @brief Murmur2 partitioner (Java compatible). + * + * Uses consistent hashing to map identical keys onto identical partitions + * using Java-compatible Murmur2 hashing. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a partition between 0 and \p partition_cnt - 1. + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + +/** + * @brief Consistent-Random Murmur2 partitioner (Java compatible). + * + * Uses consistent hashing to map identical keys onto identical partitions + * using Java-compatible Murmur2 hashing. + * Messages without keys will be assigned via the random partitioner. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a partition between 0 and \p partition_cnt - 1. 
+ */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + + +/** + * @brief FNV-1a partitioner. + * + * Uses consistent hashing to map identical keys onto identical partitions + * using FNV-1a hashing. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a partition between 0 and \p partition_cnt - 1. + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + + +/** + * @brief Consistent-Random FNV-1a partitioner. + * + * Uses consistent hashing to map identical keys onto identical partitions + * using FNV-1a hashing. + * Messages without keys will be assigned via the random partitioner. + * + * The \p rkt_opaque argument is the opaque set by + * rd_kafka_topic_conf_set_opaque(). + * The \p msg_opaque argument is the per-message opaque + * passed to produce(). + * + * @returns a partition between 0 and \p partition_cnt - 1. + */ +RD_EXPORT +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); + + +/**@}*/ + + + +/** + * @name Main Kafka and Topic object handles + * @{ + * + * + */ + + + +/** + * @brief Creates a new Kafka handle and starts its operation according to the + * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER). + * + * \p conf is an optional struct created with `rd_kafka_conf_new()` that will + * be used instead of the default configuration. + * The \p conf object is freed by this function on success and must not be used + * or destroyed by the application subsequently. + * See `rd_kafka_conf_set()` et.al for more information. + * + * \p errstr must be a pointer to memory of at least size \p errstr_size where + * `rd_kafka_new()` may write a human readable error message in case the + * creation of a new handle fails. In which case the function returns NULL. + * + * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER + * rd_kafka_t handle is created it may either operate in the + * legacy simple consumer mode using the rd_kafka_consume_start() + * interface, or the High-level KafkaConsumer API. + * @remark An application must only use one of these groups of APIs on a given + * rd_kafka_t RD_KAFKA_CONSUMER handle. + + * + * @returns The Kafka handle on success or NULL on error (see \p errstr) + * + * @sa To destroy the Kafka handle, use rd_kafka_destroy(). + */ +RD_EXPORT +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, + rd_kafka_conf_t *conf, + char *errstr, + size_t errstr_size); + + +/** + * @brief Destroy Kafka handle. + * + * @remark This is a blocking operation. + * @remark rd_kafka_consumer_close() will be called from this function + * if the instance type is RD_KAFKA_CONSUMER, a \c group.id was + * configured, and the rd_kafka_consumer_close() was not + * explicitly called by the application. This in turn may + * trigger consumer callbacks, such as rebalance_cb. + * Use rd_kafka_destroy_flags() with + * RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to avoid this behaviour. 
+ * + * @sa rd_kafka_destroy_flags() + */ +RD_EXPORT +void rd_kafka_destroy(rd_kafka_t *rk); + + +/** + * @brief Destroy Kafka handle according to specified destroy flags + * + */ +RD_EXPORT +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags); + +/** + * @brief Flags for rd_kafka_destroy_flags() + */ + +/*! + * Don't call consumer_close() to leave group and commit final offsets. + * + * This also disables consumer callbacks to be called from rd_kafka_destroy*(), + * such as rebalance_cb. + * + * The consumer group handler is still closed internally, but from an + * application perspective none of the functionality from consumer_close() + * is performed. + */ +#define RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE 0x8 + + + +/** + * @brief Returns Kafka handle name. + */ +RD_EXPORT +const char *rd_kafka_name(const rd_kafka_t *rk); + + +/** + * @brief Returns Kafka handle type. + */ +RD_EXPORT +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk); + + +/** + * @brief Returns this client's broker-assigned group member id. + * + * @remark This currently requires the high-level KafkaConsumer + * + * @returns An allocated string containing the current broker-assigned group + * member id, or NULL if not available. + * The application must free the string with \p free() or + * rd_kafka_mem_free() + */ +RD_EXPORT +char *rd_kafka_memberid(const rd_kafka_t *rk); + + + +/** + * @brief Returns the ClusterId as reported in broker metadata. + * + * @param rk Client instance. + * @param timeout_ms If there is no cached value from metadata retrieval + * then this specifies the maximum amount of time + * (in milliseconds) the call will block waiting + * for metadata to be retrieved. + * Use 0 for non-blocking calls. + + * @remark Requires broker version >=0.10.0 and api.version.request=true. + * + * @remark The application must free the returned pointer + * using rd_kafka_mem_free(). + * + * @returns a newly allocated string containing the ClusterId, or NULL + * if no ClusterId could be retrieved in the allotted timespan. + */ +RD_EXPORT +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms); + + +/** + * @brief Returns the current ControllerId as reported in broker metadata. + * + * @param rk Client instance. + * @param timeout_ms If there is no cached value from metadata retrieval + * then this specifies the maximum amount of time + * (in milliseconds) the call will block waiting + * for metadata to be retrieved. + * Use 0 for non-blocking calls. + + * @remark Requires broker version >=0.10.0 and api.version.request=true. + * + * @returns the controller broker id (>= 0), or -1 if no ControllerId could be + * retrieved in the allotted timespan. + */ +RD_EXPORT +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms); + + +/** + * @brief Creates a new topic handle for topic named \p topic. + * + * \p conf is an optional configuration for the topic created with + * `rd_kafka_topic_conf_new()` that will be used instead of the default + * topic configuration. + * The \p conf object is freed by this function and must not be used or + * destroyed by the application subsequently. + * See `rd_kafka_topic_conf_set()` et.al for more information. + * + * Topic handles are refcounted internally and calling rd_kafka_topic_new() + * again with the same topic name will return the previous topic handle + * without updating the original handle's configuration. + * Applications must eventually call rd_kafka_topic_destroy() for each + * succesfull call to rd_kafka_topic_new() to clear up resources. 
+ * + * @returns the new topic handle or NULL on error (use rd_kafka_errno2err() + * to convert system \p errno to an rd_kafka_resp_err_t error code. + * + * @sa rd_kafka_topic_destroy() + */ +RD_EXPORT +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf); + + + +/** + * @brief Loose application's topic handle refcount as previously created + * with `rd_kafka_topic_new()`. + * + * @remark Since topic objects are refcounted (both internally and for the app) + * the topic object might not actually be destroyed by this call, + * but the application must consider the object destroyed. + */ +RD_EXPORT +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt); + + +/** + * @brief Returns the topic name. + */ +RD_EXPORT +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt); + + +/** + * @brief Get the \p rkt_opaque pointer that was set in the topic configuration + * with rd_kafka_topic_conf_set_opaque(). + */ +RD_EXPORT +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt); + + +/** + * @brief Unassigned partition. + * + * The unassigned partition is used by the producer API for messages + * that should be partitioned using the configured or default partitioner. + */ +#define RD_KAFKA_PARTITION_UA ((int32_t)-1) + + +/** + * @brief Polls the provided kafka handle for events. + * + * Events will cause application-provided callbacks to be called. + * + * The \p timeout_ms argument specifies the maximum amount of time + * (in milliseconds) that the call will block waiting for events. + * For non-blocking calls, provide 0 as \p timeout_ms. + * To wait indefinitely for an event, provide -1. + * + * @remark An application should make sure to call poll() at regular + * intervals to serve any queued callbacks waiting to be called. + * @remark If your producer doesn't have any callback set (in particular + * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb) + * you might choose not to call poll(), though this is not + * recommended. + * + * Events: + * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] + * - error callbacks (rd_kafka_conf_set_error_cb()) [all] + * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] + * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] + * - OAUTHBEARER token refresh callbacks + * (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] + * + * @returns the number of events served. + */ +RD_EXPORT +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms); + + +/** + * @brief Cancels the current callback dispatcher (rd_kafka_poll(), + * rd_kafka_consume_callback(), etc). + * + * A callback may use this to force an immediate return to the calling + * code (caller of e.g. rd_kafka_poll()) without processing any further + * events. + * + * @remark This function MUST ONLY be called from within a librdkafka callback. + */ +RD_EXPORT +void rd_kafka_yield(rd_kafka_t *rk); + + + +/** + * @brief Pause producing or consumption for the provided list of partitions. + * + * Success or error is returned per-partition \p err in the \p partitions list. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_pause_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); + + + +/** + * @brief Resume producing consumption for the provided list of partitions. + * + * Success or error is returned per-partition \p err in the \p partitions list. 
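+ *
+ * A usage sketch (illustrative only; the topic name is hypothetical):
+ * @code
+ * rd_kafka_topic_partition_list_t *parts =
+ *         rd_kafka_topic_partition_list_new(1);
+ * rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
+ *
+ * rd_kafka_pause_partitions(rk, parts);  // stop fetching/producing
+ * // ... later ...
+ * rd_kafka_resume_partitions(rk, parts); // check the per-element err fields
+ *
+ * rd_kafka_topic_partition_list_destroy(parts);
+ * @endcode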
+ * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_resume_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); + + + +/** + * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets + * for partition. + * + * Offsets are returned in \p *low and \p *high respectively. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_query_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms); + + +/** + * @brief Get last known low (oldest/beginning) and high (newest/end) offsets + * for partition. + * + * The low offset is updated periodically (if statistics.interval.ms is set) + * while the high offset is updated on each fetched message set from the broker. + * + * If there is no cached offset (either low or high, or both) then + * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset. + * + * Offsets are returned in \p *low and \p *high respectively. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. + * + * @remark Shall only be used with an active consumer instance. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high); + + + +/** + * @brief Look up the offsets for the given partitions by timestamp. + * + * The returned offset for each partition is the earliest offset whose + * timestamp is greater than or equal to the given timestamp in the + * corresponding partition. + * + * The timestamps to query are represented as \c offset in \p offsets + * on input, and \c offset will contain the offset on output. + * + * The function will block for at most \p timeout_ms milliseconds. + * + * @remark Duplicate Topic+Partitions are not supported. + * @remark Per-partition errors may be returned in \c + * rd_kafka_topic_partition_t.err + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were be queried (do note + * that per-partition errors might be set), + * RD_KAFKA_RESP_ERR__TIMED_OUT if not all offsets could be fetched + * within \p timeout_ms, + * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p offsets list is empty, + * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if all partitions are unknown, + * RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE if unable to query leaders + * for the given partitions. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_offsets_for_times(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + int timeout_ms); + + + +/** + * @brief Allocate and zero memory using the same allocator librdkafka uses. + * + * This is typically an abstraction for the calloc(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * allocating pointers that are used by librdkafka. + * + * \p rk can be set to return memory allocated by a specific \c rk instance + * otherwise pass NULL for \p rk. + * + * @remark Memory allocated by rd_kafka_mem_calloc() must be freed using + * rd_kafka_mem_free() + */ +RD_EXPORT +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size); + + + +/** + * @brief Allocate memory using the same allocator librdkafka uses. + * + * This is typically an abstraction for the malloc(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * allocating pointers that are used by librdkafka. 
+ * + * \p rk can be set to return memory allocated by a specific \c rk instance + * otherwise pass NULL for \p rk. + * + * @remark Memory allocated by rd_kafka_mem_malloc() must be freed using + * rd_kafka_mem_free() + */ +RD_EXPORT +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size); + + + +/** + * @brief Free pointer returned by librdkafka + * + * This is typically an abstraction for the free(3) call and makes sure + * the application can use the same memory allocator as librdkafka for + * freeing pointers returned by librdkafka. + * + * In standard setups it is usually not necessary to use this interface + * rather than the free(3) functione. + * + * \p rk must be set for memory returned by APIs that take an \c rk argument, + * for other APIs pass NULL for \p rk. + * + * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs + * that explicitly mention using this function for freeing. + */ +RD_EXPORT +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr); + + +/**@}*/ + + + +/** + * @name Queue API + * @{ + * + * Message queues allows the application to re-route consumed messages + * from multiple topic+partitions into one single queue point. + * This queue point containing messages from a number of topic+partitions + * may then be served by a single rd_kafka_consume*_queue() call, + * rather than one call per topic+partition combination. + */ + + +/** + * @brief Create a new message queue. + * + * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al. + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk); + +/** + * Destroy a queue, purging all of its enqueued messages. + */ +RD_EXPORT +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu); + + +/** + * @returns a reference to the main librdkafka event queue. + * This is the queue served by rd_kafka_poll(). + * + * Use rd_kafka_queue_destroy() to loose the reference. + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk); + + + +/** + * @returns a reference to the SASL callback queue, if a SASL mechanism + * with callbacks is configured (currently only OAUTHBEARER), else + * returns NULL. + * + * Use rd_kafka_queue_destroy() to loose the reference. + * + * @sa rd_kafka_sasl_background_callbacks_enable() + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk); + + +/** + * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka + * background thread. + * + * This serves as an alternative for applications that do not call + * rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means + * of automatically trigger the refresh callbacks, which are needed to + * initiate connections to the brokers in the case a custom OAUTHBEARER + * refresh callback is configured. + * + * @returns NULL on success or an error object on error. + * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb() + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk); + + +/** + * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by + * this Kafka client. + * + * This function sets or resets the SASL username and password credentials + * used by this Kafka client. The new credentials will be used the next time + * this client needs to authenticate to a broker. This function + * will not disconnect existing connections that might have been made using + * the old credentials. + * + * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms. 
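+ *
+ * Illustrative credential-rotation sketch (not upstream documentation;
+ * the credential values are placeholders):
+ * @code
+ * rd_kafka_error_t *error =
+ *         rd_kafka_sasl_set_credentials(rk, "new-username", "new-password");
+ * if (error) {
+ *         fprintf(stderr, "Credential update failed: %s\n",
+ *                 rd_kafka_error_string(error));
+ *         rd_kafka_error_destroy(error);
+ * }
+ * @endcode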
+ * + * @returns NULL on success or an error object on error. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, + const char *username, + const char *password); + +/** + * @returns a reference to the librdkafka consumer queue. + * This is the queue served by rd_kafka_consumer_poll(). + * + * Use rd_kafka_queue_destroy() to loose the reference. + * + * @remark rd_kafka_queue_destroy() MUST be called on this queue + * prior to calling rd_kafka_consumer_close(). + * @remark Polling the returned queue counts as a consumer poll, and will reset + * the timer for max.poll.interval.ms. If this queue is forwarded to a + * "destq", polling destq also counts as a consumer poll (this works + * for any number of forwards). However, even if this queue is + * unforwarded or forwarded elsewhere, polling destq will continue + * to count as a consumer poll. + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk); + +/** + * @returns a reference to the partition's queue, or NULL if + * partition is invalid. + * + * Use rd_kafka_queue_destroy() to loose the reference. + * + * @remark rd_kafka_queue_destroy() MUST be called on this queue + * + * @remark This function only works on consumers. + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition); + +/** + * @returns a reference to the background thread queue, or NULL if the + * background queue is not enabled. + * + * The background thread queue provides the application with an automatically + * polled queue that triggers the event callback in a background thread, + * this background thread is completely managed by librdkafka. + * + * The background thread queue is automatically created if a generic event + * handler callback is configured with rd_kafka_conf_set_background_event_cb() + * or if rd_kafka_queue_get_background() is called. + * + * The background queue is polled and served by librdkafka and MUST NOT be + * polled, forwarded, or otherwise managed by the application, it may only + * be used as the destination queue passed to queue-enabled APIs, such as + * the Admin API. + * + * Use rd_kafka_queue_destroy() to loose the reference. + * + * @warning The background queue MUST NOT be read from (polled, consumed, etc), + * or forwarded from. + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk); + + +/** + * @brief Forward/re-route queue \p src to \p dst. + * If \p dst is \c NULL the forwarding is removed. + * + * The internal refcounts for both queues are increased. + * + * @remark Regardless of whether \p dst is NULL or not, after calling this + * function, \p src will not forward it's fetch queue to the consumer + * queue. + */ +RD_EXPORT +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst); + +/** + * @brief Forward librdkafka logs (and debug) to the specified queue + * for serving with one of the ..poll() calls. + * + * This allows an application to serve log callbacks (\c log_cb) + * in its thread of choice. + * + * @param rk Client instance. + * @param rkqu Queue to forward logs to. If the value is NULL the logs + * are forwarded to the main queue. + * + * @remark The configuration property \c log.queue MUST also be set to true. + * + * @remark librdkafka maintains its own reference to the provided queue. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error, + * eg RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true. 
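 *
 * Example (illustrative sketch; assumes \c log.queue was set to \c true on the
 * configuration, and uses the event API declared elsewhere in this header):
 * @code
 *   rd_kafka_queue_t *logq = rd_kafka_queue_new(rk);
 *   rd_kafka_set_log_queue(rk, logq);
 *
 *   // Serve log events from the application's thread of choice:
 *   rd_kafka_event_t *ev = rd_kafka_queue_poll(logq, 100);
 *   if (ev && rd_kafka_event_type(ev) == RD_KAFKA_EVENT_LOG) {
 *           const char *fac, *str;
 *           int level;
 *           rd_kafka_event_log(ev, &fac, &str, &level);
 *           fprintf(stderr, "%d %s: %s\n", level, fac, str);
 *   }
 *   if (ev)
 *           rd_kafka_event_destroy(ev);
 * @endcode
 *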
+ */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu); + + +/** + * @returns the current number of elements in queue. + */ +RD_EXPORT +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu); + + +/** + * @brief Enable IO event triggering for queue. + * + * To ease integration with IO based polling loops this API + * allows an application to create a separate file-descriptor + * that librdkafka will write \p payload (of size \p size) to + * whenever a new element is enqueued on a previously empty queue. + * + * To remove event triggering call with \p fd = -1. + * + * librdkafka will maintain a copy of the \p payload. + * + * @remark IO and callback event triggering are mutually exclusive. + * @remark When using forwarded queues the IO event must only be enabled + * on the final forwarded-to (destination) queue. + * @remark The file-descriptor/socket must be set to non-blocking. + */ +RD_EXPORT +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, + int fd, + const void *payload, + size_t size); + +/** + * @brief Enable callback event triggering for queue. + * + * The callback will be called from an internal librdkafka thread + * when a new element is enqueued on a previously empty queue. + * + * To remove event triggering call with \p event_cb = NULL. + * + * The \p qev_opaque is passed to the callback's \p qev_opaque argument. + * + * @remark IO and callback event triggering are mutually exclusive. + * @remark Since the callback may be triggered from internal librdkafka + * threads, the application must not perform any pro-longed work in + * the callback, or call any librdkafka APIs (for the same rd_kafka_t + * handle). + */ +RD_EXPORT +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, + void (*event_cb)(rd_kafka_t *rk, + void *qev_opaque), + void *qev_opaque); + + +/** + * @brief Cancels the current rd_kafka_queue_poll() on \p rkqu. + * + * An application may use this from another thread to force + * an immediate return to the calling code (caller of rd_kafka_queue_poll()). + * Must not be used from signal handlers since that may cause deadlocks. + */ +RD_EXPORT +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu); + + +/**@}*/ + +/** + * + * @name Simple Consumer API (legacy) + * @{ + * + */ + + +#define RD_KAFKA_OFFSET_BEGINNING \ + -2 /**< Start consuming from beginning of \ + * kafka partition queue: oldest msg */ +#define RD_KAFKA_OFFSET_END \ + -1 /**< Start consuming from end of kafka \ + * partition queue: next msg */ +#define RD_KAFKA_OFFSET_STORED \ + -1000 /**< Start consuming from offset retrieved \ + * from offset store */ +#define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */ + + +/** @cond NO_DOC */ +#define RD_KAFKA_OFFSET_TAIL_BASE -2000 /* internal: do not use */ +/** @endcond */ + +/** + * @brief Start consuming \p CNT messages from topic's current end offset. + * + * That is, if current end offset is 12345 and \p CNT is 200, it will start + * consuming from offset \c 12345-200 = \c 12145. 
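 *
 * Example (illustrative sketch; \c rkt is an existing topic handle and the
 * partition and message count are placeholders):
 * @code
 *   // Start fetching only the last 200 messages of partition 0:
 *   rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_TAIL(200));
 * @endcode
 *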
*/ +#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) + +/** + * @brief Start consuming messages for topic \p rkt and \p partition + * at offset \p offset which may either be an absolute \c (0..N) + * or one of the logical offsets: + * - RD_KAFKA_OFFSET_BEGINNING + * - RD_KAFKA_OFFSET_END + * - RD_KAFKA_OFFSET_STORED + * - RD_KAFKA_OFFSET_TAIL + * + * rdkafka will attempt to keep \c queued.min.messages (config property) + * messages in the local queue by repeatedly fetching batches of messages + * from the broker until the threshold is reached. + * + * The application shall use one of the `rd_kafka_consume*()` functions + * to consume messages from the local queue, each kafka message being + * represented as a `rd_kafka_message_t *` object. + * + * `rd_kafka_consume_start()` must not be called multiple times for the same + * topic and partition without stopping consumption first with + * `rd_kafka_consume_stop()`. + * + * @returns 0 on success or -1 on error in which case errno is set accordingly: + * - EBUSY - Conflicts with an existing or previous subscription + * (RD_KAFKA_RESP_ERR__CONFLICT) + * - EINVAL - Invalid offset, or incomplete configuration (lacking group.id) + * (RD_KAFKA_RESP_ERR__INVALID_ARG) + * - ESRCH - requested \p partition is invalid. + * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + * - ENOENT - topic is unknown in the Kafka cluster. + * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + * + * Use `rd_kafka_errno2err()` to convert sytem \c errno to `rd_kafka_resp_err_t` + */ +RD_EXPORT +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset); + +/** + * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to + * the provided queue \p rkqu (which must have been previously allocated + * with `rd_kafka_queue_new()`. + * + * The application must use one of the `rd_kafka_consume_*_queue()` functions + * to receive fetched messages. + * + * `rd_kafka_consume_start_queue()` must not be called multiple times for the + * same topic and partition without stopping consumption first with + * `rd_kafka_consume_stop()`. + * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not + * be combined for the same topic and partition. + */ +RD_EXPORT +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + rd_kafka_queue_t *rkqu); + +/** + * @brief Stop consuming messages for topic \p rkt and \p partition, purging + * all messages currently in the local queue. + * + * NOTE: To enforce synchronisation this call will block until the internal + * fetcher has terminated and offsets are committed to configured + * storage method. + * + * The application needs to be stop all consumers before calling + * `rd_kafka_destroy()` on the main object handle. + * + * @returns 0 on success or -1 on error (see `errno`). + */ +RD_EXPORT +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); + + + +/** + * @brief Seek consumer for topic+partition to \p offset which is either an + * absolute or logical offset. + * + * If \p timeout_ms is specified (not 0) the seek call will wait this long + * for the consumer to update its fetcher state for the given partition with + * the new offset. This guarantees that no previously fetched messages for the + * old offset (or fetch position) will be passed to the application. + * + * If the timeout is reached the internal state will be unknown to the caller + * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. 
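 *
 * Example (illustrative sketch; the partition and timeout are placeholders,
 * and rd_kafka_err2str() is declared elsewhere in this header):
 * @code
 *   // Rewind partition 0 to the beginning and wait up to 2s for the
 *   // fetcher state to be updated:
 *   rd_kafka_resp_err_t err =
 *       rd_kafka_seek(rkt, 0, RD_KAFKA_OFFSET_BEGINNING, 2000);
 *   if (err)
 *           fprintf(stderr, "seek failed: %s\n", rd_kafka_err2str(err));
 * @endcode
 *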
+ *
+ * If \p timeout_ms is 0 it will initiate the seek but return
+ * immediately without any error reporting (e.g., async).
+ *
+ * This call will purge all pre-fetched messages for the given partition, which
+ * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
+ * may thus lead to increased network usage as messages are re-fetched from
+ * the broker.
+ *
+ * @remark Seek must only be performed for already assigned/consumed partitions,
+ *         use rd_kafka_assign() (et.al) to set the initial starting offset
+ *         for a new assignment.
+ *
+ * @returns `RD_KAFKA_RESP_ERR__NO_ERROR` on success else an error code.
+ *
+ * @deprecated Use rd_kafka_seek_partitions().
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt,
+                                  int32_t partition,
+                                  int64_t offset,
+                                  int timeout_ms);
+
+
+
+/**
+ * @brief Seek consumer for partitions in \p partitions to the per-partition
+ *        offset in the \c .offset field of \p partitions.
+ *
+ * The offset may be either absolute (>= 0) or a logical offset.
+ *
+ * If \p timeout_ms is specified (not 0) the seek call will wait this long
+ * for the consumer to update its fetcher state for the given partition with
+ * the new offset. This guarantees that no previously fetched messages for the
+ * old offset (or fetch position) will be passed to the application.
+ *
+ * If the timeout is reached the internal state will be unknown to the caller
+ * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
+ *
+ * If \p timeout_ms is 0 it will initiate the seek but return
+ * immediately without any error reporting (e.g., async).
+ *
+ * This call will purge all pre-fetched messages for the given partition, which
+ * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
+ * may thus lead to increased network usage as messages are re-fetched from
+ * the broker.
+ *
+ * Individual partition errors are reported in the per-partition \c .err field
+ * of \p partitions.
+ *
+ * @remark Seek must only be performed for already assigned/consumed partitions,
+ *         use rd_kafka_assign() (et.al) to set the initial starting offset
+ *         for a new assignment.
+ *
+ * @returns NULL on success or an error object on failure.
+ */
+RD_EXPORT rd_kafka_error_t *
+rd_kafka_seek_partitions(rd_kafka_t *rk,
+                         rd_kafka_topic_partition_list_t *partitions,
+                         int timeout_ms);
+
+
+/**
+ * @brief Consume a single message from topic \p rkt and \p partition
+ *
+ * \p timeout_ms is the maximum amount of time to wait for a message to be
+ * received.
+ * Consumer must have been previously started with `rd_kafka_consume_start()`.
+ *
+ * @returns a message object on success or \c NULL on error.
+ *          The message object must be destroyed with `rd_kafka_message_destroy()`
+ *          when the application is done with it.
+ *
+ * Errors (when returning NULL):
+ *  - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched.
+ *  - ENOENT    - \p rkt + \p partition is unknown.
+ *                (no prior `rd_kafka_consume_start()` call)
+ *
+ * NOTE: The returned message's \c ..->err must be checked for errors.
+ * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the
+ *       end of the partition has been reached, which should typically not be
+ *       considered an error. The application should handle this case
+ *       (e.g., ignore).
+ *
+ * @remark on_consume() interceptors may be called from this function prior to
+ *         passing message to application.
+ */ +RD_EXPORT +rd_kafka_message_t * +rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms); + + + +/** + * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition + * putting a pointer to each message in the application provided + * array \p rkmessages (of size \p rkmessages_size entries). + * + * `rd_kafka_consume_batch()` provides higher throughput performance + * than `rd_kafka_consume()`. + * + * \p timeout_ms is the maximum amount of time to wait for all of + * \p rkmessages_size messages to be put into \p rkmessages. + * If no messages were available within the timeout period this function + * returns 0 and \p rkmessages remains untouched. + * This differs somewhat from `rd_kafka_consume()`. + * + * The message objects must be destroyed with `rd_kafka_message_destroy()` + * when the application is done with it. + * + * @returns the number of rkmessages added in \p rkmessages, + * or -1 on error (same error codes as for `rd_kafka_consume()`. + * + * @sa rd_kafka_consume() + * + * @remark on_consume() interceptors may be called from this function prior to + * passing message to application. + */ +RD_EXPORT +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); + + + +/** + * @brief Consumes messages from topic \p rkt and \p partition, calling + * the provided callback for each consumed messsage. + * + * `rd_kafka_consume_callback()` provides higher throughput performance + * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`. + * + * \p timeout_ms is the maximum amount of time to wait for one or more messages + * to arrive. + * + * The provided \p consume_cb function is called for each message, + * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the + * provided \p rkmessage. + * + * The \p commit_opaque argument is passed to the \p consume_cb + * as \p commit_opaque. + * + * @returns the number of messages processed or -1 on error. + * + * @sa rd_kafka_consume() + * + * @remark on_consume() interceptors may be called from this function prior to + * passing message to application. + * + * @remark This function will return early if a transaction control message is + * received, these messages are not exposed to the application but + * still enqueued on the consumer queue to make sure their + * offsets are stored. + * + * @deprecated This API is deprecated and subject for future removal. + * There is no new callback-based consume interface, use the + * poll/queue based alternatives. + */ +RD_EXPORT +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, + int32_t partition, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, + void *commit_opaque), + void *commit_opaque); + + +/**@}*/ + +/** + * @name Simple Consumer API (legacy): Queue consumers + * @{ + * + * The following `..._queue()` functions are analogue to the functions above + * but reads messages from the provided queue \p rkqu instead. + * \p rkqu must have been previously created with `rd_kafka_queue_new()` + * and the topic consumer must have been started with + * `rd_kafka_consume_start_queue()` utilising the the same queue. 
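 *
 * Example (illustrative sketch; the topic handle, partitions and start offsets
 * are placeholders):
 * @code
 *   rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
 *
 *   // Route two partitions into the same queue:
 *   rd_kafka_consume_start_queue(rkt, 0, RD_KAFKA_OFFSET_BEGINNING, rkqu);
 *   rd_kafka_consume_start_queue(rkt, 1, RD_KAFKA_OFFSET_BEGINNING, rkqu);
 *
 *   rd_kafka_message_t *rkm = rd_kafka_consume_queue(rkqu, 1000);
 *   if (rkm) {
 *           // ... check rkm->err and process rkm->payload ...
 *           rd_kafka_message_destroy(rkm);
 *   }
 *
 *   rd_kafka_consume_stop(rkt, 0);
 *   rd_kafka_consume_stop(rkt, 1);
 *   rd_kafka_queue_destroy(rkqu);
 * @endcode
 *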
+ */ + +/** + * @brief Consume from queue + * + * @sa rd_kafka_consume() + */ +RD_EXPORT +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, + int timeout_ms); + +/** + * @brief Consume batch of messages from queue + * + * @sa rd_kafka_consume_batch() + */ +RD_EXPORT +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); + +/** + * @brief Consume multiple messages from queue with callback + * + * @sa rd_kafka_consume_callback() + * + * @deprecated This API is deprecated and subject for future removal. + * There is no new callback-based consume interface, use the + * poll/queue based alternatives. + */ +RD_EXPORT +int rd_kafka_consume_callback_queue( + rd_kafka_queue_t *rkqu, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), + void *commit_opaque); + + +/**@}*/ + + + +/** + * @name Simple Consumer API (legacy): Topic+partition offset store. + * @{ + * + * If \c auto.commit.enable is true the offset is stored automatically prior to + * returning of the message(s) in each of the rd_kafka_consume*() functions + * above. + */ + + +/** + * @brief Store offset \p offset + 1 for topic \p rkt partition \p partition. + * + * The \c offset + 1 will be committed (written) to broker (or file) according + * to \c `auto.commit.interval.ms` or manual offset-less commit() + * + * @deprecated This API lacks support for partition leader epochs, which makes + * it at risk for unclean leader election log truncation issues. + * Use rd_kafka_offsets_store() and rd_kafka_offset_store_message() + * instead. + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. + */ +RD_EXPORT +rd_kafka_resp_err_t +rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); + + +/** + * @brief Store offsets for next auto-commit for one or more partitions. + * + * The offset will be committed (written) to the offset store according + * to \c `auto.commit.interval.ms` or manual offset-less commit(). + * + * Per-partition success/error status propagated through each partition's + * \c .err for all return values (even NO_ERROR) except INVALID_ARG. + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * + * @remark The \c .offset field is stored as is, it will NOT be + 1. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. + * + * @remark The leader epoch, if set, will be used to fence outdated partition + * leaders. See rd_kafka_topic_partition_set_leader_epoch(). 
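 *
 * Example (illustrative sketch; assumes \c enable.auto.offset.store=false and
 * that \c rkmessage has been fully processed; rd_kafka_topic_name() and the
 * partition list helpers are declared elsewhere in this header):
 * @code
 *   rd_kafka_topic_partition_list_t *offsets =
 *       rd_kafka_topic_partition_list_new(1);
 *   rd_kafka_topic_partition_list_add(offsets,
 *                                     rd_kafka_topic_name(rkmessage->rkt),
 *                                     rkmessage->partition)->offset =
 *       rkmessage->offset + 1;
 *   rd_kafka_offsets_store(rk, offsets);
 *   rd_kafka_topic_partition_list_destroy(offsets);
 * @endcode
 *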
+ * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or + * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store + * is true, or + * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE + * if none of the offsets could be stored. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_offsets_store(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets); + + +/** + * @brief Store offset +1 for the consumed message. + * + * The message offset + 1 will be committed to broker according + * to \c `auto.commit.interval.ms` or manual offset-less commit() + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. + * + * @returns NULL on success or an error object on failure. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage); + +/**@}*/ + + + +/** + * @name KafkaConsumer (C) + * @brief High-level KafkaConsumer C API + * @{ + * + * + * + */ + +/** + * @brief Subscribe to topic set using balanced consumer groups. + * + * Wildcard (regex) topics are supported: + * any topic name in the \p topics list that is prefixed with \c \"^\" will + * be regex-matched to the full list of topics in the cluster and matching + * topics will be added to the subscription list. + * + * The full topic list is retrieved every \c topic.metadata.refresh.interval.ms + * to pick up new or delete topics that match the subscription. + * If there is any change to the matched topics the consumer will + * immediately rejoin the group with the updated set of subscribed topics. + * + * Regex and full topic names can be mixed in \p topics. + * + * @remark Only the \c .topic field is used in the supplied \p topics list, + * all other fields are ignored. + * + * @remark subscribe() is an asynchronous method which returns immediately: + * background threads will (re)join the group, wait for group rebalance, + * issue any registered rebalance_cb, assign() the assigned partitions, + * and then start fetching messages. This cycle may take up to + * \c session.timeout.ms * 2 or more to complete. + * + * @remark After this call returns a consumer error will be returned by + * rd_kafka_consumer_poll (et.al) for each unavailable topic in the + * \p topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART + * for non-existent topics, and + * RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. + * The consumer error will be raised through rd_kafka_consumer_poll() + * (et.al.) with the \c rd_kafka_message_t.err field set to one of the + * error codes mentioned above. + * The subscribe function itself is asynchronous and will not return + * an error on unavailable topics. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or + * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid + * topics or regexes or duplicate entries, + * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_subscribe(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *topics); + + +/** + * @brief Unsubscribe from the current subscription set. 
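 *
 * Example (illustrative sketch of a subscribe/unsubscribe round trip; the
 * topic name and regex are placeholders):
 * @code
 *   rd_kafka_topic_partition_list_t *topics =
 *       rd_kafka_topic_partition_list_new(2);
 *   rd_kafka_topic_partition_list_add(topics, "orders",
 *                                     RD_KAFKA_PARTITION_UA);
 *   rd_kafka_topic_partition_list_add(topics, "^metrics\\..*",
 *                                     RD_KAFKA_PARTITION_UA);
 *   rd_kafka_subscribe(rk, topics);
 *   rd_kafka_topic_partition_list_destroy(topics);
 *
 *   // ... consume with rd_kafka_consumer_poll() ...
 *
 *   rd_kafka_unsubscribe(rk);
 * @endcode
 *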
+ */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk); + + +/** + * @brief Returns the current topic subscription + * + * @returns An error code on failure, otherwise \p topic is updated + * to point to a newly allocated topic list (possibly empty). + * + * @remark The application is responsible for calling + * rd_kafka_topic_partition_list_destroy on the returned list. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics); + + + +/** + * @brief Poll the consumer for messages or events. + * + * Will block for at most \p timeout_ms milliseconds. + * + * @remark An application should make sure to call consumer_poll() at regular + * intervals, even if no messages are expected, to serve any + * queued callbacks waiting to be called. This is especially + * important when a rebalance_cb has been registered as it needs + * to be called and handled properly to synchronize internal + * consumer state. + * + * @returns A message object which is a proper message if \p ->err is + * RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other + * value. + * + * @remark on_consume() interceptors may be called from this function prior to + * passing message to application. + * + * @remark When subscribing to topics the application must call poll at + * least every \c max.poll.interval.ms to remain a member of the + * consumer group. + * + * Noteworthy errors returned in \c ->err: + * - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED - application failed to call + * poll within `max.poll.interval.ms`. + * + * @sa rd_kafka_message_t + */ +RD_EXPORT +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms); + +/** + * @brief Close the consumer. + * + * This call will block until the consumer has revoked its assignment, + * calling the \c rebalance_cb if it is configured, committed offsets + * to broker, and left the consumer group (if applicable). + * The maximum blocking time is roughly limited to session.timeout.ms. + * + * @returns An error code indicating if the consumer close was succesful + * or not. + * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised + * a fatal error. + * + * @remark The application still needs to call rd_kafka_destroy() after + * this call finishes to clean up the underlying handle resources. + * + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk); + + +/** + * @brief Asynchronously close the consumer. + * + * Performs the same actions as rd_kafka_consumer_close() but in a + * background thread. + * + * Rebalance events/callbacks (etc) will be forwarded to the + * application-provided \p rkqu. The application must poll/serve this queue + * until rd_kafka_consumer_closed() returns true. + * + * @remark Depending on consumer group join state there may or may not be + * rebalance events emitted on \p rkqu. + * + * @returns an error object if the consumer close failed, else NULL. + * + * @sa rd_kafka_consumer_closed() + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu); + + +/** + * @returns 1 if the consumer is closed, else 0. + * + * Should be used in conjunction with rd_kafka_consumer_close_queue() to know + * when the consumer has been closed. + * + * @sa rd_kafka_consumer_close_queue() + */ +RD_EXPORT +int rd_kafka_consumer_closed(rd_kafka_t *rk); + + +/** + * @brief Incrementally add \p partitions to the current assignment. + * + * If a COOPERATIVE assignor (i.e. 
incremental rebalancing) is being used, + * this method should be used in a rebalance callback to adjust the current + * assignment appropriately in the case where the rebalance type is + * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the + * partition list passed to the callback (or a copy of it), even if the + * list is empty. \p partitions must not be NULL. This method may also be + * used outside the context of a rebalance callback. + * + * @returns NULL on success, or an error object if the operation was + * unsuccessful. + * + * @remark The returned error object (if not NULL) must be destroyed with + * rd_kafka_error_destroy(). + */ +RD_EXPORT rd_kafka_error_t * +rd_kafka_incremental_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); + + +/** + * @brief Incrementally remove \p partitions from the current assignment. + * + * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, + * this method should be used in a rebalance callback to adjust the current + * assignment appropriately in the case where the rebalance type is + * RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the + * partition list passed to the callback (or a copy of it), even if the + * list is empty. \p partitions must not be NULL. This method may also be + * used outside the context of a rebalance callback. + * + * @returns NULL on success, or an error object if the operation was + * unsuccessful. + * + * @remark The returned error object (if not NULL) must be destroyed with + * rd_kafka_error_destroy(). + */ +RD_EXPORT rd_kafka_error_t *rd_kafka_incremental_unassign( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); + + +/** + * @brief The rebalance protocol currently in use. This will be + * "NONE" if the consumer has not (yet) joined a group, else it will + * match the rebalance protocol ("EAGER", "COOPERATIVE") of the + * configured and selected assignor(s). All configured + * assignors must have the same protocol type, meaning + * online migration of a consumer group from using one + * protocol to another (in particular upgading from EAGER + * to COOPERATIVE) without a restart is not currently + * supported. + * + * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success. + */ +RD_EXPORT +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk); + + +/** + * @brief Atomic assignment of partitions to consume. + * + * The new \p partitions will replace the existing assignment. + * + * A zero-length \p partitions will treat the partitions as a valid, + * albeit empty assignment, and maintain internal state, while a \c NULL + * value for \p partitions will reset and clear the internal state. + * + * When used from a rebalance callback, the application should pass the + * partition list passed to the callback (or a copy of it) even if the list + * is empty (i.e. should not pass NULL in this case) so as to maintain + * internal join state. This is not strictly required - the application + * may adjust the assignment provided by the group. However, this is rarely + * useful in practice. + * + * @returns An error code indicating if the new assignment was applied or not. + * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised + * a fatal error. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Returns the current partition assignment as set by rd_kafka_assign() + * or rd_kafka_incremental_assign(). 
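 *
 * Example (illustrative sketch of a rebalance callback, registered with
 * rd_kafka_conf_set_rebalance_cb() declared elsewhere in this header, that
 * handles both EAGER and COOPERATIVE protocols; error objects returned by the
 * incremental calls are ignored for brevity):
 * @code
 *   static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
 *                            rd_kafka_topic_partition_list_t *partitions,
 *                            void *opaque) {
 *           if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
 *                   if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
 *                           rd_kafka_incremental_assign(rk, partitions);
 *                   else
 *                           rd_kafka_incremental_unassign(rk, partitions);
 *           } else {
 *                   if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
 *                           rd_kafka_assign(rk, partitions);
 *                   else
 *                           rd_kafka_assign(rk, NULL);
 *           }
 *   }
 * @endcode
 *
 * The resulting assignment can afterwards be inspected with
 * rd_kafka_assignment().
 *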
+ *
+ * @returns An error code on failure, otherwise \p partitions is updated
+ *          to point to a newly allocated partition list (possibly empty).
+ *
+ * @remark The application is responsible for calling
+ *         rd_kafka_topic_partition_list_destroy on the returned list.
+ *
+ * @remark This assignment represents the partitions assigned through the
+ *         assign functions and not the partitions assigned to this consumer
+ *         instance by the consumer group leader.
+ *         They are usually the same following a rebalance but not necessarily
+ *         since an application is free to assign any partitions.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_assignment(rd_kafka_t *rk,
+                    rd_kafka_topic_partition_list_t **partitions);
+
+
+/**
+ * @brief Check whether the consumer considers the current assignment to
+ *        have been lost involuntarily. This method is only applicable for
+ *        use with a high level subscribing consumer. Assignments are revoked
+ *        immediately when determined to have been lost, so this method
+ *        is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event
+ *        or from within a rebalance_cb. Partitions that have been lost may
+ *        already be owned by other members in the group and therefore
+ *        committing offsets, for example, may fail.
+ *
+ * @remark Calling rd_kafka_assign(), rd_kafka_incremental_assign() or
+ *         rd_kafka_incremental_unassign() resets this flag.
+ *
+ * @returns Returns 1 if the current partition assignment is considered
+ *          lost, 0 otherwise.
+ */
+RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk);
+
+
+/**
+ * @brief Commit offsets on broker for the provided list of partitions.
+ *
+ * \p offsets should contain \c topic, \c partition, \c offset and possibly
+ * \c metadata. The \c offset should be the offset where consumption will
+ * resume, i.e., the last processed offset + 1.
+ * If \p offsets is NULL the current partition assignment will be used instead.
+ *
+ * If \p async is false this operation will block until the broker offset commit
+ * is done, returning the resulting success or error code.
+ *
+ * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been
+ * configured the callback will be enqueued for a future call to
+ * rd_kafka_poll(), rd_kafka_consumer_poll() or similar.
+ *
+ * @returns An error code indicating if the commit was successful,
+ *          or successfully scheduled if asynchronous, or failed.
+ *          RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
+ *          a fatal error.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_commit(rd_kafka_t *rk,
+                const rd_kafka_topic_partition_list_t *offsets,
+                int async);
+
+
+/**
+ * @brief Commit message's offset on broker for the message's partition.
+ *        The committed offset is the message's offset + 1.
+ *
+ * @sa rd_kafka_commit
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_commit_message(rd_kafka_t *rk,
+                        const rd_kafka_message_t *rkmessage,
+                        int async);
+
+
+/**
+ * @brief Commit offsets on broker for the provided list of partitions.
+ *
+ * See rd_kafka_commit for \p offsets semantics.
+ *
+ * The result of the offset commit will be posted on the provided \p rkqu queue.
+ *
+ * If the application uses one of the poll APIs (rd_kafka_poll(),
+ * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue
+ * the \p cb callback is required.
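 *
 * Example (illustrative sketch; a NULL \p rkqu is passed so a temporary queue
 * is created and the callback is served by the call itself, and NULL offsets
 * commit the current assignment; rd_kafka_err2str() is declared elsewhere in
 * this header):
 * @code
 *   static void commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
 *                         rd_kafka_topic_partition_list_t *offsets,
 *                         void *commit_opaque) {
 *           if (err && err != RD_KAFKA_RESP_ERR__NO_OFFSET)
 *                   fprintf(stderr, "commit failed: %s\n",
 *                           rd_kafka_err2str(err));
 *   }
 *
 *   rd_kafka_commit_queue(rk, NULL, NULL, commit_cb, NULL);
 * @endcode
 *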
+ * + * The \p commit_opaque argument is passed to the callback as \p commit_opaque, + * or if using the event API the callback is ignored and the offset commit + * result will be returned as an RD_KAFKA_EVENT_COMMIT event and the + * \p commit_opaque value will be available with rd_kafka_event_opaque(). + * + * If \p rkqu is NULL a temporary queue will be created and the callback will + * be served by this call. + * + * @sa rd_kafka_commit() + * @sa rd_kafka_conf_set_offset_commit_cb() + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_commit_queue(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_queue_t *rkqu, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *commit_opaque), + void *commit_opaque); + + +/** + * @brief Retrieve committed offsets for topics+partitions. + * + * The \p offset field of each requested partition will either be set to + * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored + * offset for that partition. + * + * Committed offsets will be returned according to the `isolation.level` + * configuration property, if set to `read_committed` (default) then only + * stable offsets for fully committed transactions will be returned, while + * `read_uncommitted` may return offsets for not yet committed transactions. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the + * \p offset or \p err field of each \p partitions' element is filled + * in with the stored offset, or a partition specific error. + * Else returns an error code. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_committed(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms); + + + +/** + * @brief Retrieve current positions (offsets) for topics+partitions. + * + * The \p offset field of each requested partition will be set to the offset + * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there + * was no previous message. + * + * @remark In this context the last consumed message is the offset consumed + * by the current librdkafka instance and, in case of rebalancing, not + * necessarily the last message fetched from the partition. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the + * \p offset or \p err field of each \p partitions' element is filled + * in with the stored offset, or a partition specific error. + * Else returns an error code. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions); + + + +/** + * @returns the current consumer group metadata associated with this consumer, + * or NULL if \p rk is not a consumer configured with a \c group.id. + * This metadata object should be passed to the transactional + * producer's rd_kafka_send_offsets_to_transaction() API. + * + * @remark The returned pointer must be freed by the application using + * rd_kafka_consumer_group_metadata_destroy(). + * + * @sa rd_kafka_send_offsets_to_transaction() + */ +RD_EXPORT rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata(rd_kafka_t *rk); + + +/** + * @brief Create a new consumer group metadata object. + * This is typically only used for writing tests. + * + * @param group_id The group id. + * + * @remark The returned pointer must be freed by the application using + * rd_kafka_consumer_group_metadata_destroy(). 
+ */ +RD_EXPORT rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata_new(const char *group_id); + + +/** + * @brief Create a new consumer group metadata object. + * This is typically only used for writing tests. + * + * @param group_id The group id. + * @param generation_id The group generation id. + * @param member_id The group member id. + * @param group_instance_id The group instance id (may be NULL). + * + * @remark The returned pointer must be freed by the application using + * rd_kafka_consumer_group_metadata_destroy(). + */ +RD_EXPORT rd_kafka_consumer_group_metadata_t * +rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id); + + +/** + * @brief Frees the consumer group metadata object as returned by + * rd_kafka_consumer_group_metadata(). + */ +RD_EXPORT void +rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *); + + +/** + * @brief Serialize the consumer group metadata to a binary format. + * This is mainly for client binding use and not for application use. + * + * @remark The serialized metadata format is private and is not compatible + * across different versions or even builds of librdkafka. + * It should only be used in the same process runtime and must only + * be passed to rd_kafka_consumer_group_metadata_read(). + * + * @param cgmd Metadata to be serialized. + * @param bufferp On success this pointer will be updated to point to na + * allocated buffer containing the serialized metadata. + * The buffer must be freed with rd_kafka_mem_free(). + * @param sizep The pointed to size will be updated with the size of + * the serialized buffer. + * + * @returns NULL on success or an error object on failure. + * + * @sa rd_kafka_consumer_group_metadata_read() + */ +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_write( + const rd_kafka_consumer_group_metadata_t *cgmd, + void **bufferp, + size_t *sizep); + +/** + * @brief Reads serialized consumer group metadata and returns a + * consumer group metadata object. + * This is mainly for client binding use and not for application use. + * + * @remark The serialized metadata format is private and is not compatible + * across different versions or even builds of librdkafka. + * It should only be used in the same process runtime and must only + * be passed to rd_kafka_consumer_group_metadata_read(). + * + * @param cgmdp On success this pointer will be updated to point to a new + * consumer group metadata object which must be freed with + * rd_kafka_consumer_group_metadata_destroy(). + * @param buffer Pointer to the serialized data. + * @param size Size of the serialized data. + * + * @returns NULL on success or an error object on failure. + * + * @sa rd_kafka_consumer_group_metadata_write() + */ +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( + rd_kafka_consumer_group_metadata_t **cgmdp, + const void *buffer, + size_t size); + +/**@}*/ + + + +/** + * @name Producer API + * @{ + * + * + */ + + +/** + * @brief Producer message flags + */ +#define RD_KAFKA_MSG_F_FREE \ + 0x1 /**< Delegate freeing of payload to rdkafka. \ + */ +#define RD_KAFKA_MSG_F_COPY \ + 0x2 /**< rdkafka will make a copy of the payload. \ + */ +#define RD_KAFKA_MSG_F_BLOCK \ + 0x4 /**< Block produce*() on message queue full. \ + * WARNING: If a delivery report callback \ + * is used, the application MUST \ + * call rd_kafka_poll() (or equiv.) 
\ + * to make sure delivered messages \ + * are drained from the internal \ + * delivery report queue. \ + * Failure to do so will result \ + * in indefinitely blocking on \ + * the produce() call when the \ + * message queue is full. */ +#define RD_KAFKA_MSG_F_PARTITION \ + 0x8 /**< produce_batch() will honor \ + * per-message partition. */ + + + +/** + * @brief Produce and send a single message to broker. + * + * \p rkt is the target topic which must have been previously created with + * `rd_kafka_topic_new()`. + * + * `rd_kafka_produce()` is an asynchronous non-blocking API. + * See `rd_kafka_conf_set_dr_msg_cb` on how to setup a callback to be called + * once the delivery status (success or failure) is known. The delivery report + * is triggered by the application calling `rd_kafka_poll()` (at regular + * intervals) or `rd_kafka_flush()` (at termination). + * + * Since producing is asynchronous, you should call `rd_kafka_flush()` before + * you destroy the producer. Otherwise, any outstanding messages will be + * silently discarded. + * + * When temporary errors occur, librdkafka automatically retries to produce the + * messages. Retries are triggered after retry.backoff.ms and when the + * leader broker for the given partition is available. Otherwise, librdkafka + * falls back to polling the topic metadata to monitor when a new leader is + * elected (see the topic.metadata.refresh.fast.interval.ms and + * topic.metadata.refresh.interval.ms configurations) and then performs a + * retry. A delivery error will occur if the message could not be produced + * within message.timeout.ms. + * + * See the "Message reliability" chapter in INTRODUCTION.md for more + * information. + * + * \p partition is the target partition, either: + * - RD_KAFKA_PARTITION_UA (unassigned) for + * automatic partitioning using the topic's partitioner function, or + * - a fixed partition (0..N) + * + * \p msgflags is zero or more of the following flags OR:ed together: + * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if + * \p queue.buffering.max.messages or + * \p queue.buffering.max.kbytes are exceeded. + * Messages are considered in-queue from the point + * they are accepted by produce() until their corresponding delivery report + * callback/event returns. It is thus a requirement to call rd_kafka_poll() (or + * equiv.) from a separate thread when F_BLOCK is used. See WARNING on \c + * RD_KAFKA_MSG_F_BLOCK above. + * + * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done + * with it. + * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the + * \p payload pointer will not be used by rdkafka + * after the call returns. + * RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message + * partition, either set manually or by the + * configured partitioner. + * + * .._F_FREE and .._F_COPY are mutually exclusive. If neither of these are + * set, the caller must ensure that the memory backing \p payload remains + * valid and is not modified or reused until the delivery callback is + * invoked. Other buffers passed to `rd_kafka_produce()` don't have this + * restriction on reuse, i.e. the memory backing the key or the topic name + * may be reused as soon as `rd_kafka_produce()` returns. + * + * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then + * the memory associated with the payload is still the caller's + * responsibility. + * + * \p payload is the message payload of size \p len bytes. 
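 *
 * Example (illustrative sketch; the payload and key are placeholders and
 * rd_kafka_last_error()/rd_kafka_err2str() are declared elsewhere in this
 * header):
 * @code
 *   char payload[] = "hello";
 *   char key[]     = "key1";
 *
 *   if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
 *                        payload, strlen(payload),
 *                        key, strlen(key), NULL) == -1)
 *           fprintf(stderr, "produce failed: %s\n",
 *                   rd_kafka_err2str(rd_kafka_last_error()));
 *
 *   // Serve delivery reports (dr_msg_cb) and other callbacks:
 *   rd_kafka_poll(rk, 0);
 * @endcode
 *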
+ * + * \p key is an optional message key of size \p keylen bytes, if non-NULL it + * will be passed to the topic partitioner as well as be sent with the + * message to the broker and passed on to the consumer. + * + * \p msg_opaque is an optional application-provided per-message opaque + * pointer that will provided in the message's delivery report callback + * (\c dr_msg_cb or \c dr_cb) and the \c rd_kafka_message_t \c _private field. + * + * @remark on_send() and on_acknowledgement() interceptors may be called + * from this function. on_acknowledgement() will only be called if the + * message fails partitioning. + * + * @remark If the producer is transactional (\c transactional.id is configured) + * producing is only allowed during an on-going transaction, namely + * after rd_kafka_begin_transaction() has been called. + * + * @returns 0 on success or -1 on error in which case errno is set accordingly: + * - ENOBUFS - maximum number of outstanding messages has been reached: + * "queue.buffering.max.messages" + * (RD_KAFKA_RESP_ERR__QUEUE_FULL) + * - EMSGSIZE - message is larger than configured max size: + * "messages.max.bytes". + * (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) + * - ESRCH - requested \p partition is unknown in the Kafka cluster. + * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + * - ENOENT - topic is unknown in the Kafka cluster. + * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + * - ECANCELED - fatal error has been raised on producer, see + * rd_kafka_fatal_error(), + * (RD_KAFKA_RESP_ERR__FATAL). + * - ENOEXEC - transactional state forbids producing + * (RD_KAFKA_RESP_ERR__STATE) + * + * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code. + */ +RD_EXPORT +int rd_kafka_produce(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque); + + +/** + * @brief Produce and send a single message to broker. + * + * The message is defined by a va-arg list using \c rd_kafka_vtype_t + * tag tuples which must be terminated with a single \c RD_KAFKA_V_END. + * + * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as + * described in rd_kafka_produce(). + * \c RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and + * _V_HEADERS are mixed. + * + * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...); + + +/** + * @brief Produce and send a single message to broker. + * + * The message is defined by an array of \c rd_kafka_vu_t of + * count \p cnt. + * + * @returns an error object on failure or NULL on success. + * See rd_kafka_producev() for specific error codes. + * + * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END + */ +RD_EXPORT +rd_kafka_error_t * +rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt); + + +/** + * @brief Produce multiple messages. + * + * If partition is RD_KAFKA_PARTITION_UA the configured partitioner will + * be run for each message (slower), otherwise the messages will be enqueued + * to the specified partition directly (faster). + * + * The messages are provided in the array \p rkmessages of count \p message_cnt + * elements. + * The \p partition and \p msgflags are used for all provided messages. 
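 *
 * Example (illustrative sketch; two copied messages are enqueued to the
 * partitioner-selected partition, payloads are placeholders):
 * @code
 *   rd_kafka_message_t msgs[2];
 *   memset(msgs, 0, sizeof(msgs));
 *   msgs[0].payload = (void *)"first";
 *   msgs[0].len     = 5;
 *   msgs[1].payload = (void *)"second";
 *   msgs[1].len     = 6;
 *
 *   int enqueued = rd_kafka_produce_batch(rkt, RD_KAFKA_PARTITION_UA,
 *                                         RD_KAFKA_MSG_F_COPY, msgs, 2);
 *   if (enqueued != 2) {
 *           // Inspect msgs[i].err for the messages that were not enqueued.
 *   }
 * @endcode
 *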
+ * + * Honoured \p rkmessages[] fields are: + * - payload,len Message payload and length + * - key,key_len Optional message key + * - _private Message opaque pointer (msg_opaque) + * - err Will be set according to success or failure, see + * rd_kafka_produce() for possible error codes. + * Application only needs to check for errors if + * return value != \p message_cnt. + * + * @remark If \c RD_KAFKA_MSG_F_PARTITION is set in \p msgflags, the + * \c .partition field of the \p rkmessages is used instead of + * \p partition. + * + * @returns the number of messages succesfully enqueued for producing. + * + * @remark This interface does NOT support setting message headers on + * the provided \p rkmessages. + */ +RD_EXPORT +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + rd_kafka_message_t *rkmessages, + int message_cnt); + + + +/** + * @brief Wait until all outstanding produce requests, et.al, are completed. + * This should typically be done prior to destroying a producer instance + * to make sure all queued and in-flight produce requests are completed + * before terminating. + * + * @remark This function will call rd_kafka_poll() and thus trigger callbacks. + * + * @remark The \c linger.ms time will be ignored for the duration of the call, + * queued messages will be sent to the broker as soon as possible. + * + * @remark If RD_KAFKA_EVENT_DR has been enabled + * (through rd_kafka_conf_set_events()) this function will not call + * rd_kafka_poll() but instead wait for the librdkafka-handled + * message count to reach zero. This requires the application to + * serve the event queue in a separate thread. + * In this mode only messages are counted, not other types of + * queued events. + * + * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all + * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR + * + * @sa rd_kafka_outq_len() + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms); + + + +/** + * @brief Purge messages currently handled by the producer instance. + * + * @param rk Client instance. + * @param purge_flags Tells which messages to purge and how. + * + * The application will need to call rd_kafka_poll() or rd_kafka_flush() + * afterwards to serve the delivery report callbacks of the purged messages. + * + * Messages purged from internal queues fail with the delivery report + * error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that + * are in-flight to or from the broker will fail with the error code set to + * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT. + * + * @warning Purging messages that are in-flight to or from the broker + * will ignore any subsequent acknowledgement for these messages + * received from the broker, effectively making it impossible + * for the application to know if the messages were successfully + * produced or not. This may result in duplicate messages if the + * application retries these messages at a later time. + * + * @remark This call may block for a short time while background thread + * queues are purged. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, + * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p purge flags are invalid + * or unknown, + * RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer + * client instance. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags); + + +/** + * @brief Flags for rd_kafka_purge() + */ + +/*! + * Purge messages in internal queues. 
+ */ +#define RD_KAFKA_PURGE_F_QUEUE 0x1 + +/*! + * Purge messages in-flight to or from the broker. + * Purging these messages will void any future acknowledgements from the + * broker, making it impossible for the application to know if these + * messages were successfully delivered or not. + * Retrying these messages may lead to duplicates. + */ +#define RD_KAFKA_PURGE_F_INFLIGHT 0x2 + + +/*! + * Don't wait for background thread queue purging to finish. + */ +#define RD_KAFKA_PURGE_F_NON_BLOCKING 0x4 + + +/**@}*/ + + +/** + * @name Metadata API + * @{ + * + * + */ + + +/** + * @brief Broker information + */ +typedef struct rd_kafka_metadata_broker { + int32_t id; /**< Broker Id */ + char *host; /**< Broker hostname */ + int port; /**< Broker listening port */ +} rd_kafka_metadata_broker_t; + +/** + * @brief Partition information + */ +typedef struct rd_kafka_metadata_partition { + int32_t id; /**< Partition Id */ + rd_kafka_resp_err_t err; /**< Partition error reported by broker */ + int32_t leader; /**< Leader broker */ + int replica_cnt; /**< Number of brokers in \p replicas */ + int32_t *replicas; /**< Replica brokers */ + int isr_cnt; /**< Number of ISR brokers in \p isrs */ + int32_t *isrs; /**< In-Sync-Replica brokers */ +} rd_kafka_metadata_partition_t; + +/** + * @brief Topic information + */ +typedef struct rd_kafka_metadata_topic { + char *topic; /**< Topic name */ + int partition_cnt; /**< Number of partitions in \p partitions*/ + struct rd_kafka_metadata_partition *partitions; /**< Partitions */ + rd_kafka_resp_err_t err; /**< Topic error reported by broker */ +} rd_kafka_metadata_topic_t; + + +/** + * @brief Metadata container + */ +typedef struct rd_kafka_metadata { + int broker_cnt; /**< Number of brokers in \p brokers */ + struct rd_kafka_metadata_broker *brokers; /**< Brokers */ + + int topic_cnt; /**< Number of topics in \p topics */ + struct rd_kafka_metadata_topic *topics; /**< Topics */ + + int32_t orig_broker_id; /**< Broker originating this metadata */ + char *orig_broker_name; /**< Name of originating broker */ +} rd_kafka_metadata_t; + +/** + * @brief Request Metadata from broker. + * + * Parameters: + * - \p all_topics if non-zero: request info about all topics in cluster, + * if zero: only request info about locally known topics. + * - \p only_rkt only request info about this topic + * - \p metadatap pointer to hold metadata result. + * The \p *metadatap pointer must be released + * with rd_kafka_metadata_destroy(). + * - \p timeout_ms maximum response time before failing. + * + * @remark Consumer: If \p all_topics is non-zero the Metadata response + * information may trigger a re-join if any subscribed topics + * have changed partition count or existence state. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap) + * will be set, else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or + * other error code on error. + */ +RD_EXPORT +rd_kafka_resp_err_t +rd_kafka_metadata(rd_kafka_t *rk, + int all_topics, + rd_kafka_topic_t *only_rkt, + const struct rd_kafka_metadata **metadatap, + int timeout_ms); + +/** + * @brief Release metadata memory. + */ +RD_EXPORT +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); + +/** + * @brief Node (broker) information. + */ +typedef struct rd_kafka_Node_s rd_kafka_Node_t; + +/** + * @brief Get the id of \p node. + * + * @param node The Node instance. + * + * @return The node id. 
+ */ +RD_EXPORT +int rd_kafka_Node_id(const rd_kafka_Node_t *node); + +/** + * @brief Get the host of \p node. + * + * @param node The Node instance. + * + * @return The node host. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p node object. + */ +RD_EXPORT +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node); + +/** + * @brief Get the port of \p node. + * + * @param node The Node instance. + * + * @return The node port. + */ +RD_EXPORT +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node); + +/** + * @brief Get the rack of \p node. + * + * @param node The Node instance + * + * @return The node rack id. May be NULL. + */ +RD_EXPORT +const char *rd_kafka_Node_rack(const rd_kafka_Node_t *node); + +/**@}*/ + + + +/** + * @name Client group information + * @{ + * + * + */ + + +/** + * @brief Group member information + * + * For more information on \p member_metadata format, see + * https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI + * + */ +struct rd_kafka_group_member_info { + char *member_id; /**< Member id (generated by broker) */ + char *client_id; /**< Client's \p client.id */ + char *client_host; /**< Client's hostname */ + void *member_metadata; /**< Member metadata (binary), + * format depends on \p protocol_type. */ + int member_metadata_size; /**< Member metadata size in bytes */ + void *member_assignment; /**< Member assignment (binary), + * format depends on \p protocol_type. */ + int member_assignment_size; /**< Member assignment size in bytes */ +}; + +/** + * @enum rd_kafka_consumer_group_state_t + * + * @brief Consumer group state. + */ +typedef enum { + RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0, + RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1, + RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2, + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3, + RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4, + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5, + RD_KAFKA_CONSUMER_GROUP_STATE__CNT +} rd_kafka_consumer_group_state_t; + +/** + * @brief Group information + */ +struct rd_kafka_group_info { + struct rd_kafka_metadata_broker broker; /**< Originating broker info */ + char *group; /**< Group name */ + rd_kafka_resp_err_t err; /**< Broker-originated error */ + char *state; /**< Group state */ + char *protocol_type; /**< Group protocol type */ + char *protocol; /**< Group protocol */ + struct rd_kafka_group_member_info *members; /**< Group members */ + int member_cnt; /**< Group member count */ +}; + +/** + * @brief List of groups + * + * @sa rd_kafka_group_list_destroy() to release list memory. + */ +struct rd_kafka_group_list { + struct rd_kafka_group_info *groups; /**< Groups */ + int group_cnt; /**< Group count */ +}; + + +/** + * @brief List and describe client groups in cluster. + * + * \p group is an optional group name to describe, otherwise (\c NULL) all + * groups are returned. + * + * \p timeout_ms is the (approximate) maximum time to wait for response + * from brokers and must be a positive value. + * + * @returns \c RD_KAFKA_RESP_ERR__NO_ERROR on success and \p grplistp is + * updated to point to a newly allocated list of groups. + * \c RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded + * in time but at least one group is returned in \p grplistlp. 
+ * \c RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the + * given timeframe but not all brokers have yet responded, or + * if the list of brokers in the cluster could not be obtained within + * the given timeframe. + * \c RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found. + * Other error codes may also be returned from the request layer. + * + * The \p grplistp remains untouched if any error code is returned, + * with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves + * as RD_KAFKA_RESP_ERR__NO_ERROR (success) but with an incomplete + * group list. + * + * @sa Use rd_kafka_group_list_destroy() to release list memory. + * + * @deprecated Use rd_kafka_ListConsumerGroups() and + * rd_kafka_DescribeConsumerGroups() instead. + */ +RD_EXPORT +rd_kafka_resp_err_t +rd_kafka_list_groups(rd_kafka_t *rk, + const char *group, + const struct rd_kafka_group_list **grplistp, + int timeout_ms); + +/** + * @brief Returns a name for a state code. + * + * @param state The state value. + * + * @return The group state name corresponding to the provided group state value. + */ +RD_EXPORT +const char * +rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state); + +/** + * @brief Returns a code for a state name. + * + * @param name The state name. + * + * @return The group state value corresponding to the provided group state name. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t +rd_kafka_consumer_group_state_code(const char *name); + +/** + * @brief Release list memory + */ +RD_EXPORT +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist); + + +/**@}*/ + + + +/** + * @name Miscellaneous APIs + * @{ + * + */ + + +/** + * @brief Adds one or more brokers to the kafka handle's list of initial + * bootstrap brokers. + * + * Additional brokers will be discovered automatically as soon as rdkafka + * connects to a broker by querying the broker metadata. + * + * If a broker name resolves to multiple addresses (and possibly + * address families) all will be used for connection attempts in + * round-robin fashion. + * + * \p brokerlist is a ,-separated list of brokers in the format: + * \c \,\,.. + * Where each broker is in either the host or URL based format: + * \c \[:\] + * \c \://\[:port] + * \c \ is either \c PLAINTEXT, \c SSL, \c SASL, \c SASL_PLAINTEXT + * The two formats can be mixed but ultimately the value of the + * `security.protocol` config property decides what brokers are allowed. + * + * Example: + * brokerlist = "broker1:10000,broker2" + * brokerlist = "SSL://broker3:9000,ssl://broker2" + * + * @returns the number of brokers successfully added. + * + * @remark Brokers may also be defined with the \c metadata.broker.list or + * \c bootstrap.servers configuration property (preferred method). + * + * @deprecated Set bootstrap servers with the \c bootstrap.servers + * configuration property. + */ +RD_EXPORT +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); + + + +/** + * @brief Set logger function. + * + * The default is to print to stderr, but a syslog logger is also available, + * see rd_kafka_log_(print|syslog) for the builtin alternatives. + * Alternatively the application may provide its own logger callback. + * Or pass 'func' as NULL to disable logging. + * + * @deprecated Use rd_kafka_conf_set_log_cb() + * + * @remark \p rk may be passed as NULL in the callback. 
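[Editor's note] A sketch of the (deprecated) rd_kafka_list_groups() call described above, again assuming an existing handle rk. Per the documentation, RD_KAFKA_RESP_ERR__PARTIAL still yields a usable, possibly incomplete, list.

static void print_groups(rd_kafka_t *rk) {
        const struct rd_kafka_group_list *grplist;
        rd_kafka_resp_err_t err =
            rd_kafka_list_groups(rk, NULL /* all groups */, &grplist, 10000);
        if (err != RD_KAFKA_RESP_ERR_NO_ERROR &&
            err != RD_KAFKA_RESP_ERR__PARTIAL) {
                fprintf(stderr, "list_groups failed: %s\n",
                        rd_kafka_err2str(err));
                return;
        }
        for (int i = 0; i < grplist->group_cnt; i++)
                printf("group %s (%s): %d member(s)\n",
                       grplist->groups[i].group,
                       grplist->groups[i].state,
                       grplist->groups[i].member_cnt);
        rd_kafka_group_list_destroy(grplist);
}

New code should prefer rd_kafka_ListConsumerGroups()/rd_kafka_DescribeConsumerGroups(), as the deprecation note above says.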
+ */ +RD_EXPORT RD_DEPRECATED void +rd_kafka_set_logger(rd_kafka_t *rk, + void (*func)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); + + +/** + * @brief Specifies the maximum logging level emitted by + * internal kafka logging and debugging. + * + * @deprecated Set the \c "log_level" configuration property instead. + * + * @remark If the \p \"debug\" configuration property is set the log level is + * automatically adjusted to \c LOG_DEBUG (7). + */ +RD_EXPORT +void rd_kafka_set_log_level(rd_kafka_t *rk, int level); + + +/** + * @brief Builtin (default) log sink: print to stderr + */ +RD_EXPORT +void rd_kafka_log_print(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); + + +/** + * @brief Builtin log sink: print to syslog. + * @remark This logger is only available if librdkafka was built + * with syslog support. + */ +RD_EXPORT +void rd_kafka_log_syslog(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); + + +/** + * @brief Returns the current out queue length. + * + * The out queue length is the sum of: + * - number of messages waiting to be sent to, or acknowledged by, + * the broker. + * - number of delivery reports (e.g., dr_msg_cb) waiting to be served + * by rd_kafka_poll() or rd_kafka_flush(). + * - number of callbacks (e.g., error_cb, stats_cb, etc) waiting to be + * served by rd_kafka_poll(), rd_kafka_consumer_poll() or rd_kafka_flush(). + * - number of events waiting to be served by background_event_cb() in + * the background queue (see rd_kafka_conf_set_background_event_cb). + * + * An application should wait for the return value of this function to reach + * zero before terminating to make sure outstanding messages, + * requests (such as offset commits), callbacks and events are fully processed. + * See rd_kafka_flush(). + * + * @returns number of messages and events waiting in queues. + * + * @sa rd_kafka_flush() + */ +RD_EXPORT +int rd_kafka_outq_len(rd_kafka_t *rk); + + + +/** + * @brief Dumps rdkafka's internal state for handle \p rk to stream \p fp + * + * This is only useful for debugging rdkafka, showing state and statistics + * for brokers, topics, partitions, etc. + */ +RD_EXPORT +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk); + + + +/** + * @brief Retrieve the current number of threads in use by librdkafka. + * + * Used by regression tests. + */ +RD_EXPORT +int rd_kafka_thread_cnt(void); + + +/** + * @enum rd_kafka_thread_type_t + * + * @brief librdkafka internal thread type. + * + * @sa rd_kafka_interceptor_add_on_thread_start() + */ +typedef enum rd_kafka_thread_type_t { + RD_KAFKA_THREAD_MAIN, /**< librdkafka's internal main thread */ + RD_KAFKA_THREAD_BACKGROUND, /**< Background thread (if enabled) */ + RD_KAFKA_THREAD_BROKER /**< Per-broker thread */ +} rd_kafka_thread_type_t; + + +/** + * @brief Wait for all rd_kafka_t objects to be destroyed. + * + * Returns 0 if all kafka objects are now destroyed, or -1 if the + * timeout was reached. + * + * @remark This function is deprecated. + */ +RD_EXPORT +int rd_kafka_wait_destroyed(int timeout_ms); + + +/** + * @brief Run librdkafka's built-in unit-tests. + * + * @returns the number of failures, or 0 if all tests passed. + */ +RD_EXPORT +int rd_kafka_unittest(void); + + +/**@}*/ + + + +/** + * @name Experimental APIs + * @{ + */ + +/** + * @brief Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's + * queue (rd_kafka_consumer_poll()). 
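[Editor's note] The rd_kafka_outq_len() description above implies the usual producer shutdown sequence; a small sketch, assuming rd_kafka_flush() and rd_kafka_destroy() from elsewhere in this header:

static void shutdown_producer(rd_kafka_t *rk) {
        rd_kafka_flush(rk, 30 * 1000 /* ms */);   /* serve DRs and callbacks */
        if (rd_kafka_outq_len(rk) > 0)
                fprintf(stderr, "%d message(s)/event(s) not fully served\n",
                        rd_kafka_outq_len(rk));
        rd_kafka_destroy(rk);
}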
+ * + * @warning It is not permitted to call rd_kafka_poll() after directing the + * main queue with rd_kafka_poll_set_consumer(). + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk); + + +/**@}*/ + +/** + * @name Event interface + * + * @brief The event API provides an alternative pollable non-callback interface + * to librdkafka's message and event queues. + * + * @{ + */ + + +/** + * @brief Event types + */ +typedef int rd_kafka_event_type_t; +#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */ +#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ +#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ +#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ +#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ +#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ +#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ +#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ +#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ +#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ +#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT \ + 102 /**< CreatePartitions_result_t */ +#define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT \ + 104 /**< DescribeConfigs_result_t */ +#define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */ +#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */ +/** DeleteConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107 +/** SASL/OAUTHBEARER token needs to be refreshed */ +#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 +#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */ +#define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */ +#define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */ +#define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */ +/** ListConsumerGroupsResult_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT 0x2000 +/** DescribeConsumerGroups_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT 0x4000 +/** ListConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT 0x8000 +/** AlterConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT 0x10000 +/** IncrementalAlterConfigs_result_t */ +#define RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT 0x20000 +/** DescribeUserScramCredentials_result_t */ +#define RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT 0x40000 +/** AlterUserScramCredentials_result_t */ +#define RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT 0x80000 +/** DescribeTopics_result_t */ +#define RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT 0x100000 +/** DescribeCluster_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT 0x200000 +/** ListOffsets_result_t */ +#define RD_KAFKA_EVENT_LISTOFFSETS_RESULT 0x400000 + +/** + * @returns the event type for the given event. + * + * @remark As a convenience it is okay to pass \p rkev as NULL in which case + * RD_KAFKA_EVENT_NONE is returned. + */ +RD_EXPORT +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev); + +/** + * @returns the event type's name for the given event. + * + * @remark As a convenience it is okay to pass \p rkev as NULL in which case + * the name for RD_KAFKA_EVENT_NONE is returned. 
+ */ +RD_EXPORT +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev); + + +/** + * @brief Destroy an event. + * + * @remark Any references to this event, such as extracted messages, + * will not be usable after this call. + * + * @remark As a convenience it is okay to pass \p rkev as NULL in which case + * no action is performed. + */ +RD_EXPORT +void rd_kafka_event_destroy(rd_kafka_event_t *rkev); + + +/** + * @returns the next message from an event. + * + * Call repeatedly until it returns NULL. + * + * Event types: + * - RD_KAFKA_EVENT_FETCH (1 message) + * - RD_KAFKA_EVENT_DR (>=1 message(s)) + * + * @remark The returned message(s) MUST NOT be + * freed with rd_kafka_message_destroy(). + * + * @remark on_consume() interceptor may be called + * from this function prior to passing message to application. + */ +RD_EXPORT +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev); + + +/** + * @brief Extacts \p size message(s) from the event into the + * pre-allocated array \p rkmessages. + * + * Event types: + * - RD_KAFKA_EVENT_FETCH (1 message) + * - RD_KAFKA_EVENT_DR (>=1 message(s)) + * + * @returns the number of messages extracted. + * + * @remark on_consume() interceptor may be called + * from this function prior to passing message to application. + */ +RD_EXPORT +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, + const rd_kafka_message_t **rkmessages, + size_t size); + + +/** + * @returns the number of remaining messages in the event. + * + * Event types: + * - RD_KAFKA_EVENT_FETCH (1 message) + * - RD_KAFKA_EVENT_DR (>=1 message(s)) + */ +RD_EXPORT +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev); + + +/** + * @returns the associated configuration string for the event, or NULL + * if the configuration property is not set or if + * not applicable for the given event type. + * + * The returned memory is read-only and its lifetime is the same as the + * event object. + * + * Event types: + * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config + */ +RD_EXPORT +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev); + + +/** + * @returns the error code for the event. + * + * Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error. + * + * Event types: + * - all + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev); + + +/** + * @returns the error string (if any). + * An application should check that rd_kafka_event_error() returns + * non-zero before calling this function. + * + * Event types: + * - all + */ +RD_EXPORT +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev); + + +/** + * @returns 1 if the error is a fatal error, else 0. + * + * Event types: + * - RD_KAFKA_EVENT_ERROR + * + * @sa rd_kafka_fatal_error() + */ +RD_EXPORT +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev); + + +/** + * @returns the event opaque (if any) as passed to rd_kafka_commit() (et.al) or + * rd_kafka_AdminOptions_set_opaque(), depending on event type. 
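[Editor's note] A sketch of one iteration of an event loop over the pollable event interface, using only accessors shown in this excerpt; rkqu is assumed to be a queue obtained elsewhere (for example the main or consumer queue).

static void serve_queue_once(rd_kafka_queue_t *rkqu) {
        rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, 1000 /* ms */);
        if (!rkev)
                return;                       /* timed out, nothing to serve */

        switch (rd_kafka_event_type(rkev)) {
        case RD_KAFKA_EVENT_DR: {
                const rd_kafka_message_t *rkm;
                while ((rkm = rd_kafka_event_message_next(rkev)))
                        if (rkm->err)         /* per-message delivery status */
                                fprintf(stderr, "delivery failed: %s\n",
                                        rd_kafka_err2str(rkm->err));
                break;
        }
        case RD_KAFKA_EVENT_ERROR:
                fprintf(stderr, "error event: %s\n",
                        rd_kafka_event_error_string(rkev));
                break;
        default:
                break;                        /* other event types ignored here */
        }
        rd_kafka_event_destroy(rkev);         /* also invalidates extracted msgs */
}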
+ * + * Event types: + * - RD_KAFKA_EVENT_OFFSET_COMMIT + * - RD_KAFKA_EVENT_CREATETOPICS_RESULT + * - RD_KAFKA_EVENT_DELETETOPICS_RESULT + * - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT + * - RD_KAFKA_EVENT_CREATEACLS_RESULT + * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT + * - RD_KAFKA_EVENT_DELETEACLS_RESULT + * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT + * - RD_KAFKA_EVENT_INCREMENTAL_ALTERCONFIGS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT + * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT + * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_DELETERECORDS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + * - RD_KAFKA_EVENT_LISTOFFSETS_RESULT + */ +RD_EXPORT +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev); + + +/** + * @brief Extract log message from the event. + * + * Event types: + * - RD_KAFKA_EVENT_LOG + * + * @returns 0 on success or -1 if unsupported event type. + */ +RD_EXPORT +int rd_kafka_event_log(rd_kafka_event_t *rkev, + const char **fac, + const char **str, + int *level); + + +/** + * @brief Extract log debug context from event. + * + * Event types: + * - RD_KAFKA_EVENT_LOG + * + * @param rkev the event to extract data from. + * @param dst destination string for comma separated list. + * @param dstsize size of provided dst buffer. + * @returns 0 on success or -1 if unsupported event type. + */ +RD_EXPORT +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, + char *dst, + size_t dstsize); + + +/** + * @brief Extract stats from the event. + * + * Event types: + * - RD_KAFKA_EVENT_STATS + * + * @returns stats json string. + * + * @remark the returned string will be freed automatically along with the event + * object + * + */ +RD_EXPORT +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev); + + +/** + * @returns the topic partition list from the event. + * + * @remark The list MUST NOT be freed with + * rd_kafka_topic_partition_list_destroy() + * + * Event types: + * - RD_KAFKA_EVENT_REBALANCE + * - RD_KAFKA_EVENT_OFFSET_COMMIT + */ +RD_EXPORT rd_kafka_topic_partition_list_t * +rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev); + + +/** + * @returns a newly allocated topic_partition container, if applicable for the + * event type, else NULL. + * + * @remark The returned pointer MUST be freed with + * rd_kafka_topic_partition_destroy(). + * + * Event types: + * RD_KAFKA_EVENT_ERROR (for partition level errors) + */ +RD_EXPORT rd_kafka_topic_partition_t * +rd_kafka_event_topic_partition(rd_kafka_event_t *rkev); + + +/*! CreateTopics result type */ +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t; +/*! DeleteTopics result type */ +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t; +/*! CreateAcls result type */ +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t; +/*! DescribeAcls result type */ +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t; +/*! DeleteAcls result type */ +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t; +/*! CreatePartitions result type */ +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t; +/*! AlterConfigs result type */ +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t; +/*! IncrementalAlterConfigs result type */ +typedef rd_kafka_event_t rd_kafka_IncrementalAlterConfigs_result_t; +/*! 
CreateTopics result type */ +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t; +/*! DeleteRecords result type */ +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t; +/*! ListConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t; +/*! DescribeConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t; +/*! DeleteGroups result type */ +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t; +/*! DeleteConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t; +/*! AlterConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t; +/*! ListConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t; +/*! DescribeTopics result type */ +typedef rd_kafka_event_t rd_kafka_DescribeTopics_result_t; +/*! DescribeCluster result type */ +typedef rd_kafka_event_t rd_kafka_DescribeCluster_result_t; +/*! DescribeUserScramCredentials result type */ +typedef rd_kafka_event_t rd_kafka_DescribeUserScramCredentials_result_t; +/*! AlterUserScramCredentials result type */ +typedef rd_kafka_event_t rd_kafka_AlterUserScramCredentials_result_t; +/*! ListOffsets result type */ +typedef rd_kafka_event_t rd_kafka_ListOffsets_result_t; + +/** + * @brief Get CreateTopics result. + * + * @returns the result of a CreateTopics request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_CREATETOPICS_RESULT + */ +RD_EXPORT const rd_kafka_CreateTopics_result_t * +rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DeleteTopics result. + * + * @returns the result of a DeleteTopics request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETETOPICS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteTopics_result_t * +rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev); + +/** + * @brief Get CreatePartitions result. + * + * @returns the result of a CreatePartitions request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT + */ +RD_EXPORT const rd_kafka_CreatePartitions_result_t * +rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev); + +/** + * @brief Get AlterConfigs result. + * + * @returns the result of a AlterConfigs request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_ALTERCONFIGS_RESULT + */ +RD_EXPORT const rd_kafka_AlterConfigs_result_t * +rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev); + +/** + * @brief Get IncrementalAlterConfigs result. + * + * @returns the result of a IncrementalAlterConfigs request, or NULL if event is + * of different type. + * + * Event types: + * RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT + */ +RD_EXPORT const rd_kafka_IncrementalAlterConfigs_result_t * +rd_kafka_event_IncrementalAlterConfigs_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeConfigs result. + * + * @returns the result of a DescribeConfigs request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeConfigs_result_t * +rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DeleteRecords request, or NULL if event is of + * different type. 
+ * + * Event types: + * RD_KAFKA_EVENT_DELETERECORDS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteRecords_result_t * +rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ListConsumerGroups result. + * + * @returns the result of a ListConsumerGroups request, or NULL if event is of + * different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_ListConsumerGroups_result_t * +rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeConsumerGroups result. + * + * @returns the result of a DescribeConsumerGroups request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeConsumerGroups_result_t * +rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeTopics result. + * + * @returns the result of a DescribeTopics request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeTopics_result_t * +rd_kafka_event_DescribeTopics_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeCluster result. + * + * @returns the result of a DescribeCluster request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + */ +RD_EXPORT const rd_kafka_DescribeCluster_result_t * +rd_kafka_event_DescribeCluster_result(rd_kafka_event_t *rkev); +/** + * @brief Get DeleteGroups result. + * + * @returns the result of a DeleteGroups request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETEGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteGroups_result_t * +rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DeleteConsumerGroupOffsets result. + * + * @returns the result of a DeleteConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t * +rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a CreateAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_CREATEACLS_RESULT + */ +RD_EXPORT const rd_kafka_CreateAcls_result_t * +rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DescribeAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBEACLS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeAcls_result_t * +rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DeleteAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETEACLS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteAcls_result_t * +rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ListConsumerGroupOffsets result. 
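[Editor's note] The rd_kafka_event_*_result() getters above all follow the same pattern; a sketch using DeleteTopics as the example, with the caller owning and later destroying the event (the result shares the event's lifetime):

static const rd_kafka_DeleteTopics_result_t *
wait_DeleteTopics_result(rd_kafka_queue_t *rkqu, int timeout_ms,
                         rd_kafka_event_t **rkevp /* destroy when done */) {
        rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, timeout_ms);
        if (!rkev)
                return NULL;                               /* timed out */
        if (rd_kafka_event_type(rkev) != RD_KAFKA_EVENT_DELETETOPICS_RESULT) {
                rd_kafka_event_destroy(rkev);              /* not the event we want */
                return NULL;
        }
        *rkevp = rkev;
        return rd_kafka_event_DeleteTopics_result(rkev);
}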
+ * + * @returns the result of a ListConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_ListConsumerGroupOffsets_result_t * +rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @brief Get AlterConsumerGroupOffsets result. + * + * @returns the result of a AlterConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_AlterConsumerGroupOffsets_result_t * +rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ListOffsets result. + * + * @returns the result of a ListOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_ListOffsets_result_t * +rd_kafka_event_ListOffsets_result(rd_kafka_event_t *rkev); + + +/** + * @brief Get DescribeUserScramCredentials result. + * + * @returns the result of a DescribeUserScramCredentials request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeUserScramCredentials_result_t * +rd_kafka_event_DescribeUserScramCredentials_result(rd_kafka_event_t *rkev); + +/** + * @brief Get AlterUserScramCredentials result. + * + * @returns the result of a AlterUserScramCredentials request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT + */ +RD_EXPORT const rd_kafka_AlterUserScramCredentials_result_t * +rd_kafka_event_AlterUserScramCredentials_result(rd_kafka_event_t *rkev); + +/** + * @brief Poll a queue for an event for max \p timeout_ms. + * + * @returns an event, or NULL. + * + * @remark Use rd_kafka_event_destroy() to free the event. + * + * @sa rd_kafka_conf_set_background_event_cb() + */ +RD_EXPORT +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms); + +/** + * @brief Poll a queue for events served through callbacks for max \p + * timeout_ms. + * + * @returns the number of events served. + * + * @remark This API must only be used for queues with callbacks registered + * for all expected event types. E.g., not a message queue. + * + * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering + * event callbacks from a librdkafka-managed background thread. + * + * @sa rd_kafka_conf_set_background_event_cb() + */ +RD_EXPORT +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms); + + +/**@}*/ + + +/** + * @name Plugin interface + * + * @brief A plugin interface that allows external runtime-loaded libraries + * to integrate with a client instance without modifications to + * the application code. 
+ * + * Plugins are loaded when referenced through the `plugin.library.paths` + * configuration property and operates on the \c rd_kafka_conf_t + * object prior \c rd_kafka_t instance creation. + * + * @warning Plugins require the application to link librdkafka dynamically + * and not statically. Failure to do so will lead to missing symbols + * or finding symbols in another librdkafka library than the + * application was linked with. + * @{ + */ + + +/** + * @brief Plugin's configuration initializer method called each time the + * library is referenced from configuration (even if previously loaded by + * another client instance). + * + * @remark This method MUST be implemented by plugins and have the symbol name + * \c conf_init + * + * @param conf Configuration set up to this point. + * @param plug_opaquep Plugin can set this pointer to a per-configuration + * opaque pointer. + * @param errstr String buffer of size \p errstr_size where plugin must write + * a human readable error string in the case the initializer + * fails (returns non-zero). + * @param errstr_size Maximum space (including \0) in \p errstr. + * + * @remark A plugin may add an on_conf_destroy() interceptor to clean up + * plugin-specific resources created in the plugin's conf_init() method. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. + */ +typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)( + rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size); + +/**@}*/ + + + +/** + * @name Interceptors + * + * @{ + * + * @brief A callback interface that allows message interception for both + * producer and consumer data pipelines. + * + * Except for the on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy() + * interceptors, interceptors are added to the + * newly created rd_kafka_t client instance. These interceptors MUST only + * be added from on_new() and MUST NOT be added after rd_kafka_new() returns. + * + * The on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy() interceptors + * are added to the configuration object which is later passed to + * rd_kafka_new() where on_new() is called to allow addition of + * other interceptors. + * + * Each interceptor reference consists of a display name (ic_name), + * a callback function, and an application-specified opaque value that is + * passed as-is to the callback. + * The ic_name must be unique for the interceptor implementation and is used + * to reject duplicate interceptor methods. + * + * Any number of interceptors can be added and they are called in the order + * they were added, unless otherwise noted. + * The list of registered interceptor methods are referred to as + * interceptor chains. + * + * @remark Contrary to the Java client the librdkafka interceptor interface + * does not support message key and value modification. + * Message mutability is discouraged in the Java client and the + * combination of serializers and headers cover most use-cases. + * + * @remark Interceptors are NOT copied to the new configuration on + * rd_kafka_conf_dup() since it would be hard for interceptors to + * track usage of the interceptor's opaque value. + * An interceptor should rely on the plugin, which will be copied + * in rd_kafka_conf_conf_dup(), to set up the initial interceptors. 
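[Editor's note] A minimal, illustrative sketch of a plugin entry point matching the conf_init contract described above. The struct and symbol names other than conf_init are hypothetical; the function would be built into a shared library referenced via plugin.library.paths.

#include <stdio.h>
#include <stdlib.h>

struct my_plugin_state { int enabled; };      /* hypothetical per-conf state */

rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf,
                              void **plug_opaquep,
                              char *errstr, size_t errstr_size) {
        struct my_plugin_state *st = calloc(1, sizeof(*st));
        if (!st) {
                snprintf(errstr, errstr_size, "out of memory");
                return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
        }
        *plug_opaquep = st;   /* later passed to this plugin's interceptors */
        /* A real plugin would typically add on_new()/on_conf_*() interceptors
         * on `conf` here; see the interceptor sketches further below. */
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}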
+ * An interceptor should implement the on_conf_dup() method + * to manually set up its internal configuration on the newly created + * configuration object that is being copied-to based on the + * interceptor-specific configuration properties. + * conf_dup() should thus be treated the same as conf_init(). + * + * @remark Interceptors are keyed by the interceptor type (on_..()), the + * interceptor name (ic_name) and the interceptor method function. + * Duplicates are not allowed and the .._add_on_..() method will + * return RD_KAFKA_RESP_ERR__CONFLICT if attempting to add a duplicate + * method. + * The only exception is on_conf_destroy() which may be added multiple + * times by the same interceptor to allow proper cleanup of + * interceptor configuration state. + */ + + +/** + * @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order + * the interceptors were added. + * + * @param conf Configuration object. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * @param name The configuration property to set. + * @param val The configuration value to set, or NULL for reverting to default + * in which case the previous value should be freed. + * @param errstr A human readable error string in case the interceptor fails. + * @param errstr_size Maximum space (including \0) in \p errstr. + * + * @returns RD_KAFKA_CONF_OK if the property was known and successfully + * handled by the interceptor, RD_KAFKA_CONF_INVALID if the + * property was handled by the interceptor but the value was invalid, + * or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle + * this property, in which case the property is passed on on the + * interceptor in the chain, finally ending up at the built-in + * configuration handler. + */ +typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)( + rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size, + void *ic_opaque); + + +/** + * @brief on_conf_dup() is called from rd_kafka_conf_dup() in the + * order the interceptors were added and is used to let + * an interceptor re-register its conf interecptors with a new + * opaque value. + * The on_conf_dup() method is called prior to the configuration from + * \p old_conf being copied to \p new_conf. + * + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * @param new_conf New configuration object. + * @param old_conf Old configuration object to copy properties from. + * @param filter_cnt Number of property names to filter in \p filter. + * @param filter Property names to filter out (ignore) when setting up + * \p new_conf. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code + * on failure (which is logged but otherwise ignored). + * + * @remark No on_conf_* interceptors are copied to the new configuration + * object on rd_kafka_conf_dup(). + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t)( + rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter, + void *ic_opaque); + + +/** + * @brief on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the + * order the interceptors were added. + * + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t)( + void *ic_opaque); + + +/** + * @brief on_new() is called from rd_kafka_new() prior toreturning + * the newly created client instance to the application. 
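[Editor's note] Continuing the hypothetical plugin sketch, an on_conf_set() interceptor matching the typedef above that claims an invented property ("my_plugin.enable") and passes everything else on by returning RD_KAFKA_CONF_UNKNOWN:

#include <string.h>

static rd_kafka_conf_res_t
my_on_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val,
               char *errstr, size_t errstr_size, void *ic_opaque) {
        struct my_plugin_state *st = ic_opaque;

        if (strcmp(name, "my_plugin.enable"))
                return RD_KAFKA_CONF_UNKNOWN;     /* not ours: pass it on */

        if (val && strcmp(val, "true") && strcmp(val, "false")) {
                snprintf(errstr, errstr_size,
                         "my_plugin.enable must be true or false");
                return RD_KAFKA_CONF_INVALID;     /* ours, but invalid value */
        }
        st->enabled = val && !strcmp(val, "true");
        return RD_KAFKA_CONF_OK;                  /* handled by interceptor */
}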
+ * + * @param rk The client instance. + * @param conf The client instance's final configuration. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * @param errstr A human readable error string in case the interceptor fails. + * @param errstr_size Maximum space (including \0) in \p errstr. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + * + * @warning The \p rk client instance will not be fully set up when this + * interceptor is called and the interceptor MUST NOT call any + * other rk-specific APIs than rd_kafka_interceptor_add..(). + * + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t)( + rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size); + + +/** + * @brief on_destroy() is called from rd_kafka_destroy() or (rd_kafka_new() + * if rd_kafka_new() fails during initialization). + * + * @param rk The client instance. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + */ +typedef rd_kafka_resp_err_t( + rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque); + + + +/** + * @brief on_send() is called from rd_kafka_produce*() (et.al) prior to + * the partitioner being called. + * + * @param rk The client instance. + * @param rkmessage The message being produced. Immutable. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @remark This interceptor is only used by producer instances. + * + * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified + * by the interceptor. + * + * @remark If the partitioner fails or an unknown partition was specified, + * the on_acknowledgement() interceptor chain will be called from + * within the rd_kafka_produce*() call to maintain send-acknowledgement + * symmetry. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); + +/** + * @brief on_acknowledgement() is called to inform interceptors that a message + * was succesfully delivered or permanently failed delivery. + * The interceptor chain is called from internal librdkafka background + * threads, or rd_kafka_produce*() if the partitioner failed. + * + * @param rk The client instance. + * @param rkmessage The message being produced. Immutable. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @remark This interceptor is only used by producer instances. + * + * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified + * by the interceptor. + * + * @warning The on_acknowledgement() method may be called from internal + * librdkafka threads. An on_acknowledgement() interceptor MUST NOT + * call any librdkafka API's associated with the \p rk, or perform + * any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); + + +/** + * @brief on_consume() is called just prior to passing the message to the + * application in rd_kafka_consumer_poll(), rd_kafka_consume*(), + * the event interface, etc. + * + * @param rk The client instance. + * @param rkmessage The message being consumed. Immutable. 
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @remark This interceptor is only used by consumer instances. + * + * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified + * by the interceptor. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); + +/** + * @brief on_commit() is called on completed or failed offset commit. + * It is called from internal librdkafka threads. + * + * @param rk The client instance. + * @param offsets List of topic+partition+offset+error that were committed. + * The error message of each partition should be checked for + * error. + * @param err The commit error, if any. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @remark This interceptor is only used by consumer instances. + * + * @warning The on_commit() interceptor is called from internal + * librdkafka threads. An on_commit() interceptor MUST NOT + * call any librdkafka API's associated with the \p rk, or perform + * any blocking or prolonged work. + * + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque); + + +/** + * @brief on_request_sent() is called when a request has been fully written + * to a broker TCP connections socket. + * + * @param rk The client instance. + * @param sockfd Socket file descriptor. + * @param brokername Broker request is being sent to. + * @param brokerid Broker request is being sent to. + * @param ApiKey Kafka protocol request type. + * @param ApiVersion Kafka protocol request type version. + * @param CorrId Kafka protocol request correlation id. + * @param size Size of request. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @warning The on_request_sent() interceptor is called from internal + * librdkafka broker threads. An on_request_sent() interceptor MUST NOT + * call any librdkafka API's associated with the \p rk, or perform + * any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque); + + +/** + * @brief on_response_received() is called when a protocol response has been + * fully received from a broker TCP connection socket but before the + * response payload is parsed. + * + * @param rk The client instance. + * @param sockfd Socket file descriptor (always -1). + * @param brokername Broker response was received from, possibly empty string + * on error. + * @param brokerid Broker response was received from. + * @param ApiKey Kafka protocol request type or -1 on error. + * @param ApiVersion Kafka protocol request type version or -1 on error. + * @param CorrId Kafka protocol request correlation id, possibly -1 on error. + * @param size Size of response, possibly 0 on error. + * @param rtt Request round-trip-time in microseconds, possibly -1 on error. + * @param err Receive error. 
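[Editor's note] A sketch of a producer-side interceptor pair matching the on_send()/on_acknowledgement() typedefs above: on_send() counts outgoing messages and on_acknowledgement() counts delivery failures, keeping to the documented constraints (no librdkafka calls on rk, no blocking work). The counter names are illustrative.

#include <stdatomic.h>

static _Atomic long msgs_sent, msgs_failed;

static rd_kafka_resp_err_t
my_on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
        atomic_fetch_add(&msgs_sent, 1);
        return RD_KAFKA_RESP_ERR_NO_ERROR;  /* a failure would only be logged */
}

static rd_kafka_resp_err_t
my_on_acknowledgement(rd_kafka_t *rk, rd_kafka_message_t *rkmessage,
                      void *ic_opaque) {
        if (rkmessage->err)                 /* permanent delivery failure */
                atomic_fetch_add(&msgs_failed, 1);
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}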
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @warning The on_response_received() interceptor is called from internal + * librdkafka broker threads. An on_response_received() interceptor + * MUST NOT call any librdkafka API's associated with the \p rk, or + * perform any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque); + + +/** + * @brief on_thread_start() is called from a newly created librdkafka-managed + * thread. + + * @param rk The client instance. + * @param thread_type Thread type. + * @param thread_name Human-readable thread name, may not be unique. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @warning The on_thread_start() interceptor is called from internal + * librdkafka threads. An on_thread_start() interceptor MUST NOT + * call any librdkafka API's associated with the \p rk, or perform + * any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); + + +/** + * @brief on_thread_exit() is called just prior to a librdkafka-managed + * thread exiting from the exiting thread itself. + * + * @param rk The client instance. + * @param thread_type Thread type.n + * @param thread_name Human-readable thread name, may not be unique. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @remark Depending on the thread type, librdkafka may execute additional + * code on the thread after on_thread_exit() returns. + * + * @warning The on_thread_exit() interceptor is called from internal + * librdkafka threads. An on_thread_exit() interceptor MUST NOT + * call any librdkafka API's associated with the \p rk, or perform + * any blocking or prolonged work. + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); + + +/** + * @brief on_broker_state_change() is called just after a broker + * has been created or its state has been changed. + * + * @param rk The client instance. + * @param broker_id The broker id (-1 is used for bootstrap brokers). + * @param secproto The security protocol. + * @param name The original name of the broker. + * @param port The port of the broker. + * @param state Broker state name. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_broker_state_change_t)( + rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, + int port, + const char *state, + void *ic_opaque); + + +/** + * @brief Append an on_conf_set() interceptor. + * + * @param conf Configuration object. + * @param ic_name Interceptor name, used in logging. + * @param on_conf_set Function pointer. 
+ * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, + void *ic_opaque); + + +/** + * @brief Append an on_conf_dup() interceptor. + * + * @param conf Configuration object. + * @param ic_name Interceptor name, used in logging. + * @param on_conf_dup Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, + void *ic_opaque); + +/** + * @brief Append an on_conf_destroy() interceptor. + * + * @param conf Configuration object. + * @param ic_name Interceptor name, used in logging. + * @param on_conf_destroy Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR + * + * @remark Multiple on_conf_destroy() interceptors are allowed to be added + * to the same configuration object. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, + void *ic_opaque); + + +/** + * @brief Append an on_new() interceptor. + * + * @param conf Configuration object. + * @param ic_name Interceptor name, used in logging. + * @param on_new Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @remark Since the on_new() interceptor is added to the configuration object + * it may be copied by rd_kafka_conf_dup(). + * An interceptor implementation must thus be able to handle + * the same interceptor,ic_opaque tuple to be used by multiple + * client instances. + * + * @remark An interceptor plugin should check the return value to make sure it + * has not already been added. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_new_t *on_new, + void *ic_opaque); + + + +/** + * @brief Append an on_destroy() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_destroy Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_destroy_t *on_destroy, + void *ic_opaque); + + +/** + * @brief Append an on_send() interceptor. + * + * @param rk Client instance. 
+ * @param ic_name Interceptor name, used in logging. + * @param on_send Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing intercepted with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_send_t *on_send, + void *ic_opaque); + +/** + * @brief Append an on_acknowledgement() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_acknowledgement Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, + void *ic_opaque); + + +/** + * @brief Append an on_consume() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_consume Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_consume_t *on_consume, + void *ic_opaque); + + +/** + * @brief Append an on_commit() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_commit() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_commit_t *on_commit, + void *ic_opaque); + + +/** + * @brief Append an on_request_sent() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_request_sent() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, + void *ic_opaque); + + +/** + * @brief Append an on_response_received() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_response_received() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. 
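[Editor's note] A sketch of the wiring the interceptor section describes: configuration-level code adds on_new() to the conf object, and on_new() attaches the instance-level interceptors to the newly created client, reusing the callbacks sketched earlier. "my_plugin" is an illustrative ic_name.

static rd_kafka_resp_err_t
my_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque,
          char *errstr, size_t errstr_size) {
        /* RD_KAFKA_RESP_ERR__CONFLICT from these calls would indicate the
         * same ic_name/function pair was already registered. */
        rd_kafka_interceptor_add_on_send(rk, "my_plugin", my_on_send,
                                         ic_opaque);
        rd_kafka_interceptor_add_on_acknowledgement(rk, "my_plugin",
                                                    my_on_acknowledgement,
                                                    ic_opaque);
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}

/* From conf_init() (or on_conf_set()/on_conf_dup()) in the plugin sketch:
 *   rd_kafka_conf_interceptor_add_on_new(conf, "my_plugin", my_on_new, st);
 */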
+ * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_response_received_t *on_response_received, + void *ic_opaque); + + +/** + * @brief Append an on_thread_start() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_thread_start() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, + void *ic_opaque); + + +/** + * @brief Append an on_thread_exit() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_thread_exit() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, + void *ic_opaque); + + +/** + * @brief Append an on_broker_state_change() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_broker_state_change() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, + void *ic_opaque); + + + +/**@}*/ + + + +/** + * @name Auxiliary types + * + * @{ + */ + + + +/** + * @brief Topic result provides per-topic operation result information. + * + */ + +/** + * @returns the error code for the given topic result. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres); + +/** + * @returns the human readable error string for the given topic result, + * or NULL if there was no error. + * + * @remark lifetime of the returned string is the same as the \p topicres. + */ +RD_EXPORT const char * +rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres); + +/** + * @returns the name of the topic for the given topic result. + * @remark lifetime of the returned string is the same as the \p topicres. + * + */ +RD_EXPORT const char * +rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres); + +/** + * @brief Group result provides per-group operation result information. + * + */ + +/** + * @returns the error for the given group result, or NULL on success. + * @remark lifetime of the returned error is the same as the \p groupres. 
+ */ +RD_EXPORT const rd_kafka_error_t * +rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres); + +/** + * @returns the name of the group for the given group result. + * @remark lifetime of the returned string is the same as the \p groupres. + * + */ +RD_EXPORT const char * +rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres); + +/** + * @returns the partitions/offsets for the given group result, if applicable + * to the request type, else NULL. + * @remark lifetime of the returned list is the same as the \p groupres. + */ +RD_EXPORT const rd_kafka_topic_partition_list_t * +rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres); + + +/**@}*/ + + +/** + * @name Admin API + * @{ + * + * @brief The Admin API enables applications to perform administrative + * Apache Kafka tasks, such as creating and deleting topics, + * altering and reading broker configuration, etc. + * + * The Admin API is asynchronous and makes use of librdkafka's standard + * \c rd_kafka_queue_t queues to propagate the result of an admin operation + * back to the application. + * The supplied queue may be any queue, such as a temporary single-call queue, + * a shared queue used for multiple requests, or even the main queue or + * consumer queues. + * + * Use \c rd_kafka_queue_poll() to collect the result of an admin operation + * from the queue of your choice, then extract the admin API-specific result + * type by using the corresponding \c rd_kafka_event_CreateTopics_result, + * \c rd_kafka_event_DescribeConfigs_result, etc, methods. + * Use the getter methods on the \c .._result_t type to extract response + * information and finally destroy the result and event by calling + * \c rd_kafka_event_destroy(). + * + * Use rd_kafka_event_error() and rd_kafka_event_error_string() to acquire + * the request-level error/success for an Admin API request. + * Even if the returned value is \c RD_KAFKA_RESP_ERR_NO_ERROR there + * may be individual objects (topics, resources, etc) that have failed. + * Extract per-object error information with the corresponding + * \c rd_kafka_..._result_topics|resources|..() to check per-object errors. + * + * Locally triggered errors: + * - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not + * become available in the time allowed by AdminOption_set_request_timeout. 
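[Editor's note] A small sketch of consuming per-topic results with the auxiliary getters above. How the restopics array and cnt were obtained (for example from a CreateTopics result via rd_kafka_CreateTopics_result_topics(), declared elsewhere in this header) is left to the caller.

static void report_topic_results(const rd_kafka_topic_result_t **restopics,
                                 size_t cnt) {
        for (size_t i = 0; i < cnt; i++) {
                if (rd_kafka_topic_result_error(restopics[i]))
                        fprintf(stderr, "%s: %s\n",
                                rd_kafka_topic_result_name(restopics[i]),
                                rd_kafka_topic_result_error_string(
                                    restopics[i]));
                else
                        printf("%s: ok\n",
                               rd_kafka_topic_result_name(restopics[i]));
        }
}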
+ */ + + +/** + * @enum rd_kafka_admin_op_t + * + * @brief Admin operation enum name for use with rd_kafka_AdminOptions_new() + * + * @sa rd_kafka_AdminOptions_new() + */ +typedef enum rd_kafka_admin_op_t { + RD_KAFKA_ADMIN_OP_ANY = 0, /**< Default value */ + RD_KAFKA_ADMIN_OP_CREATETOPICS, /**< CreateTopics */ + RD_KAFKA_ADMIN_OP_DELETETOPICS, /**< DeleteTopics */ + RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, /**< CreatePartitions */ + RD_KAFKA_ADMIN_OP_ALTERCONFIGS, /**< AlterConfigs */ + RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, /**< DescribeConfigs */ + RD_KAFKA_ADMIN_OP_DELETERECORDS, /**< DeleteRecords */ + RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */ + /** DeleteConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, + RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */ + RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */ + RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, /**< ListConsumerGroups */ + RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, /**< DescribeConsumerGroups */ + /** ListConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, + /** AlterConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, + /** IncrementalAlterConfigs */ + RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS, + /** DescribeUserScramCredentials */ + RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS, + /** AlterUserScramCredentials */ + RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS, + RD_KAFKA_ADMIN_OP_DESCRIBETOPICS, /**< DescribeTopics */ + RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER, /**< DescribeCluster */ + RD_KAFKA_ADMIN_OP_LISTOFFSETS, /**< ListOffsets */ + RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ +} rd_kafka_admin_op_t; + +/** + * @brief AdminOptions provides a generic mechanism for setting optional + * parameters for the Admin API requests. + * + * @remark Since AdminOptions is decoupled from the actual request type + * there is no enforcement to prevent setting unrelated properties, + * e.g. setting validate_only on a DescribeConfigs request is allowed + * but is silently ignored by DescribeConfigs. + * Future versions may introduce such enforcement. + */ + + +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t; + +/** + * @enum rd_kafka_IsolationLevel_t + * + * @brief IsolationLevel enum name for use with rd_kafka_AdminOptions_new() + * + * @sa rd_kafka_AdminOptions_new() + */ +typedef enum rd_kafka_IsolationLevel_t { + RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED = 0, + RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED = 1 +} rd_kafka_IsolationLevel_t; + +/** + * @brief Create a new AdminOptions object. + * + * The options object is not modified by the Admin API request APIs, + * (e.g. CreateTopics) and may be reused for multiple calls. + * + * @param rk Client instance. + * @param for_api Specifies what Admin API this AdminOptions object will be used + * for, which will enforce what AdminOptions_set_..() calls may + * be used based on the API, causing unsupported set..() calls + * to fail. + * Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement + * allowing any option to be set, even if the option + * is not used in a future call to an Admin API method. + * + * @returns a new AdminOptions object (which must be freed with + * rd_kafka_AdminOptions_destroy()), or NULL if \p for_api was set to + * an unknown API op type. + */ +RD_EXPORT rd_kafka_AdminOptions_t * +rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api); + + +/** + * @brief Destroy a AdminOptions object. 
+ */ +RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options); + + +/** + * @brief Sets the overall request timeout, including broker lookup, + * request transmission, operation time on broker, and response. + * + * @param options Admin options. + * @param timeout_ms Timeout in milliseconds. Defaults to `socket.timeout.ms`. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or + * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which + * case an error string will be written \p errstr. + * + * @remark This option is valid for all Admin API requests. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size); + + +/** + * @brief Sets the broker's operation timeout, such as the timeout for + * CreateTopics to complete the creation of topics on the controller + * before returning a result to the application. + * + * CreateTopics: values <= 0 will return immediately after triggering topic + * creation, while > 0 will wait this long for topic creation to propagate + * in cluster. Default: 60 seconds. + * + * DeleteTopics: same semantics as CreateTopics. + * CreatePartitions: same semantics as CreateTopics. + * + * @param options Admin options. + * @param timeout_ms Timeout in milliseconds. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or + * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which + * case an error string will be written \p errstr. + * + * @remark This option is valid for CreateTopics, DeleteTopics, + * CreatePartitions, and DeleteRecords. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size); + + +/** + * @brief Tell broker to only validate the request, without performing + * the requested operation (create topics, etc). + * + * @param options Admin options. + * @param true_or_false Defaults to false. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an + * error code on failure in which case an error string will + * be written \p errstr. + * + * @remark This option is valid for CreateTopics, + * CreatePartitions, AlterConfigs. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, + int true_or_false, + char *errstr, + size_t errstr_size); + + +/** + * @brief Override what broker the Admin request will be sent to. + * + * By default, Admin requests are sent to the controller broker, with + * the following exceptions: + * - AlterConfigs with a BROKER resource are sent to the broker id set + * as the resource name. 
+ *  - IncrementalAlterConfigs with a BROKER resource are sent to the broker id
+ *    set as the resource name.
+ *  - DescribeConfigs with a BROKER resource are sent to the broker id set
+ *    as the resource name.
+ *
+ * @param options Admin options.
+ * @param broker_id The broker to send the request to.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ *               this location that must be of at least \p errstr_size bytes.
+ *               The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
+ *          error code on failure in which case an error string will
+ *          be written \p errstr.
+ *
+ * @remark This API should typically not be used, but serves as a workaround
+ *         if new resource types are added to the broker that the client
+ *         does not know where to send requests for.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options,
+                                 int32_t broker_id,
+                                 char *errstr,
+                                 size_t errstr_size);
+
+
+/**
+ * @brief Whether broker should return stable offsets
+ *        (transaction-committed).
+ *
+ * @param options Admin options.
+ * @param true_or_false Defaults to false.
+ *
+ * @return NULL on success, a new error instance that must be
+ *         released with rd_kafka_error_destroy() in case of error.
+ *
+ * @remark This option is valid for ListConsumerGroupOffsets.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(
+    rd_kafka_AdminOptions_t *options,
+    int true_or_false);
+
+/**
+ * @brief Whether broker should return authorized operations for the given
+ *        resource in the DescribeConsumerGroups, DescribeTopics, or
+ *        DescribeCluster calls.
+ *
+ * @param options Admin options.
+ * @param true_or_false Defaults to false.
+ *
+ * @return NULL on success, a new error instance that must be
+ *         released with rd_kafka_error_destroy() in case of error.
+ *
+ * @remark This option is valid for DescribeConsumerGroups, DescribeTopics,
+ *         DescribeCluster.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_AdminOptions_set_include_authorized_operations(
+    rd_kafka_AdminOptions_t *options,
+    int true_or_false);
+
+/**
+ * @brief Set consumer group states to query for.
+ *
+ * @param options Admin options.
+ * @param consumer_group_states Array of consumer group states.
+ * @param consumer_group_states_cnt Size of the \p consumer_group_states array.
+ *
+ * @return NULL on success, a new error instance that must be
+ *         released with rd_kafka_error_destroy() in case of error.
+ *
+ * @remark This option is valid for ListConsumerGroups.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(
+    rd_kafka_AdminOptions_t *options,
+    const rd_kafka_consumer_group_state_t *consumer_group_states,
+    size_t consumer_group_states_cnt);
+
+/**
+ * @brief Set Isolation Level to an allowed `rd_kafka_IsolationLevel_t` value.
+ */
+RD_EXPORT
+rd_kafka_error_t *
+rd_kafka_AdminOptions_set_isolation_level(rd_kafka_AdminOptions_t *options,
+                                          rd_kafka_IsolationLevel_t value);
+
+/**
+ * @brief Set application opaque value that can be extracted from the
+ *        result event using rd_kafka_event_opaque()
+ */
+RD_EXPORT void
+rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options,
+                                 void *ev_opaque);
+
+
+
+/**
+ * @enum rd_kafka_AclOperation_t
+ * @brief Apache Kafka ACL operation types. Common type for multiple Admin API
+ *        functions.
+ */ +typedef enum rd_kafka_AclOperation_t { + RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_ACL_OPERATION_ANY = + 1, /**< In a filter, matches any AclOperation */ + RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */ + RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */ + RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */ + RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */ + RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */ + RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */ + RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = + 9, /**< CLUSTER_ACTION operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = + 10, /**< DESCRIBE_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = + 11, /**< ALTER_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = + 12, /**< IDEMPOTENT_WRITE operation */ + RD_KAFKA_ACL_OPERATION__CNT +} rd_kafka_AclOperation_t; + +/**@}*/ + +/** + * @name Admin API - Topics + * @brief Topic related operations. + * @{ + * + */ + + +/*! Defines a new topic to be created. */ +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t; + +/** + * @brief Create a new NewTopic object. This object is later passed to + * rd_kafka_CreateTopics(). + * + * @param topic Topic name to create. + * @param num_partitions Number of partitions in topic, or -1 to use the + * broker's default partition count (>= 2.4.0). + * @param replication_factor Default replication factor for the topic's + * partitions, or -1 to use the broker's default + * replication factor (>= 2.4.0) or if + * set_replica_assignment() will be used. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. + * + * + * @returns a new allocated NewTopic object, or NULL if the input parameters + * are invalid. + * Use rd_kafka_NewTopic_destroy() to free object when done. + */ +RD_EXPORT rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, + int num_partitions, + int replication_factor, + char *errstr, + size_t errstr_size); + +/** + * @brief Destroy and free a NewTopic object previously created with + * rd_kafka_NewTopic_new() + */ +RD_EXPORT void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic); + + +/** + * @brief Helper function to destroy all NewTopic objects in the \p new_topics + * array (of \p new_topic_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt); + + +/** + * @brief Set the replica (broker) assignment for \p partition to the + * replica set in \p broker_ids (of \p broker_id_cnt elements). + * + * @remark When this method is used, rd_kafka_NewTopic_new() must have + * been called with a \c replication_factor of -1. + * + * @remark An application must either set the replica assignment for + * all new partitions, or none. + * + * @remark If called, this function must be called consecutively for each + * partition, starting at 0. + * + * @remark Use rd_kafka_metadata() to retrieve the list of brokers + * in the cluster. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code + * if the arguments were invalid. 
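+ *
+ * A brief sketch of a manual assignment, assuming broker ids 1, 2 and 3 exist
+ * in the cluster (the topic name, partition count and broker ids are
+ * placeholders and return codes are not checked):
+ * @code
+ *   char errstr[512];
+ *   int32_t replicas_p0[] = {1, 2};
+ *   int32_t replicas_p1[] = {2, 3};
+ *   // replication_factor must be -1 when assigning replicas manually.
+ *   rd_kafka_NewTopic_t *newt =
+ *       rd_kafka_NewTopic_new("mytopic", 2, -1, errstr, sizeof(errstr));
+ *   rd_kafka_NewTopic_set_replica_assignment(newt, 0, replicas_p0, 2,
+ *                                            errstr, sizeof(errstr));
+ *   rd_kafka_NewTopic_set_replica_assignment(newt, 1, replicas_p1, 2,
+ *                                            errstr, sizeof(errstr));
+ * @endcode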
+ * + * @sa rd_kafka_AdminOptions_set_validate_only() + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, + int32_t partition, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); + +/** + * @brief Set (broker-side) topic configuration name/value pair. + * + * @remark The name and value are not validated by the client, the validation + * takes place on the broker. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code + * if the arguments were invalid. + * + * @sa rd_kafka_AdminOptions_set_validate_only() + * @sa http://kafka.apache.org/documentation.html#topicconfigs + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, + const char *name, + const char *value); + + +/** + * @brief Create topics in cluster as specified by the \p new_topics + * array of size \p new_topic_cnt elements. + * + * @param rk Client instance. + * @param new_topics Array of new topics to create. + * @param new_topic_cnt Number of elements in \p new_topics array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * Supported admin options: + * - rd_kafka_AdminOptions_set_validate_only() - default false + * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds + * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT + */ +RD_EXPORT void rd_kafka_CreateTopics(rd_kafka_t *rk, + rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + +/* + * CreateTopics result type and methods + */ + +/** + * @brief Get an array of topic results from a CreateTopics result. + * + * The returned \p topics life-time is the same as the \p result object. + * + * @param result Result to get topics from. + * @param cntp Updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics( + const rd_kafka_CreateTopics_result_t *result, + size_t *cntp); + + + +/* + * DeleteTopics - delete topics from cluster + * + */ + +/*! Represents a topic to be deleted. */ +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t; + +/** + * @brief Create a new DeleteTopic object. This object is later passed to + * rd_kafka_DeleteTopics(). + * + * @param topic Topic name to delete. + * + * @returns a new allocated DeleteTopic object. + * Use rd_kafka_DeleteTopic_destroy() to free object when done. + */ +RD_EXPORT rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic); + +/** + * @brief Destroy and free a DeleteTopic object previously created with + * rd_kafka_DeleteTopic_new() + */ +RD_EXPORT void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic); + +/** + * @brief Helper function to destroy all DeleteTopic objects in + * the \p del_topics array (of \p del_topic_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void +rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt); + +/** + * @brief Delete topics from cluster as specified by the \p topics + * array of size \p topic_cnt elements. + * + * @param rk Client instance. + * @param del_topics Array of topics to delete. + * @param del_topic_cnt Number of elements in \p topics array. 
+ * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DELETETOPICS_RESULT + */ +RD_EXPORT +void rd_kafka_DeleteTopics(rd_kafka_t *rk, + rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * DeleteTopics result type and methods + */ + +/** + * @brief Get an array of topic results from a DeleteTopics result. + * + * The returned \p topics life-time is the same as the \p result object. + * + * @param result Result to get topic results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics( + const rd_kafka_DeleteTopics_result_t *result, + size_t *cntp); + + +/**@}*/ + +/** + * @name Admin API - Partitions + * @brief Partition related operations. + * @{ + * + */ + +/*! Defines a new partition to be created. */ +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t; + +/** + * @brief Create a new NewPartitions. This object is later passed to + * rd_kafka_CreatePartitions() to increase the number of partitions + * to \p new_total_cnt for an existing topic. + * + * @param topic Topic name to create more partitions for. + * @param new_total_cnt Increase the topic's partition count to this value. + * @param errstr A human readable error string (nul-terminated) is written to + * this location that must be of at least \p errstr_size bytes. + * The \p errstr is only written in case of error. + * @param errstr_size Writable size in \p errstr. + * + * @returns a new allocated NewPartitions object, or NULL if the + * input parameters are invalid. + * Use rd_kafka_NewPartitions_destroy() to free object when done. + */ +RD_EXPORT rd_kafka_NewPartitions_t * +rd_kafka_NewPartitions_new(const char *topic, + size_t new_total_cnt, + char *errstr, + size_t errstr_size); + +/** + * @brief Destroy and free a NewPartitions object previously created with + * rd_kafka_NewPartitions_new() + */ +RD_EXPORT void +rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts); + +/** + * @brief Helper function to destroy all NewPartitions objects in the + * \p new_parts array (of \p new_parts_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void +rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, + size_t new_parts_cnt); + +/** + * @brief Set the replica (broker id) assignment for \p new_partition_idx to the + * replica set in \p broker_ids (of \p broker_id_cnt elements). + * + * @remark An application must either set the replica assignment for + * all new partitions, or none. + * + * @remark If called, this function must be called consecutively for each + * new partition being created, + * where \p new_partition_idx 0 is the first new partition, + * 1 is the second, and so on. + * + * @remark \p broker_id_cnt should match the topic's replication factor. + * + * @remark Use rd_kafka_metadata() to retrieve the list of brokers + * in the cluster. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code + * if the arguments were invalid. 
+ * + * @sa rd_kafka_AdminOptions_set_validate_only() + */ +RD_EXPORT rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment( + rd_kafka_NewPartitions_t *new_parts, + int32_t new_partition_idx, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); + + +/** + * @brief Create additional partitions for the given topics, as specified + * by the \p new_parts array of size \p new_parts_cnt elements. + * + * @param rk Client instance. + * @param new_parts Array of topics for which new partitions are to be created. + * @param new_parts_cnt Number of elements in \p new_parts array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * Supported admin options: + * - rd_kafka_AdminOptions_set_validate_only() - default false + * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds + * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT + */ +RD_EXPORT void rd_kafka_CreatePartitions(rd_kafka_t *rk, + rd_kafka_NewPartitions_t **new_parts, + size_t new_parts_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * CreatePartitions result type and methods + */ + +/** + * @brief Get an array of topic results from a CreatePartitions result. + * + * The returned \p topics life-time is the same as the \p result object. + * + * @param result Result o get topic results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_topic_result_t ** +rd_kafka_CreatePartitions_result_topics( + const rd_kafka_CreatePartitions_result_t *result, + size_t *cntp); + +/**@}*/ + +/** + * @name Admin API - Configuration + * @brief Cluster, broker, topic configuration entries, sources, etc. + * @{ + * + */ + +/** + * @enum rd_kafka_ConfigSource_t + * + * @brief Apache Kafka config sources. + * + * @remark These entities relate to the cluster, not the local client. + * + * @sa rd_kafka_conf_set(), et.al. for local client configuration. + */ +typedef enum rd_kafka_ConfigSource_t { + /** Source unknown, e.g., in the ConfigEntry used for alter requests + * where source is not set */ + RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0, + /** Dynamic topic config that is configured for a specific topic */ + RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1, + /** Dynamic broker config that is configured for a specific broker */ + RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2, + /** Dynamic broker config that is configured as default for all + * brokers in the cluster */ + RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3, + /** Static broker config provided as broker properties at startup + * (e.g. from server.properties file) */ + RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4, + /** Built-in default configuration for configs that have a + * default value */ + RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5, + + /** Number of source types defined */ + RD_KAFKA_CONFIG_SOURCE__CNT, +} rd_kafka_ConfigSource_t; + + +/** + * @returns a string representation of the \p confsource. + */ +RD_EXPORT const char * +rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource); + + +/*! Apache Kafka configuration entry. 
*/ +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t; + +/** + * @returns the configuration property name + */ +RD_EXPORT const char * +rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry); + +/** + * @returns the configuration value, may be NULL for sensitive or unset + * properties. + */ +RD_EXPORT const char * +rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry); + +/** + * @returns the config source. + */ +RD_EXPORT rd_kafka_ConfigSource_t +rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry); + +/** + * @returns 1 if the config property is read-only on the broker, else 0. + * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. + */ +RD_EXPORT int +rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry); + +/** + * @returns 1 if the config property is set to its default value on the broker, + * else 0. + * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. + */ +RD_EXPORT int +rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry); + +/** + * @returns 1 if the config property contains sensitive information (such as + * security configuration), else 0. + * @remark An application should take care not to include the value of + * sensitive configuration entries in its output. + * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. + */ +RD_EXPORT int +rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry); + +/** + * @returns 1 if this entry is a synonym, else 0. + */ +RD_EXPORT int +rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry); + + +/** + * @returns the synonym config entry array. + * + * @param entry Entry to get synonyms for. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned entry is the same as \p conf . + * @remark Shall only be used on a DescribeConfigs result, + * otherwise returns NULL. + */ +RD_EXPORT const rd_kafka_ConfigEntry_t ** +rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, + size_t *cntp); + + + +/** + * @enum rd_kafka_ResourceType_t + * @brief Apache Kafka resource types + */ +typedef enum rd_kafka_ResourceType_t { + RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ + RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ + RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ + RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ + RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */ +} rd_kafka_ResourceType_t; + +/** + * @enum rd_kafka_ResourcePatternType_t + * @brief Apache Kafka pattern types + */ +typedef enum rd_kafka_ResourcePatternType_t { + /** Unknown */ + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0, + /** Any (used for lookups) */ + RD_KAFKA_RESOURCE_PATTERN_ANY = 1, + /** Match: will perform pattern matching */ + RD_KAFKA_RESOURCE_PATTERN_MATCH = 2, + /** Literal: A literal resource name */ + RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3, + /** Prefixed: A prefixed resource name */ + RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4, + RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT, +} rd_kafka_ResourcePatternType_t; + +/** + * @enum rd_kafka_AlterConfigOpType_t + * @brief Incremental alter configs operations. 
+ */
+typedef enum rd_kafka_AlterConfigOpType_t {
+        RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0,
+        RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1,
+        RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2,
+        RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3,
+        RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT,
+} rd_kafka_AlterConfigOpType_t;
+
+/**
+ * @returns a string representation of the \p resource_pattern_type
+ */
+RD_EXPORT const char *rd_kafka_ResourcePatternType_name(
+    rd_kafka_ResourcePatternType_t resource_pattern_type);
+
+/**
+ * @returns a string representation of the \p restype
+ */
+RD_EXPORT const char *
+rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
+
+/*! Apache Kafka configuration resource. */
+typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
+
+
+/**
+ * @brief Create new ConfigResource object.
+ *
+ * @param restype The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC)
+ * @param resname The resource name (e.g., the topic name)
+ *
+ * @returns a newly allocated object
+ */
+RD_EXPORT rd_kafka_ConfigResource_t *
+rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype,
+                            const char *resname);
+
+/**
+ * @brief Destroy and free a ConfigResource object previously created with
+ *        rd_kafka_ConfigResource_new()
+ */
+RD_EXPORT void
+rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
+
+
+/**
+ * @brief Helper function to destroy all ConfigResource objects in
+ *        the \p configs array (of \p config_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config,
+                                      size_t config_cnt);
+
+
+/**
+ * @brief Set configuration name/value pair.
+ *
+ * @param config ConfigResource to set config property on.
+ * @param name Configuration name, depends on resource type.
+ * @param value Configuration value, depends on resource type and \p name.
+ *              Set to \c NULL to revert configuration value to default.
+ *
+ * This will overwrite the current value.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource,
+ *          or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config,
+                                   const char *name,
+                                   const char *value);
+
+
+/**
+ * @brief Add the value of the configuration entry for a subsequent
+ *        incremental alter config operation. APPEND and SUBTRACT are
+ *        possible for list-type configuration entries only.
+ *
+ * @param config ConfigResource to add config property to.
+ * @param name Configuration name, depends on resource type.
+ * @param op_type Operation type, one of rd_kafka_AlterConfigOpType_t.
+ * @param value Configuration value, depends on resource type and \p name.
+ *              Set to \c NULL, only with op_type set to DELETE,
+ *              to revert configuration value to default.
+ *
+ * @returns NULL on success, or an rd_kafka_error_t *
+ *          with the corresponding error code and string.
+ *          Error ownership belongs to the caller.
+ *          Possible error codes:
+ *          - RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
+ */
+RD_EXPORT rd_kafka_error_t *rd_kafka_ConfigResource_add_incremental_config(
+    rd_kafka_ConfigResource_t *config,
+    const char *name,
+    rd_kafka_AlterConfigOpType_t op_type,
+    const char *value);
+
+
+/**
+ * @brief Get an array of config entries from a ConfigResource object.
+ *
+ * The returned object life-times are the same as the \p config object.
+ *
+ * @param config ConfigResource to get configs from.
+ * @param cntp is updated to the number of elements in the array.
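+ *
+ * A short sketch of iterating the returned entries, assuming \p config was
+ * taken from a DescribeConfigs result (illustrative only):
+ * @code
+ *   size_t cnt;
+ *   const rd_kafka_ConfigEntry_t **entries =
+ *       rd_kafka_ConfigResource_configs(config, &cnt);
+ *   for (size_t i = 0; i < cnt; i++)
+ *           printf("%s = %s%s\n",
+ *                  rd_kafka_ConfigEntry_name(entries[i]),
+ *                  rd_kafka_ConfigEntry_value(entries[i])
+ *                      ? rd_kafka_ConfigEntry_value(entries[i])
+ *                      : "(unset or sensitive)",
+ *                  rd_kafka_ConfigEntry_is_default(entries[i])
+ *                      ? " (default)"
+ *                      : "");
+ * @endcode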
+ */ +RD_EXPORT const rd_kafka_ConfigEntry_t ** +rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, + size_t *cntp); + + + +/** + * @returns the ResourceType for \p config + */ +RD_EXPORT rd_kafka_ResourceType_t +rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config); + +/** + * @returns the name for \p config + */ +RD_EXPORT const char * +rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config); + +/** + * @returns the error for this resource from an AlterConfigs request + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config); + +/** + * @returns the error string for this resource from an AlterConfigs + * request, or NULL if no error. + */ +RD_EXPORT const char * +rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config); + + +/* + * AlterConfigs - alter cluster configuration. + * + */ + + +/** + * @brief Update the configuration for the specified resources. + * Updates are not transactional so they may succeed for a subset + * of the provided resources while the others fail. + * The configuration for a particular resource is updated atomically, + * replacing values using the provided ConfigEntrys and reverting + * unspecified ConfigEntrys to their default values. + * + * @remark Requires broker version >=0.11.0.0 + * + * @warning AlterConfigs will replace all existing configuration for + * the provided resources with the new configuration given, + * reverting all other configuration to their default values. + * + * @remark Multiple resources and resource types may be set, but at most one + * resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call + * since these resource requests must be sent to the broker specified + * in the resource. + * + * @deprecated Use rd_kafka_IncrementalAlterConfigs(). + * + */ +RD_EXPORT +void rd_kafka_AlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + +/* + * AlterConfigs result type and methods + */ + +/** + * @brief Get an array of resource results from a AlterConfigs result. + * + * Use \c rd_kafka_ConfigResource_error() and + * \c rd_kafka_ConfigResource_error_string() to extract per-resource error + * results on the returned array elements. + * + * The returned object life-times are the same as the \p result object. + * + * @param result Result object to get resource results from. + * @param cntp is updated to the number of elements in the array. + * + * @returns an array of ConfigResource elements, or NULL if not available. + */ +RD_EXPORT const rd_kafka_ConfigResource_t ** +rd_kafka_AlterConfigs_result_resources( + const rd_kafka_AlterConfigs_result_t *result, + size_t *cntp); + + + +/* + * IncrementalAlterConfigs - alter cluster configuration incrementally. + * + */ + + +/** + * @brief Incrementally update the configuration for the specified resources. + * Updates are not transactional so they may succeed for some resources + * while fail for others. The configs for a particular resource are + * updated atomically, executing the corresponding incremental operations + * on the provided configurations. + * + * @remark Requires broker version >=2.3.0 + * + * @remark Multiple resources and resource types may be set, but at most one + * resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call + * since these resource requests must be sent to the broker specified + * in the resource. 
Broker option will be ignored in this case. + * + * @param rk Client instance. + * @param configs Array of config entries to alter. + * @param config_cnt Number of elements in \p configs array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + */ +RD_EXPORT +void rd_kafka_IncrementalAlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + +/* + * IncrementalAlterConfigs result type and methods + */ + +/** + * @brief Get an array of resource results from a IncrementalAlterConfigs + * result. + * + * Use \c rd_kafka_ConfigResource_error() and + * \c rd_kafka_ConfigResource_error_string() to extract per-resource error + * results on the returned array elements. + * + * The returned object life-times are the same as the \p result object. + * + * @param result Result object to get resource results from. + * @param cntp is updated to the number of elements in the array. + * + * @returns an array of ConfigResource elements, or NULL if not available. + */ +RD_EXPORT const rd_kafka_ConfigResource_t ** +rd_kafka_IncrementalAlterConfigs_result_resources( + const rd_kafka_IncrementalAlterConfigs_result_t *result, + size_t *cntp); + + + +/* + * DescribeConfigs - retrieve cluster configuration. + * + */ + + +/** + * @brief Get configuration for the specified resources in \p configs. + * + * The returned configuration includes default values and the + * rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source() + * methods may be used to distinguish them from user supplied values. + * + * The value of config entries where rd_kafka_ConfigEntry_is_sensitive() + * is true will always be NULL to avoid disclosing sensitive + * information, such as security settings. + * + * Configuration entries where rd_kafka_ConfigEntry_is_read_only() + * is true can't be updated (with rd_kafka_AlterConfigs()). + * + * Synonym configuration entries are returned if the broker supports + * it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms(). + * + * @remark Requires broker version >=0.11.0.0 + * + * @remark Multiple resources and resource types may be requested, but at most + * one resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call + * since these resource requests must be sent to the broker specified + * in the resource. + */ +RD_EXPORT +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * DescribeConfigs result type and methods + */ + +/** + * @brief Get an array of resource results from a DescribeConfigs result. + * + * The returned \p resources life-time is the same as the \p result object. + * + * @param result Result object to get resource results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_ConfigResource_t ** +rd_kafka_DescribeConfigs_result_resources( + const rd_kafka_DescribeConfigs_result_t *result, + size_t *cntp); + + +/**@}*/ + +/** + * @name Admin API - DeleteRecords + * @brief delete records (messages) from partitions. + * @{ + * + */ + +/**! Represents records to be deleted */ +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t; + +/** + * @brief Create a new DeleteRecords object. This object is later passed to + * rd_kafka_DeleteRecords(). 
+ *
+ * \p before_offsets must contain \c topic, \c partition, and \c offset;
+ * the \c offset is the offset before which the messages will
+ * be deleted (exclusive).
+ * Set \c offset to RD_KAFKA_OFFSET_END (high-watermark) in order to
+ * delete all data in the partition.
+ *
+ * @param before_offsets For each partition delete all messages up to but not
+ *                       including the specified offset.
+ *
+ * @returns a new allocated DeleteRecords object.
+ *          Use rd_kafka_DeleteRecords_destroy() to free object when done.
+ */
+RD_EXPORT rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(
+    const rd_kafka_topic_partition_list_t *before_offsets);
+
+/**
+ * @brief Destroy and free a DeleteRecords object previously created with
+ *        rd_kafka_DeleteRecords_new()
+ */
+RD_EXPORT void
+rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
+
+/**
+ * @brief Helper function to destroy all DeleteRecords objects in
+ *        the \p del_records array (of \p del_record_cnt elements).
+ *        The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records,
+                                     size_t del_record_cnt);
+
+/**
+ * @brief Delete records (messages) in topic partitions older than the
+ *        offsets provided.
+ *
+ * @param rk Client instance.
+ * @param del_records The offsets to delete (up to).
+ *                    Currently only one DeleteRecords_t (but containing
+ *                    multiple offsets) is supported.
+ * @param del_record_cnt The number of elements in del_records, must be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ *  - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds.
+ *    Controls how long the brokers will wait for records to be deleted.
+ *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms.
+ *    Controls how long \c rdkafka will wait for the request to complete.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_DELETERECORDS_RESULT
+ */
+RD_EXPORT void rd_kafka_DeleteRecords(rd_kafka_t *rk,
+                                      rd_kafka_DeleteRecords_t **del_records,
+                                      size_t del_record_cnt,
+                                      const rd_kafka_AdminOptions_t *options,
+                                      rd_kafka_queue_t *rkqu);
+
+
+/*
+ * DeleteRecords result type and methods
+ */
+
+/**
+ * @brief Get a list of topic and partition results from a DeleteRecords result.
+ *        The returned objects will contain \c topic, \c partition, \c offset
+ *        and \c err. \c offset will be set to the post-deletion low-watermark
+ *        (smallest available offset of all live replicas). \c err will be set
+ *        per-partition if deletion failed.
+ *
+ * The returned object's life-time is the same as the \p result object.
+ */
+RD_EXPORT const rd_kafka_topic_partition_list_t *
+rd_kafka_DeleteRecords_result_offsets(
+    const rd_kafka_DeleteRecords_result_t *result);
+
+/**@}*/
+
+/**
+ * @name Admin API - DescribeTopics
+ * @{
+ */
+
+/**
+ * @brief Represents a collection of topics, to be passed to DescribeTopics.
+ *
+ */
+typedef struct rd_kafka_TopicCollection_s rd_kafka_TopicCollection_t;
+
+/**
+ * @brief TopicPartitionInfo represents a partition in the DescribeTopics result.
+ *
+ */
+typedef struct rd_kafka_TopicPartitionInfo_s rd_kafka_TopicPartitionInfo_t;
+
+/**
+ * @brief DescribeTopics result type.
+ *
+ */
+typedef struct rd_kafka_TopicDescription_s rd_kafka_TopicDescription_t;
+
+/**
+ * @brief Creates a new TopicCollection for passing to rd_kafka_DescribeTopics.
+ *
+ * @param topics A list of topics.
+ * @param topics_cnt Count of topics. + * + * @return a newly allocated TopicCollection object. Must be freed using + * rd_kafka_TopicCollection_destroy when done. + */ +RD_EXPORT +rd_kafka_TopicCollection_t * +rd_kafka_TopicCollection_of_topic_names(const char **topics, size_t topics_cnt); + +/** + * @brief Destroy and free a TopicCollection object created with + * rd_kafka_TopicCollection_new_* methods. + */ +RD_EXPORT void +rd_kafka_TopicCollection_destroy(rd_kafka_TopicCollection_t *topics); + +/** + * @brief Describe topics as specified by the \p topics + * array of size \p topics_cnt elements. + * + * @param rk Client instance. + * @param topics Collection of topics to describe. + * @param options Optional admin options, or NULL for defaults. + * Valid options: + * - include_authorized_operations + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeTopics(rd_kafka_t *rk, + const rd_kafka_TopicCollection_t *topics, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Get an array of topic results from a DescribeTopics result. + * + * @param result Result to get topics results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_TopicDescription_t **rd_kafka_DescribeTopics_result_topics( + const rd_kafka_DescribeTopics_result_t *result, + size_t *cntp); + + +/** + * @brief Gets an array of partitions for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @param cntp is updated to the number of partitions in the array. + * + * @return An array of TopicPartitionInfos. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const rd_kafka_TopicPartitionInfo_t **rd_kafka_TopicDescription_partitions( + const rd_kafka_TopicDescription_t *topicdesc, + size_t *cntp); + + +/** + * @brief Gets the partition id for \p partition. + * + * @param partition The partition info. + * + * @return The partition id. + */ +RD_EXPORT +const int rd_kafka_TopicPartitionInfo_partition( + const rd_kafka_TopicPartitionInfo_t *partition); + + +/** + * @brief Gets the partition leader for \p partition. + * + * @param partition The partition info. + * + * @return The partition leader. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_TopicPartitionInfo_leader( + const rd_kafka_TopicPartitionInfo_t *partition); + +/** + * @brief Gets the partition in-sync replicas for \p partition. + * + * @param partition The partition info. + * @param cntp is updated with in-sync replicas count. + * + * @return The in-sync replica nodes. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t ** +rd_kafka_TopicPartitionInfo_isr(const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp); + +/** + * @brief Gets the partition replicas for \p partition. + * + * @param partition The partition info. + * @param cntp is updated with partition replicas count. + * + * @return The partition replicas nodes. 
+ * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t **rd_kafka_TopicPartitionInfo_replicas( + const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp); + +/** + * @brief Gets the topic authorized ACL operations for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @param cntp is updated with authorized ACL operations count. + * + * @return The topic authorized operations. Is NULL if operations were not + * requested. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const rd_kafka_AclOperation_t *rd_kafka_TopicDescription_authorized_operations( + const rd_kafka_TopicDescription_t *topicdesc, + size_t *cntp); + +/** + * @brief Gets the topic name for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * + * @return The topic name. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const char * +rd_kafka_TopicDescription_name(const rd_kafka_TopicDescription_t *topicdesc); + +/** + * @brief Gets the topic id for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @return The topic id + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT const rd_kafka_Uuid_t *rd_kafka_TopicDescription_topic_id( + const rd_kafka_TopicDescription_t *topicdesc); + +/** + * @brief Gets if the \p topicdesc topic is internal. + * + * @param topicdesc The topic description. + * + * @return 1 if the topic is internal to Kafka, 0 otherwise. + */ +RD_EXPORT +int rd_kafka_TopicDescription_is_internal( + const rd_kafka_TopicDescription_t *topicdesc); + +/** + * @brief Gets the error for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * + * @return The topic description error. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const rd_kafka_error_t * +rd_kafka_TopicDescription_error(const rd_kafka_TopicDescription_t *topicdesc); + + +/**@}*/ + +/** + * @name Admin API - DescribeCluster + * @{ + */ + +/** + * @brief Describes the cluster. + * + * @param rk Client instance. + * @param options Optional admin options, or NULL for defaults. + * Valid options: + * - include_authorized_operations + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeCluster(rd_kafka_t *rk, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Gets the broker nodes for the \p result cluster. + * + * @param result The result of DescribeCluster. + * @param cntp is updated with the count of broker nodes. + * + * @return An array of broker nodes. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_Node_t **rd_kafka_DescribeCluster_result_nodes( + const rd_kafka_DescribeCluster_result_t *result, + size_t *cntp); + +/** + * @brief Gets the authorized ACL operations for the \p result cluster. + * + * @param result The result of DescribeCluster. + * @param cntp is updated with authorized ACL operations count. + * + * @return The cluster authorized operations. 
Is NULL if operations were not + * requested. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_AclOperation_t * +rd_kafka_DescribeCluster_result_authorized_operations( + const rd_kafka_DescribeCluster_result_t *result, + size_t *cntp); + +/** + * @brief Gets the current controller for the \p result cluster. + * + * @param result The result of DescribeCluster. + * + * @return The cluster current controller. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_DescribeCluster_result_controller( + const rd_kafka_DescribeCluster_result_t *result); + +/** + * @brief Gets the cluster id for the \p result cluster. + * + * @param result The result of DescribeCluster. + * + * @return The cluster id. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const char *rd_kafka_DescribeCluster_result_cluster_id( + const rd_kafka_DescribeCluster_result_t *result); + +/**@}*/ + + +/** + * @name Admin API - ListConsumerGroups + * @{ + */ + + +/** + * @brief ListConsumerGroups result for a single group + */ + +/**! ListConsumerGroups result for a single group */ +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t; + +/**! ListConsumerGroups results and errors */ +typedef struct rd_kafka_ListConsumerGroupsResult_s + rd_kafka_ListConsumerGroupsResult_t; + +/** + * @brief List the consumer groups available in the cluster. + * + * @param rk Client instance. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + */ +RD_EXPORT +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Gets the group id for the \p grplist group. + * + * @param grplist The group listing. + * + * @return The group id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grplist object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupListing_group_id( + const rd_kafka_ConsumerGroupListing_t *grplist); + +/** + * @brief Is the \p grplist group a simple consumer group. + * + * @param grplist The group listing. + * + * @return 1 if the group is a simple consumer group, + * else 0. + */ +RD_EXPORT +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + const rd_kafka_ConsumerGroupListing_t *grplist); + +/** + * @brief Gets state for the \p grplist group. + * + * @param grplist The group listing. + * + * @return A group state. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state( + const rd_kafka_ConsumerGroupListing_t *grplist); + +/** + * @brief Get an array of valid list groups from a ListConsumerGroups result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_ConsumerGroupListing_t ** +rd_kafka_ListConsumerGroups_result_valid( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp); + +/** + * @brief Get an array of errors from a ListConsumerGroups call result. 
+ * + * The returned errors life-time is the same as the \p result object. + * + * @param result ListConsumerGroups result. + * @param cntp Is updated to the number of elements in the array. + * + * @return Array of errors in \p result. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp); + +/**@}*/ + +/** + * @name Admin API - DescribeConsumerGroups + * @{ + */ + +/** + * @brief DescribeConsumerGroups result type. + * + */ +typedef struct rd_kafka_ConsumerGroupDescription_s + rd_kafka_ConsumerGroupDescription_t; + +/** + * @brief Member description included in ConsumerGroupDescription. + * + */ +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t; + +/** + * @brief Member assignment included in MemberDescription. + * + */ +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t; + +/** + * @brief Describe groups from cluster as specified by the \p groups + * array of size \p groups_cnt elements. + * + * @param rk Client instance. + * @param groups Array of groups to describe. + * @param groups_cnt Number of elements in \p groups array. + * @param options Optional admin options, or NULL for defaults. + * Valid options: + * - include_authorized_operations + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, + const char **groups, + size_t groups_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Get an array of group results from a DescribeConsumerGroups result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_ConsumerGroupDescription_t ** +rd_kafka_DescribeConsumerGroups_result_groups( + const rd_kafka_DescribeConsumerGroups_result_t *result, + size_t *cntp); + + +/** + * @brief Gets the group id for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupDescription_group_id( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the error for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group description error. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Is the \p grpdesc group a simple consumer group. + * + * @param grpdesc The group description. + * @return 1 if the group is a simple consumer group, + * else 0. + */ +RD_EXPORT +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + + +/** + * @brief Gets the partition assignor for the \p grpdesc group. 
+ * + * @param grpdesc The group description. + * + * @return The partition assignor. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupDescription_partition_assignor( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the authorized ACL operations for the \p grpdesc group. + * + * @param grpdesc The group description. + * @param cntp is updated with authorized ACL operations count. + * + * @return The group authorized operations. Is NULL if operations were not + * requested. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_AclOperation_t * +rd_kafka_ConsumerGroupDescription_authorized_operations( + const rd_kafka_ConsumerGroupDescription_t *grpdesc, + size_t *cntp); + +/** + * @brief Gets state for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return A group state. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the coordinator for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group coordinator. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the members count of \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The member count. + */ +RD_EXPORT +size_t rd_kafka_ConsumerGroupDescription_member_count( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets a member of \p grpdesc group. + * + * @param grpdesc The group description. + * @param idx The member idx. + * + * @return A member at index \p idx, or NULL if + * \p idx is out of range. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member( + const rd_kafka_ConsumerGroupDescription_t *grpdesc, + size_t idx); + +/** + * @brief Gets client id of \p member. + * + * @param member The group member. + * + * @return The client id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char *rd_kafka_MemberDescription_client_id( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets group instance id of \p member. + * + * @param member The group member. + * + * @return The group instance id, or NULL if not available. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char *rd_kafka_MemberDescription_group_instance_id( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets consumer id of \p member. + * + * @param member The group member. + * + * @return The consumer id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char *rd_kafka_MemberDescription_consumer_id( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets host of \p member. + * + * @param member The group member. + * + * @return The host. 
+ * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char * +rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets assignment of \p member. + * + * @param member The group member. + * + * @return The member assignment. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets assigned partitions of a member \p assignment. + * + * @param assignment The group member assignment. + * + * @return The assigned partitions. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p assignment object. + */ +RD_EXPORT +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions( + const rd_kafka_MemberAssignment_t *assignment); + +/**@}*/ + +/** + * @name Admin API - DeleteGroups + * @brief Delete groups from cluster + * @{ + * + * + */ + +/*! Represents a group to be deleted. */ +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t; + +/** + * @brief Create a new DeleteGroup object. This object is later passed to + * rd_kafka_DeleteGroups(). + * + * @param group Name of group to delete. + * + * @returns a new allocated DeleteGroup object. + * Use rd_kafka_DeleteGroup_destroy() to free object when done. + */ +RD_EXPORT +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group); + +/** + * @brief Destroy and free a DeleteGroup object previously created with + * rd_kafka_DeleteGroup_new() + */ +RD_EXPORT +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group); + +/** + * @brief Helper function to destroy all DeleteGroup objects in + * the \p del_groups array (of \p del_group_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void +rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt); + +/** + * @brief Delete groups from cluster as specified by the \p del_groups + * array of size \p del_group_cnt elements. + * + * @param rk Client instance. + * @param del_groups Array of groups to delete. + * @param del_group_cnt Number of elements in \p del_groups array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT + * + * @remark This function in called deleteConsumerGroups in the Java client. + */ +RD_EXPORT +void rd_kafka_DeleteGroups(rd_kafka_t *rk, + rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * DeleteGroups result type and methods + */ + +/** + * @brief Get an array of group results from a DeleteGroups result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( + const rd_kafka_DeleteGroups_result_t *result, + size_t *cntp); + +/**@}*/ + +/** + * @name Admin API - ListConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be listed. 
*/ +typedef struct rd_kafka_ListConsumerGroupOffsets_s + rd_kafka_ListConsumerGroupOffsets_t; + +/** + * @brief Create a new ListConsumerGroupOffsets object. + * This object is later passed to rd_kafka_ListConsumerGroupOffsets(). + * + * @param group_id Consumer group id. + * @param partitions Partitions to list committed offsets for. + * Only the topic and partition fields are used. + * + * @returns a new allocated ListConsumerGroupOffsets object. + * Use rd_kafka_ListConsumerGroupOffsets_destroy() to free + * object when done. + */ +RD_EXPORT rd_kafka_ListConsumerGroupOffsets_t * +rd_kafka_ListConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a ListConsumerGroupOffsets object previously + * created with rd_kafka_ListConsumerGroupOffsets_new() + */ +RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy( + rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets); + +/** + * @brief Helper function to destroy all ListConsumerGroupOffsets objects in + * the \p list_grpoffsets array (of \p list_grpoffsets_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy_array( + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffset_cnt); + +/** + * @brief List committed offsets for a set of partitions in a consumer + * group. + * + * @param rk Client instance. + * @param list_grpoffsets Array of group committed offsets to list. + * MUST only be one single element. + * @param list_grpoffsets_cnt Number of elements in \p list_grpoffsets array. + * MUST always be 1. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + * + * @remark The current implementation only supports one group per invocation. + */ +RD_EXPORT +void rd_kafka_ListConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * ListConsumerGroupOffsets result type and methods + */ + +/** + * @brief Get an array of results from a ListConsumerGroupOffsets result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT const rd_kafka_group_result_t ** +rd_kafka_ListConsumerGroupOffsets_result_groups( + const rd_kafka_ListConsumerGroupOffsets_result_t *result, + size_t *cntp); + + + +/**@}*/ + +/** + * @name Admin API - AlterConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be altered. */ +typedef struct rd_kafka_AlterConsumerGroupOffsets_s + rd_kafka_AlterConsumerGroupOffsets_t; + +/** + * @brief Create a new AlterConsumerGroupOffsets object. + * This object is later passed to rd_kafka_AlterConsumerGroupOffsets(). + * + * @param group_id Consumer group id. + * @param partitions Partitions to alter committed offsets for. + * Only the topic and partition fields are used. + * + * @returns a new allocated AlterConsumerGroupOffsets object. + * Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free + * object when done. 
+ */ +RD_EXPORT rd_kafka_AlterConsumerGroupOffsets_t * +rd_kafka_AlterConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a AlterConsumerGroupOffsets object previously + * created with rd_kafka_AlterConsumerGroupOffsets_new() + */ +RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy( + rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets); + +/** + * @brief Helper function to destroy all AlterConsumerGroupOffsets objects in + * the \p alter_grpoffsets array (of \p alter_grpoffsets_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy_array( + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffset_cnt); + +/** + * @brief Alter committed offsets for a set of partitions in a consumer + * group. This will succeed at the partition level only if the group + * is not actively subscribed to the corresponding topic. + * + * @param rk Client instance. + * @param alter_grpoffsets Array of group committed offsets to alter. + * MUST only be one single element. + * @param alter_grpoffsets_cnt Number of elements in \p alter_grpoffsets array. + * MUST always be 1. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + * + * @remark The current implementation only supports one group per invocation. + */ +RD_EXPORT +void rd_kafka_AlterConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * AlterConsumerGroupOffsets result type and methods + */ + +/** + * @brief Get an array of results from a AlterConsumerGroupOffsets result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT const rd_kafka_group_result_t ** +rd_kafka_AlterConsumerGroupOffsets_result_groups( + const rd_kafka_AlterConsumerGroupOffsets_result_t *result, + size_t *cntp); + + + +/**@}*/ + +/** + * @name Admin API - DeleteConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be deleted. */ +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s + rd_kafka_DeleteConsumerGroupOffsets_t; + +/** + * @brief Create a new DeleteConsumerGroupOffsets object. + * This object is later passed to rd_kafka_DeleteConsumerGroupOffsets(). + * + * @param group Consumer group id. + * @param partitions Partitions to delete committed offsets for. + * Only the topic and partition fields are used. + * + * @returns a new allocated DeleteConsumerGroupOffsets object. + * Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free + * object when done. 
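+ *
+ * For example (an illustrative sketch; the client instance \c rk, the result
+ * queue \c rkqu and the topic/group names are placeholders):
+ * @code
+ * rd_kafka_topic_partition_list_t *partitions =
+ *     rd_kafka_topic_partition_list_new(1);
+ * rd_kafka_topic_partition_list_add(partitions, "mytopic", 0);
+ *
+ * rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets =
+ *     rd_kafka_DeleteConsumerGroupOffsets_new("mygroup", partitions);
+ *
+ * // The current implementation requires exactly one element.
+ * rd_kafka_DeleteConsumerGroupOffsets(rk, &del_grpoffsets, 1, NULL, rkqu);
+ *
+ * rd_kafka_DeleteConsumerGroupOffsets_destroy(del_grpoffsets);
+ * rd_kafka_topic_partition_list_destroy(partitions);
+ * @endcode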
+ */ +RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t * +rd_kafka_DeleteConsumerGroupOffsets_new( + const char *group, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a DeleteConsumerGroupOffsets object previously + * created with rd_kafka_DeleteConsumerGroupOffsets_new() + */ +RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy( + rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets); + +/** + * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in + * the \p del_grpoffsets array (of \p del_grpoffsets_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array( + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffset_cnt); + +/** + * @brief Delete committed offsets for a set of partitions in a consumer + * group. This will succeed at the partition level only if the group + * is not actively subscribed to the corresponding topic. + * + * @param rk Client instance. + * @param del_grpoffsets Array of group committed offsets to delete. + * MUST only be one single element. + * @param del_grpoffsets_cnt Number of elements in \p del_grpoffsets array. + * MUST always be 1. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT + * + * @remark The current implementation only supports one group per invocation. + */ +RD_EXPORT +void rd_kafka_DeleteConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * DeleteConsumerGroupOffsets result type and methods + */ + +/** + * @brief Get an array of results from a DeleteConsumerGroupOffsets result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_group_result_t ** +rd_kafka_DeleteConsumerGroupOffsets_result_groups( + const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, + size_t *cntp); + +/**@}*/ + +/** + * @name Admin API - ListOffsets + * @brief Given a topic_partition list, provides the offset information. + * @{ + */ + +/** + * @enum rd_kafka_OffsetSpec_t + * @brief Allows to specify the desired offsets when using ListOffsets. + */ +typedef enum rd_kafka_OffsetSpec_t { + /* Used to retrieve the offset with the largest timestamp of a partition + * as message timestamps can be specified client side this may not match + * the log end offset returned by SPEC_LATEST. + */ + RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP = -3, + /* Used to retrieve the offset with the earliest timestamp of a + partition. */ + RD_KAFKA_OFFSET_SPEC_EARLIEST = -2, + /* Used to retrieve the offset with the latest timestamp of a partition. + */ + RD_KAFKA_OFFSET_SPEC_LATEST = -1, +} rd_kafka_OffsetSpec_t; + +/** + * @brief Information returned from a ListOffsets call for a specific + * `rd_kafka_topic_partition_t`. + */ +typedef struct rd_kafka_ListOffsetsResultInfo_s + rd_kafka_ListOffsetsResultInfo_t; + +/** + * @brief Returns the topic partition of the passed \p result_info. 
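+ *
+ * A typical traversal of a ListOffsets result could look like this
+ * (illustrative sketch; assumes \c result was obtained from a
+ * \c RD_KAFKA_EVENT_LISTOFFSETS_RESULT event):
+ * @code
+ * size_t cnt, i;
+ * const rd_kafka_ListOffsetsResultInfo_t **infos =
+ *     rd_kafka_ListOffsets_result_infos(result, &cnt);
+ * for (i = 0; i < cnt; i++) {
+ *         const rd_kafka_topic_partition_t *rktpar =
+ *             rd_kafka_ListOffsetsResultInfo_topic_partition(infos[i]);
+ *         printf("%s [%d] offset %lld timestamp %lld\n",
+ *                rktpar->topic, (int)rktpar->partition,
+ *                (long long)rktpar->offset,
+ *                (long long)rd_kafka_ListOffsetsResultInfo_timestamp(infos[i]));
+ * }
+ * @endcode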
+ */ +RD_EXPORT +const rd_kafka_topic_partition_t * +rd_kafka_ListOffsetsResultInfo_topic_partition( + const rd_kafka_ListOffsetsResultInfo_t *result_info); + +/** + * @brief Returns the timestamp corresponding to the offset in \p result_info. + */ +RD_EXPORT +int64_t rd_kafka_ListOffsetsResultInfo_timestamp( + const rd_kafka_ListOffsetsResultInfo_t *result_info); + +/** + * @brief Returns the array of ListOffsetsResultInfo in \p result + * and populates the size of the array in \p cntp. + */ +RD_EXPORT +const rd_kafka_ListOffsetsResultInfo_t ** +rd_kafka_ListOffsets_result_infos(const rd_kafka_ListOffsets_result_t *result, + size_t *cntp); + +/** + * @brief List offsets for the specified \p topic_partitions. + * This operation enables to find the beginning offset, + * end offset as well as the offset matching a timestamp in partitions + * or the offset with max timestamp. + * + * @param rk Client instance. + * @param topic_partitions topic_partition_list_t with the partitions and + * offsets to list. Each topic partition offset can be + * a value of the `rd_kafka_OffsetSpec_t` enum or + * a non-negative value, representing a timestamp, + * to query for the first offset after the + * given timestamp. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * Supported admin options: + * - rd_kafka_AdminOptions_set_isolation_level() - default \c + * RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED + * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_LISTOFFSETS_RESULT + */ +RD_EXPORT +void rd_kafka_ListOffsets(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *topic_partitions, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/**@}*/ + +/** + * @name Admin API - User SCRAM credentials + * @{ + */ + +/** + * @enum rd_kafka_ScramMechanism_t + * @brief Apache Kafka ScramMechanism values. + */ +typedef enum rd_kafka_ScramMechanism_t { + RD_KAFKA_SCRAM_MECHANISM_UNKNOWN = 0, + RD_KAFKA_SCRAM_MECHANISM_SHA_256 = 1, + RD_KAFKA_SCRAM_MECHANISM_SHA_512 = 2, + RD_KAFKA_SCRAM_MECHANISM__CNT +} rd_kafka_ScramMechanism_t; + +/** + * @brief Scram credential info. + * Mechanism and iterations for a SASL/SCRAM + * credential associated with a user. + */ +typedef struct rd_kafka_ScramCredentialInfo_s rd_kafka_ScramCredentialInfo_t; + +/** + * @brief Returns the mechanism of a given ScramCredentialInfo. + */ +RD_EXPORT +rd_kafka_ScramMechanism_t rd_kafka_ScramCredentialInfo_mechanism( + const rd_kafka_ScramCredentialInfo_t *scram_credential_info); + +/** + * @brief Returns the iterations of a given ScramCredentialInfo. + */ +RD_EXPORT +int32_t rd_kafka_ScramCredentialInfo_iterations( + const rd_kafka_ScramCredentialInfo_t *scram_credential_info); + +/** + * @brief Representation of all SASL/SCRAM credentials associated + * with a user that can be retrieved, + * or an error indicating why credentials + * could not be retrieved. + */ +typedef struct rd_kafka_UserScramCredentialsDescription_s + rd_kafka_UserScramCredentialsDescription_t; + +/** + * @brief Returns the username of a UserScramCredentialsDescription. + */ +RD_EXPORT +const char *rd_kafka_UserScramCredentialsDescription_user( + const rd_kafka_UserScramCredentialsDescription_t *description); + +/** + * @brief Returns the error associated with a UserScramCredentialsDescription. 
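+ *
+ * For example, the descriptions in a DescribeUserScramCredentials result can
+ * be inspected like this (illustrative sketch; assumes \c result was obtained
+ * from the corresponding result event):
+ * @code
+ * size_t cnt, i;
+ * const rd_kafka_UserScramCredentialsDescription_t **descriptions =
+ *     rd_kafka_DescribeUserScramCredentials_result_descriptions(result, &cnt);
+ * for (i = 0; i < cnt; i++) {
+ *         const rd_kafka_UserScramCredentialsDescription_t *d =
+ *             descriptions[i];
+ *         const rd_kafka_error_t *err =
+ *             rd_kafka_UserScramCredentialsDescription_error(d);
+ *         if (rd_kafka_error_code(err)) {
+ *                 printf("%s: %s\n",
+ *                        rd_kafka_UserScramCredentialsDescription_user(d),
+ *                        rd_kafka_error_string(err));
+ *                 continue;
+ *         }
+ *         printf("%s has %zu SCRAM credential(s)\n",
+ *                rd_kafka_UserScramCredentialsDescription_user(d),
+ *                rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(d));
+ * }
+ * @endcode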
+ */ +RD_EXPORT +const rd_kafka_error_t *rd_kafka_UserScramCredentialsDescription_error( + const rd_kafka_UserScramCredentialsDescription_t *description); + +/** + * @brief Returns the count of ScramCredentialInfos of a + * UserScramCredentialsDescription. + */ +RD_EXPORT +size_t rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count( + const rd_kafka_UserScramCredentialsDescription_t *description); + +/** + * @brief Returns the ScramCredentialInfo at index idx of + * UserScramCredentialsDescription. + */ +RD_EXPORT +const rd_kafka_ScramCredentialInfo_t * +rd_kafka_UserScramCredentialsDescription_scramcredentialinfo( + const rd_kafka_UserScramCredentialsDescription_t *description, + size_t idx); + +/** + * @brief Get an array of descriptions from a DescribeUserScramCredentials + * result. + * + * The returned value life-time is the same as the \p result object. + * + * @param result Result to get descriptions from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT +const rd_kafka_UserScramCredentialsDescription_t ** +rd_kafka_DescribeUserScramCredentials_result_descriptions( + const rd_kafka_DescribeUserScramCredentials_result_t *result, + size_t *cntp); + +/** + * @brief Describe SASL/SCRAM credentials. + * This operation is supported by brokers with version 2.7.0 or higher. + * + * @param rk Client instance. + * @param users The users for which credentials are to be described. + * All users' credentials are described if NULL. + * @param user_cnt Number of elements in \p users array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + */ +RD_EXPORT +void rd_kafka_DescribeUserScramCredentials( + rd_kafka_t *rk, + const char **users, + size_t user_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief A request to alter a user's SASL/SCRAM credentials. + */ +typedef struct rd_kafka_UserScramCredentialAlteration_s + rd_kafka_UserScramCredentialAlteration_t; + +/** + * @brief Allocates a new UserScramCredentialUpsertion given its fields. + * If salt isn't given a 64 B salt is generated using OpenSSL + * RAND_priv_bytes, if available. + * + * @param username The username (not empty). + * @param mechanism SASL/SCRAM mechanism. + * @param iterations SASL/SCRAM iterations. + * @param password Password bytes (not empty). + * @param password_size Size of \p password (greater than 0). + * @param salt Salt bytes (optional). + * @param salt_size Size of \p salt (optional). + * + * @remark A random salt is generated, when NULL, only if OpenSSL >= 1.1.1. + * Otherwise it's a required param. + * + * @return A newly created instance of rd_kafka_UserScramCredentialAlteration_t. + * Ownership belongs to the caller, use + * rd_kafka_UserScramCredentialAlteration_destroy to destroy. + */ +RD_EXPORT +rd_kafka_UserScramCredentialAlteration_t * +rd_kafka_UserScramCredentialUpsertion_new(const char *username, + rd_kafka_ScramMechanism_t mechanism, + int32_t iterations, + const unsigned char *password, + size_t password_size, + const unsigned char *salt, + size_t salt_size); + +/** + * @brief Allocates a new UserScramCredentialDeletion given its fields. + * + * @param username The username (not empty). + * @param mechanism SASL/SCRAM mechanism. + * @return A newly created instance of rd_kafka_UserScramCredentialAlteration_t. + * Ownership belongs to the caller, use + * rd_kafka_UserScramCredentialAlteration_destroy to destroy. 
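+ *
+ * For example, one credential can be upserted and another deleted in a single
+ * request (illustrative sketch; \c rk, \c rkqu, the user names and the
+ * password are placeholders):
+ * @code
+ * const char *password = "secret";
+ * rd_kafka_UserScramCredentialAlteration_t *alterations[2];
+ *
+ * // Upsert a SCRAM-SHA-256 credential, letting a salt be generated.
+ * alterations[0] = rd_kafka_UserScramCredentialUpsertion_new(
+ *     "alice", RD_KAFKA_SCRAM_MECHANISM_SHA_256, 8192,
+ *     (const unsigned char *)password, strlen(password), NULL, 0);
+ * // Delete bob's SCRAM-SHA-512 credential.
+ * alterations[1] = rd_kafka_UserScramCredentialDeletion_new(
+ *     "bob", RD_KAFKA_SCRAM_MECHANISM_SHA_512);
+ *
+ * rd_kafka_AlterUserScramCredentials(rk, alterations, 2, NULL, rkqu);
+ *
+ * rd_kafka_UserScramCredentialAlteration_destroy_array(alterations, 2);
+ * @endcode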
+ */ +RD_EXPORT +rd_kafka_UserScramCredentialAlteration_t * +rd_kafka_UserScramCredentialDeletion_new(const char *username, + rd_kafka_ScramMechanism_t mechanism); + + +/** + * @brief Destroys a UserScramCredentialAlteration given its pointer + */ +RD_EXPORT +void rd_kafka_UserScramCredentialAlteration_destroy( + rd_kafka_UserScramCredentialAlteration_t *alteration); + +/** + * @brief Destroys an array of UserScramCredentialAlteration + */ +RD_EXPORT +void rd_kafka_UserScramCredentialAlteration_destroy_array( + rd_kafka_UserScramCredentialAlteration_t **alterations, + size_t alteration_cnt); + +/** + * @brief Result of a single user SCRAM alteration. + */ +typedef struct rd_kafka_AlterUserScramCredentials_result_response_s + rd_kafka_AlterUserScramCredentials_result_response_t; + +/** + * @brief Returns the username for a + * rd_kafka_AlterUserScramCredentials_result_response. + */ +RD_EXPORT +const char *rd_kafka_AlterUserScramCredentials_result_response_user( + const rd_kafka_AlterUserScramCredentials_result_response_t *response); + +/** + * @brief Returns the error of a + * rd_kafka_AlterUserScramCredentials_result_response. + */ +RD_EXPORT +const rd_kafka_error_t * +rd_kafka_AlterUserScramCredentials_result_response_error( + const rd_kafka_AlterUserScramCredentials_result_response_t *response); + +/** + * @brief Get an array of responses from a AlterUserScramCredentials result. + * + * The returned value life-time is the same as the \p result object. + * + * @param result Result to get responses from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT +const rd_kafka_AlterUserScramCredentials_result_response_t ** +rd_kafka_AlterUserScramCredentials_result_responses( + const rd_kafka_AlterUserScramCredentials_result_t *result, + size_t *cntp); + +/** + * @brief Alter SASL/SCRAM credentials. + * This operation is supported by brokers with version 2.7.0 or higher. + * + * @remark For upsertions to be processed, librdkfka must be build with + * OpenSSL support. It's needed to calculate the HMAC. + * + * @param rk Client instance. + * @param alterations The alterations to be applied. + * @param alteration_cnt Number of elements in \p alterations array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + */ +RD_EXPORT +void rd_kafka_AlterUserScramCredentials( + rd_kafka_t *rk, + rd_kafka_UserScramCredentialAlteration_t **alterations, + size_t alteration_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/**@}*/ + +/** + * @name Admin API - ACL operations + * @{ + */ + +/** + * @brief ACL Binding is used to create access control lists. + * + * + */ +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t; + +/** + * @brief ACL Binding filter is used to filter access control lists. + * + */ +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t; + +/** + * @returns the error object for the given acl result, or NULL on success. + */ +RD_EXPORT const rd_kafka_error_t * +rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres); + + +/** + * @returns a string representation of the \p acl_operation + */ +RD_EXPORT const char * +rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation); + +/** + * @enum rd_kafka_AclPermissionType_t + * @brief Apache Kafka ACL permission types. 
+ */ +typedef enum rd_kafka_AclPermissionType_t { + RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_ACL_PERMISSION_TYPE_ANY = + 1, /**< In a filter, matches any AclPermissionType */ + RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2, /**< Disallows access */ + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3, /**< Grants access. */ + RD_KAFKA_ACL_PERMISSION_TYPE__CNT +} rd_kafka_AclPermissionType_t; + +/** + * @returns a string representation of the \p acl_permission_type + */ +RD_EXPORT const char *rd_kafka_AclPermissionType_name( + rd_kafka_AclPermissionType_t acl_permission_type); + +/** + * @brief Create a new AclBinding object. This object is later passed to + * rd_kafka_CreateAcls(). + * + * @param restype The ResourceType. + * @param name The resource name. + * @param resource_pattern_type The pattern type. + * @param principal A principal, following the kafka specification. + * @param host An hostname or ip. + * @param operation A Kafka operation. + * @param permission_type A Kafka permission type. + * @param errstr An error string for returning errors or NULL to not use it. + * @param errstr_size The \p errstr size or 0 to not use it. + * + * @returns a new allocated AclBinding object, or NULL if the input parameters + * are invalid. + * Use rd_kafka_AclBinding_destroy() to free object when done. + */ +RD_EXPORT rd_kafka_AclBinding_t * +rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, + const char *name, + rd_kafka_ResourcePatternType_t resource_pattern_type, + const char *principal, + const char *host, + rd_kafka_AclOperation_t operation, + rd_kafka_AclPermissionType_t permission_type, + char *errstr, + size_t errstr_size); + +/** + * @brief Create a new AclBindingFilter object. This object is later passed to + * rd_kafka_DescribeAcls() or + * rd_kafka_DeletesAcls() in order to filter + * the acls to retrieve or to delete. + * Use the same rd_kafka_AclBinding functions to query or destroy it. + * + * @param restype The ResourceType or \c RD_KAFKA_RESOURCE_ANY if + * not filtering by this field. + * @param name The resource name or NULL if not filtering by this field. + * @param resource_pattern_type The pattern type or \c + * RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field. + * @param principal A principal or NULL if not filtering by this field. + * @param host An hostname or ip or NULL if not filtering by this field. + * @param operation A Kafka operation or \c RD_KAFKA_ACL_OPERATION_ANY if not + * filtering by this field. + * @param permission_type A Kafka permission type or \c + * RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field. + * @param errstr An error string for returning errors or NULL to not use it. + * @param errstr_size The \p errstr size or 0 to not use it. + * + * @returns a new allocated AclBindingFilter object, or NULL if the input + * parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when + * done. + */ +RD_EXPORT rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new( + rd_kafka_ResourceType_t restype, + const char *name, + rd_kafka_ResourcePatternType_t resource_pattern_type, + const char *principal, + const char *host, + rd_kafka_AclOperation_t operation, + rd_kafka_AclPermissionType_t permission_type, + char *errstr, + size_t errstr_size); + +/** + * @returns the resource type for the given acl binding. + */ +RD_EXPORT rd_kafka_ResourceType_t +rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the resource name for the given acl binding. 
+ * + * @remark lifetime of the returned string is the same as the \p acl. + */ +RD_EXPORT const char * +rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the principal for the given acl binding. + * + * @remark lifetime of the returned string is the same as the \p acl. + */ +RD_EXPORT const char * +rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the host for the given acl binding. + * + * @remark lifetime of the returned string is the same as the \p acl. + */ +RD_EXPORT const char * +rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the acl operation for the given acl binding. + */ +RD_EXPORT rd_kafka_AclOperation_t +rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the permission type for the given acl binding. + */ +RD_EXPORT rd_kafka_AclPermissionType_t +rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the resource pattern type for the given acl binding. + */ +RD_EXPORT rd_kafka_ResourcePatternType_t +rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the error object for the given acl binding, or NULL on success. + */ +RD_EXPORT const rd_kafka_error_t * +rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl); + + +/** + * @brief Destroy and free an AclBinding object previously created with + * rd_kafka_AclBinding_new() + */ +RD_EXPORT void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding); + + +/** + * @brief Helper function to destroy all AclBinding objects in + * the \p acl_bindings array (of \p acl_bindings_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void +rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, + size_t acl_bindings_cnt); + +/** + * @brief Get an array of acl results from a CreateAcls result. + * + * The returned \p acl result life-time is the same as the \p result object. + * @param result CreateAcls result to get acl results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_acl_result_t ** +rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, + size_t *cntp); + +/** + * @brief Create acls as specified by the \p new_acls + * array of size \p new_topic_cnt elements. + * + * @param rk Client instance. + * @param new_acls Array of new acls to create. + * @param new_acls_cnt Number of elements in \p new_acls array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * Supported admin options: + * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_CREATEACLS_RESULT + */ +RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk, + rd_kafka_AclBinding_t **new_acls, + size_t new_acls_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * DescribeAcls - describe access control lists. + * + * + */ + +/** + * @brief Get an array of resource results from a DescribeAcls result. + * + * The returned \p resources life-time is the same as the \p result object. + * @param result DescribeAcls result to get acls from. + * @param cntp is updated to the number of elements in the array. 
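+ *
+ * For example, all ACLs bound literally to a given topic can be requested
+ * like this (illustrative sketch; \c rk, \c rkqu and the topic name are
+ * placeholders):
+ * @code
+ * char errstr[512];
+ * // NULL principal/host and the ANY operation/permission values mean
+ * // "do not filter on this field".
+ * rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
+ *     RD_KAFKA_RESOURCE_TOPIC, "mytopic",
+ *     RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
+ *     RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
+ *     errstr, sizeof(errstr));
+ *
+ * rd_kafka_DescribeAcls(rk, filter, NULL, rkqu);
+ * rd_kafka_AclBinding_destroy(filter);
+ *
+ * // The matching bindings are then read from the
+ * // RD_KAFKA_EVENT_DESCRIBEACLS_RESULT event with
+ * // rd_kafka_DescribeAcls_result_acls().
+ * @endcode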
+ */ +RD_EXPORT const rd_kafka_AclBinding_t ** +rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, + size_t *cntp); + +/** + * @brief Describe acls matching the filter provided in \p acl_filter + * + * @param rk Client instance. + * @param acl_filter Filter for the returned acls. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * Supported admin options: + * - rd_kafka_AdminOptions_set_operation_timeout() - default 0 + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBEACLS_RESULT + */ +RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk, + rd_kafka_AclBindingFilter_t *acl_filter, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * DeleteAcls - delete access control lists. + * + * + */ + +typedef struct rd_kafka_DeleteAcls_result_response_s + rd_kafka_DeleteAcls_result_response_t; + +/** + * @brief Get an array of DeleteAcls result responses from a DeleteAcls result. + * + * The returned \p responses life-time is the same as the \p result object. + * @param result DeleteAcls result to get responses from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_DeleteAcls_result_response_t ** +rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, + size_t *cntp); + +/** + * @returns the error object for the given DeleteAcls result response, + * or NULL on success. + */ +RD_EXPORT const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error( + const rd_kafka_DeleteAcls_result_response_t *result_response); + + +/** + * @returns the matching acls array for the given DeleteAcls result response. + * + * @remark lifetime of the returned acl bindings is the same as the \p + * result_response. + */ +RD_EXPORT const rd_kafka_AclBinding_t ** +rd_kafka_DeleteAcls_result_response_matching_acls( + const rd_kafka_DeleteAcls_result_response_t *result_response, + size_t *matching_acls_cntp); + +/** + * @brief Delete acls matching the filteres provided in \p del_acls + * array of size \p del_acls_cnt. + * + * @param rk Client instance. + * @param del_acls Filters for the acls to delete. + * @param del_acls_cnt Number of elements in \p del_acls array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * Supported admin options: + * - rd_kafka_AdminOptions_set_operation_timeout() - default 0 + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DELETEACLS_RESULT + */ +RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk, + rd_kafka_AclBindingFilter_t **del_acls, + size_t del_acls_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/**@}*/ + +/** + * @name Security APIs + * @{ + * + */ + +/** + * @brief Set SASL/OAUTHBEARER token and metadata + * + * @param rk Client instance. + * @param token_value the mandatory token value to set, often (but not + * necessarily) a JWS compact serialization as per + * https://tools.ietf.org/html/rfc7515#section-3.1. + * @param md_lifetime_ms when the token expires, in terms of the number of + * milliseconds since the epoch. + * @param md_principal_name the mandatory Kafka principal name associated + * with the token. 
+ * @param extensions optional SASL extensions key-value array with
+ * \p extension_size elements (number of keys * 2), where [i] is the key and
+ * [i+1] is the key's value, to be communicated to the broker
+ * as additional key-value pairs during the initial client response as per
+ * https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are
+ * copied.
+ * @param extension_size the number of SASL extension keys plus values,
+ * which must be a non-negative multiple of 2.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
+ * this method upon success. The extension keys must not include the reserved
+ * key "`auth`", and all extension keys and values must conform to the required
+ * format as per https://tools.ietf.org/html/rfc7628#section-3.1:
+ *
+ *     key            = 1*(ALPHA)
+ *     value          = *(VCHAR / SP / HTAB / CR / LF )
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise \p errstr set
+ *          and:
+ * \c RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are + * invalid;
+ * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not + * supported by this build;
+ * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is + * not configured as the client's authentication mechanism.
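+ *
+ * A token refresh callback might use it roughly like this (illustrative
+ * sketch; \c token_str, \c expiry_ms and the principal are placeholders
+ * supplied by the application's token provider):
+ * @code
+ * char errstr[512];
+ * const char *extensions[] = {"traceId", "abc"};
+ *
+ * if (rd_kafka_oauthbearer_set_token(rk, token_str, expiry_ms,
+ *                                    "kafka-client", extensions, 2,
+ *                                    errstr, sizeof(errstr)) !=
+ *     RD_KAFKA_RESP_ERR_NO_ERROR)
+ *         rd_kafka_oauthbearer_set_token_failure(rk, errstr);
+ * @endcode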
+ * + * @sa rd_kafka_oauthbearer_set_token_failure + * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb + */ +RD_EXPORT +rd_kafka_resp_err_t +rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, + const char *token_value, + int64_t md_lifetime_ms, + const char *md_principal_name, + const char **extensions, + size_t extension_size, + char *errstr, + size_t errstr_size); + +/** + * @brief SASL/OAUTHBEARER token refresh failure indicator. + * + * @param rk Client instance. + * @param errstr mandatory human readable error reason for failing to acquire + * a token. + * + * The SASL/OAUTHBEARER token refresh callback or event handler should invoke + * this method upon failure. + * + * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:
+ * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not + * supported by this build;
+ * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is + * not configured as the client's authentication mechanism,
+ * \c RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied. + * + * @sa rd_kafka_oauthbearer_set_token + * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, + const char *errstr); + +/**@}*/ + + +/** + * @name Transactional producer API + * + * The transactional producer operates on top of the idempotent producer, + * and provides full exactly-once semantics (EOS) for Apache Kafka when used + * with the transaction aware consumer (\c isolation.level=read_committed). + * + * A producer instance is configured for transactions by setting the + * \c transactional.id to an identifier unique for the application. This + * id will be used to fence stale transactions from previous instances of + * the application, typically following an outage or crash. + * + * After creating the transactional producer instance using rd_kafka_new() + * the transactional state must be initialized by calling + * rd_kafka_init_transactions(). This is a blocking call that will + * acquire a runtime producer id from the transaction coordinator broker + * as well as abort any stale transactions and fence any still running producer + * instances with the same \c transactional.id. + * + * Once transactions are initialized the application may begin a new + * transaction by calling rd_kafka_begin_transaction(). + * A producer instance may only have one single on-going transaction. + * + * Any messages produced after the transaction has been started will + * belong to the ongoing transaction and will be committed or aborted + * atomically. + * It is not permitted to produce messages outside a transaction + * boundary, e.g., before rd_kafka_begin_transaction() or after + * rd_kafka_commit_transaction(), rd_kafka_abort_transaction(), or after + * the current transaction has failed. + * + * If consumed messages are used as input to the transaction, the consumer + * instance must be configured with \c enable.auto.commit set to \c false. + * To commit the consumed offsets along with the transaction pass the + * list of consumed partitions and the last offset processed + 1 to + * rd_kafka_send_offsets_to_transaction() prior to committing the transaction. + * This allows an aborted transaction to be restarted using the previously + * committed offsets. + * + * To commit the produced messages, and any consumed offsets, to the + * current transaction, call rd_kafka_commit_transaction(). + * This call will block until the transaction has been fully committed or + * failed (typically due to fencing by a newer producer instance). + * + * Alternatively, if processing fails, or an abortable transaction error is + * raised, the transaction needs to be aborted by calling + * rd_kafka_abort_transaction() which marks any produced messages and + * offset commits as aborted. + * + * After the current transaction has been committed or aborted a new + * transaction may be started by calling rd_kafka_begin_transaction() again. + * + * @par Retriable errors + * Some error cases allow the attempted operation to be retried, this is + * indicated by the error object having the retriable flag set which can + * be detected by calling rd_kafka_error_is_retriable(). + * When this flag is set the application may retry the operation immediately + * or preferably after a shorter grace period (to avoid busy-looping). + * Retriable errors include timeouts, broker transport failures, etc. 
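+ * For example, a retriable error from rd_kafka_init_transactions() can simply
+ * be retried (illustrative sketch; \c fatal_error is an application-defined
+ * handler as in the error handling example below):
+ * @code
+ * rd_kafka_error_t *error;
+ *
+ * while ((error = rd_kafka_init_transactions(producer, 30*1000))) {
+ *         if (!rd_kafka_error_is_retriable(error)) {
+ *                 fatal_error(rd_kafka_error_string(error));
+ *                 rd_kafka_error_destroy(error);
+ *                 break;
+ *         }
+ *         // Retriable: wait a short grace period, then try again.
+ *         rd_kafka_error_destroy(error);
+ * }
+ * @endcode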
+ *
+ * @par Abortable errors
+ * An ongoing transaction may fail permanently due to various errors,
+ * such as the transaction coordinator becoming unavailable, write failures to
+ * the Apache Kafka log, under-replicated partitions, etc.
+ * At this point the producer application must abort the current transaction
+ * using rd_kafka_abort_transaction() and optionally start a new transaction
+ * by calling rd_kafka_begin_transaction().
+ * Whether an error is abortable or not is detected by calling
+ * rd_kafka_error_txn_requires_abort() on the returned error object.
+ *
+ * @par Fatal errors
+ * While the underlying idempotent producer will typically only raise
+ * fatal errors for unrecoverable cluster errors where the idempotency
+ * guarantees can't be maintained, most of these are treated as abortable by
+ * the transactional producer since transactions may be aborted and retried
+ * in their entirety.
+ * The transactional producer, on the other hand, introduces a set of
+ * additional fatal errors which the application needs to handle by shutting
+ * down the producer and terminating. There is no way for a producer instance
+ * to recover from fatal errors.
+ * Whether an error is fatal or not is detected by calling
+ * rd_kafka_error_is_fatal() on the returned error object or by checking
+ * the global rd_kafka_fatal_error() code.
+ * Fatal errors are raised by triggering the \c error_cb (see the
+ * Fatal error chapter in INTRODUCTION.md for more information), and any
+ * subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL
+ * or have the fatal flag set (see rd_kafka_error_is_fatal()).
+ * The originating fatal error code can be retrieved by calling
+ * rd_kafka_fatal_error().
+ *
+ * @par Handling of other errors
+ * For errors that have neither the retriable, abortable, nor fatal flag set
+ * it is not always obvious how to handle them. While some of these errors
+ * may be indicative of bugs in the application code, such as when
+ * an invalid parameter is passed to a method, other errors might originate
+ * from the broker and be passed through as-is to the application.
+ * The general recommendation is to treat these errors, which have
+ * neither the retriable nor abortable flag set, as fatal.
+ *
+ * @par Error handling example
+ * @code
+ *     retry:
+ *        rd_kafka_error_t *error;
+ *
+ *        error = rd_kafka_commit_transaction(producer, 10*1000);
+ *        if (!error)
+ *                return success;
+ *        else if (rd_kafka_error_txn_requires_abort(error)) {
+ *                do_abort_transaction_and_reset_inputs();
+ *        } else if (rd_kafka_error_is_retriable(error)) {
+ *                rd_kafka_error_destroy(error);
+ *                goto retry;
+ *        } else { // treat all other errors as fatal errors
+ *                fatal_error(rd_kafka_error_string(error));
+ *        }
+ *        rd_kafka_error_destroy(error);
+ * @endcode
+ *
+ *
+ * @{
+ */
+
+
+/**
+ * @brief Initialize transactions for the producer instance.
+ *
+ * This function ensures any transactions initiated by previous instances
+ * of the producer with the same \c transactional.id are completed.
+ * If the previous instance failed with a transaction in progress the
+ * previous transaction will be aborted.
+ * This function needs to be called before any other transactional or
+ * produce functions are called when the \c transactional.id is configured.
+ *
+ * If the last transaction had begun completion (following transaction commit)
+ * but not yet finished, this function will await the previous transaction's
+ * completion.
+ * + * When any previous transactions have been fenced this function + * will acquire the internal producer id and epoch, used in all future + * transactional messages issued by this producer instance. + * + * @param rk Producer instance. + * @param timeout_ms The maximum time to block. On timeout the operation + * may continue in the background, depending on state, + * and it is okay to call init_transactions() again. + * If an infinite timeout (-1) is passed, the timeout will + * be adjusted to 2 * \c transaction.timeout.ms. + * + * @remark This function may block up to \p timeout_ms milliseconds. + * + * @remark This call is resumable when a retriable timeout error is returned. + * Calling the function again will resume the operation that is + * progressing in the background. + * + * @returns NULL on success or an error object on failure. + * Check whether the returned error object permits retrying + * by calling rd_kafka_error_is_retriable(), or whether a fatal + * error has been raised by calling rd_kafka_error_is_fatal(). + * Error codes: + * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator + * could be not be contacted within \p timeout_ms (retriable), + * RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction + * coordinator is not available (retriable), + * RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction + * would not complete within \p timeout_ms (retriable), + * RD_KAFKA_RESP_ERR__STATE if transactions have already been started + * or upon fatal error, + * RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not + * support transactions ( +#include +#include "select_rdkafka.h" +#include "glue_rdkafka.h" + +void setup_rkmessage (rd_kafka_message_t *rkmessage, + rd_kafka_topic_t *rkt, int32_t partition, + const void *payload, size_t len, + void *key, size_t keyLen, void *opaque) { + rkmessage->rkt = rkt; + rkmessage->partition = partition; + rkmessage->payload = (void *)payload; + rkmessage->len = len; + rkmessage->key = (void *)key; + rkmessage->key_len = keyLen; + rkmessage->_private = opaque; +} +*/ +import "C" + +// TimestampType is a the Message timestamp type or source +type TimestampType int + +const ( + // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support + TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) + // TimestampCreateTime indicates timestamp set by producer (source time) + TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME) + // TimestampLogAppendTime indicates timestamp set set by broker (store time) + TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) +) + +func (t TimestampType) String() string { + switch t { + case TimestampCreateTime: + return "CreateTime" + case TimestampLogAppendTime: + return "LogAppendTime" + case TimestampNotAvailable: + fallthrough + default: + return "NotAvailable" + } +} + +// Message represents a Kafka message +type Message struct { + TopicPartition TopicPartition + Value []byte + Key []byte + Timestamp time.Time + TimestampType TimestampType + Opaque interface{} + Headers []Header + LeaderEpoch *int32 // Deprecated: LeaderEpoch or nil if not available. Use m.TopicPartition.LeaderEpoch instead. +} + +// String returns a human readable representation of a Message. +// Key and payload are not represented. 
+func (m *Message) String() string { + var topic string + if m.TopicPartition.Topic != nil { + topic = *m.TopicPartition.Topic + } else { + topic = "" + } + return fmt.Sprintf("%s[%d]@%s", topic, m.TopicPartition.Partition, m.TopicPartition.Offset) +} + +func (h *handle) getRktFromMessage(msg *Message) (crkt *C.rd_kafka_topic_t) { + if msg.TopicPartition.Topic == nil { + return nil + } + + return h.getRkt(*msg.TopicPartition.Topic) +} + +// setupHeadersFromGlueMsg converts the C tmp headers in gMsg to +// Go Headers in msg. +// gMsg.tmphdrs will be freed. +func setupHeadersFromGlueMsg(msg *Message, gMsg *C.glue_msg_t) { + msg.Headers = make([]Header, gMsg.tmphdrsCnt) + for n := range msg.Headers { + tmphdr := (*[1 << 30]C.tmphdr_t)(unsafe.Pointer(gMsg.tmphdrs))[n] + msg.Headers[n].Key = C.GoString(tmphdr.key) + if tmphdr.val != nil { + msg.Headers[n].Value = C.GoBytes(unsafe.Pointer(tmphdr.val), C.int(tmphdr.size)) + } else { + msg.Headers[n].Value = nil + } + } + C.free(unsafe.Pointer(gMsg.tmphdrs)) +} + +func (h *handle) newMessageFromGlueMsg(gMsg *C.glue_msg_t) (msg *Message) { + msg = &Message{} + + if gMsg.ts != -1 { + ts := int64(gMsg.ts) + msg.TimestampType = TimestampType(gMsg.tstype) + msg.Timestamp = time.Unix(ts/1000, (ts%1000)*1000000) + } + + if gMsg.tmphdrsCnt > 0 { + setupHeadersFromGlueMsg(msg, gMsg) + } + + h.setupMessageFromC(msg, gMsg.msg) + + return msg +} + +// setupMessageFromC sets up a message object from a C rd_kafka_message_t +func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) { + if cmsg.rkt != nil { + topic := h.getTopicNameFromRkt(cmsg.rkt) + msg.TopicPartition.Topic = &topic + } + msg.TopicPartition.Partition = int32(cmsg.partition) + if cmsg.payload != nil && h.msgFields.Value { + msg.Value = C.GoBytes(unsafe.Pointer(cmsg.payload), C.int(cmsg.len)) + } + if cmsg.key != nil && h.msgFields.Key { + msg.Key = C.GoBytes(unsafe.Pointer(cmsg.key), C.int(cmsg.key_len)) + } + if h.msgFields.Headers { + var gMsg C.glue_msg_t + gMsg.msg = cmsg + gMsg.want_hdrs = C.int8_t(1) + chdrsToTmphdrs(&gMsg) + if gMsg.tmphdrsCnt > 0 { + setupHeadersFromGlueMsg(msg, &gMsg) + } + } + msg.TopicPartition.Offset = Offset(cmsg.offset) + if cmsg.err != 0 { + msg.TopicPartition.Error = newError(cmsg.err) + } + + leaderEpoch := int32(C.rd_kafka_message_leader_epoch(cmsg)) + if leaderEpoch >= 0 { + msg.LeaderEpoch = &leaderEpoch + msg.TopicPartition.LeaderEpoch = &leaderEpoch + } +} + +// newMessageFromC creates a new message object from a C rd_kafka_message_t +// NOTE: For use with Producer: does not set message timestamp fields. +func (h *handle) newMessageFromC(cmsg *C.rd_kafka_message_t) (msg *Message) { + msg = &Message{} + + h.setupMessageFromC(msg, cmsg) + + return msg +} + +// messageToC sets up cmsg as a clone of msg +func (h *handle) messageToC(msg *Message, cmsg *C.rd_kafka_message_t) { + var valp unsafe.Pointer + var keyp unsafe.Pointer + + // to circumvent Cgo constraints we need to allocate C heap memory + // for both Value and Key (one allocation back to back) + // and copy the bytes from Value and Key to the C memory. + // We later tell librdkafka (in produce()) to free the + // C memory pointer when it is done. 
+ var payload unsafe.Pointer + + valueLen := 0 + keyLen := 0 + if msg.Value != nil { + valueLen = len(msg.Value) + } + if msg.Key != nil { + keyLen = len(msg.Key) + } + + allocLen := valueLen + keyLen + if allocLen > 0 { + payload = C.malloc(C.size_t(allocLen)) + if valueLen > 0 { + copy((*[1 << 30]byte)(payload)[0:valueLen], msg.Value) + valp = payload + } + if keyLen > 0 { + copy((*[1 << 30]byte)(payload)[valueLen:allocLen], msg.Key) + keyp = unsafe.Pointer(&((*[1 << 31]byte)(payload)[valueLen])) + } + } + + cmsg.rkt = h.getRktFromMessage(msg) + cmsg.partition = C.int32_t(msg.TopicPartition.Partition) + cmsg.payload = valp + cmsg.len = C.size_t(valueLen) + cmsg.key = keyp + cmsg.key_len = C.size_t(keyLen) + cmsg._private = nil +} + +// used for testing messageToC performance +func (h *handle) messageToCDummy(msg *Message) { + var cmsg C.rd_kafka_message_t + h.messageToC(msg, &cmsg) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/metadata.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/metadata.go new file mode 100644 index 000000000..3b195f488 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/metadata.go @@ -0,0 +1,180 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "unsafe" +) + +/* +#include +#include "select_rdkafka.h" + +struct rd_kafka_metadata_broker *_getMetadata_broker_element(struct rd_kafka_metadata *m, int i) { + return &m->brokers[i]; +} + +struct rd_kafka_metadata_topic *_getMetadata_topic_element(struct rd_kafka_metadata *m, int i) { + return &m->topics[i]; +} + +struct rd_kafka_metadata_partition *_getMetadata_partition_element(struct rd_kafka_metadata *m, int topic_idx, int partition_idx) { + return &m->topics[topic_idx].partitions[partition_idx]; +} + +int32_t _get_int32_element (int32_t *arr, int i) { + return arr[i]; +} + +*/ +import "C" + +// BrokerMetadata contains per-broker metadata +type BrokerMetadata struct { + ID int32 + Host string + Port int +} + +// PartitionMetadata contains per-partition metadata +type PartitionMetadata struct { + ID int32 + Error Error + Leader int32 + Replicas []int32 + Isrs []int32 +} + +// TopicMetadata contains per-topic metadata +type TopicMetadata struct { + Topic string + Partitions []PartitionMetadata + Error Error +} + +// Metadata contains broker and topic metadata for all (matching) topics +type Metadata struct { + Brokers []BrokerMetadata + Topics map[string]TopicMetadata + + OriginatingBroker BrokerMetadata +} + +// getMetadata queries broker for cluster and topic metadata. +// If topic is non-nil only information about that topic is returned, else if +// allTopics is false only information about locally used topics is returned, +// else information about all topics is returned. 
+func getMetadata(H Handle, topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + h := H.gethandle() + + var rkt *C.rd_kafka_topic_t + if topic != nil { + rkt = h.getRkt(*topic) + } + + var cMd *C.struct_rd_kafka_metadata + cErr := C.rd_kafka_metadata(h.rk, bool2cint(allTopics), + rkt, &cMd, C.int(timeoutMs)) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + + m := Metadata{} + defer C.rd_kafka_metadata_destroy(cMd) + + m.Brokers = make([]BrokerMetadata, cMd.broker_cnt) + for i := 0; i < int(cMd.broker_cnt); i++ { + b := C._getMetadata_broker_element(cMd, C.int(i)) + m.Brokers[i] = BrokerMetadata{int32(b.id), C.GoString(b.host), + int(b.port)} + } + + m.Topics = make(map[string]TopicMetadata, int(cMd.topic_cnt)) + for i := 0; i < int(cMd.topic_cnt); i++ { + t := C._getMetadata_topic_element(cMd, C.int(i)) + + thisTopic := C.GoString(t.topic) + m.Topics[thisTopic] = TopicMetadata{Topic: thisTopic, + Error: newError(t.err), + Partitions: make([]PartitionMetadata, int(t.partition_cnt))} + + for j := 0; j < int(t.partition_cnt); j++ { + p := C._getMetadata_partition_element(cMd, C.int(i), C.int(j)) + m.Topics[thisTopic].Partitions[j] = PartitionMetadata{ + ID: int32(p.id), + Error: newError(p.err), + Leader: int32(p.leader)} + m.Topics[thisTopic].Partitions[j].Replicas = make([]int32, int(p.replica_cnt)) + for ir := 0; ir < int(p.replica_cnt); ir++ { + m.Topics[thisTopic].Partitions[j].Replicas[ir] = int32(C._get_int32_element(p.replicas, C.int(ir))) + } + + m.Topics[thisTopic].Partitions[j].Isrs = make([]int32, int(p.isr_cnt)) + for ii := 0; ii < int(p.isr_cnt); ii++ { + m.Topics[thisTopic].Partitions[j].Isrs[ii] = int32(C._get_int32_element(p.isrs, C.int(ii))) + } + } + } + + m.OriginatingBroker = BrokerMetadata{int32(cMd.orig_broker_id), + C.GoString(cMd.orig_broker_name), 0} + + return &m, nil +} + +// queryWatermarkOffsets returns the broker's low and high offsets for the given topic +// and partition. +func queryWatermarkOffsets(H Handle, topic string, partition int32, timeoutMs int) (low, high int64, err error) { + h := H.gethandle() + + ctopic := C.CString(topic) + defer C.free(unsafe.Pointer(ctopic)) + + var cLow, cHigh C.int64_t + + e := C.rd_kafka_query_watermark_offsets(h.rk, ctopic, C.int32_t(partition), + &cLow, &cHigh, C.int(timeoutMs)) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return 0, 0, newError(e) + } + + low = int64(cLow) + high = int64(cHigh) + return low, high, nil +} + +// getWatermarkOffsets returns the clients cached low and high offsets for the given topic +// and partition. +func getWatermarkOffsets(H Handle, topic string, partition int32) (low, high int64, err error) { + h := H.gethandle() + + ctopic := C.CString(topic) + defer C.free(unsafe.Pointer(ctopic)) + + var cLow, cHigh C.int64_t + + e := C.rd_kafka_get_watermark_offsets(h.rk, ctopic, C.int32_t(partition), + &cLow, &cHigh) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return 0, 0, newError(e) + } + + low = int64(cLow) + high = int64(cHigh) + + return low, high, nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/misc.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/misc.go new file mode 100644 index 000000000..6d602ce77 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/misc.go @@ -0,0 +1,35 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import "C" + +// bool2int converts a bool to a C.int (1 or 0) +func bool2cint(b bool) C.int { + if b { + return 1 + } + return 0 +} + +// cint2bool converts a C.int to a bool +func cint2bool(v C.int) bool { + if v == 0 { + return false + } + return true +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/mockcluster.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/mockcluster.go new file mode 100644 index 000000000..7b16b729e --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/mockcluster.go @@ -0,0 +1,134 @@ +/** + * Copyright 2023 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "time" + "unsafe" +) + +/* +#include +#include "select_rdkafka.h" +#include "glue_rdkafka.h" +*/ +import "C" + +// MockCluster represents a Kafka mock cluster instance which can be used +// for testing. +type MockCluster struct { + rk *C.rd_kafka_t + mcluster *C.rd_kafka_mock_cluster_t +} + +// NewMockCluster provides a mock Kafka cluster with a configurable +// number of brokers that support a reasonable subset of Kafka protocol +// operations, error injection, etc. +// +// The broker ids will start at 1 up to and including brokerCount. +// +// Mock clusters provide localhost listeners that can be used as the bootstrap +// servers by multiple Kafka client instances. +// +// Currently supported functionality: +// - Producer +// - Idempotent Producer +// - Transactional Producer +// - Low-level consumer +// - High-level balanced consumer groups with offset commits +// - Topic Metadata and auto creation +// +// Warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL. 
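For reference, a minimal usage sketch of the mock cluster API added in this vendored file (NewMockCluster below, together with BootstrapServers, CreateTopic and Close); the broker count, topic name and partition/replication counts are arbitrary placeholders, not part of the patch:

package main

import (
	"fmt"
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// Start an in-process mock cluster with three brokers (ids 1..3).
	mc, err := kafka.NewMockCluster(3)
	if err != nil {
		log.Fatal(err)
	}
	defer mc.Close()

	// Pre-create a topic instead of relying on auto-creation.
	if err := mc.CreateTopic("mock-topic", 4, 3); err != nil {
		log.Fatal(err)
	}

	// Any client can bootstrap against the mock cluster's localhost listeners.
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": mc.BootstrapServers(),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	fmt.Println("bootstrap.servers:", mc.BootstrapServers())
}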
+func NewMockCluster(brokerCount int) (*MockCluster, error) { + + mc := &MockCluster{} + + cErrstr := (*C.char)(C.malloc(C.size_t(512))) + defer C.free(unsafe.Pointer(cErrstr)) + + cConf := C.rd_kafka_conf_new() + + mc.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256) + if mc.rk == nil { + C.rd_kafka_conf_destroy(cConf) + return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + mc.mcluster = C.rd_kafka_mock_cluster_new(mc.rk, C.int(brokerCount)) + if mc.mcluster == nil { + C.rd_kafka_destroy(mc.rk) + return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + return mc, nil +} + +// BootstrapServers returns the bootstrap.servers property for this MockCluster +func (mc *MockCluster) BootstrapServers() string { + return C.GoString(C.rd_kafka_mock_cluster_bootstraps(mc.mcluster)) +} + +// SetRoundtripDuration sets the broker round-trip-time delay for the given broker. +// Use brokerID -1 for all brokers, or >= 0 for a specific broker. +func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error { + durationInMillis := C.int(duration.Milliseconds()) + cError := C.rd_kafka_mock_broker_set_rtt(mc.mcluster, C.int(brokerID), durationInMillis) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + +// SetBrokerDown disconnects the broker and disallows any new connections. +// This does NOT trigger leader change. +// Use brokerID -1 for all brokers, or >= 0 for a specific broker. +func (mc *MockCluster) SetBrokerDown(brokerID int) error { + cError := C.rd_kafka_mock_broker_set_down(mc.mcluster, C.int(brokerID)) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + +// SetBrokerUp makes the broker accept connections again. +// This does NOT trigger leader change. +// Use brokerID -1 for all brokers, or >= 0 for a specific broker. +func (mc *MockCluster) SetBrokerUp(brokerID int) error { + cError := C.rd_kafka_mock_broker_set_up(mc.mcluster, C.int(brokerID)) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + +// CreateTopic creates a topic without having to use a producer +func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error { + topicStr := C.CString(topic) + defer C.free(unsafe.Pointer(topicStr)) + + cError := C.rd_kafka_mock_topic_create(mc.mcluster, topicStr, C.int(partitions), C.int(replicationFactor)) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + +// Close and destroy the MockCluster +func (mc *MockCluster) Close() { + C.rd_kafka_mock_cluster_destroy(mc.mcluster) + C.rd_kafka_destroy(mc.rk) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/offset.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/offset.go new file mode 100644 index 000000000..c0d27f95e --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/offset.go @@ -0,0 +1,147 @@ +/** + * Copyright 2017 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "strconv" +) + +/* +#include +#include "select_rdkafka.h" + +static int64_t _c_rdkafka_offset_tail(int64_t rel) { + return RD_KAFKA_OFFSET_TAIL(rel); +} +*/ +import "C" + +// Offset type (int64) with support for canonical names +type Offset int64 + +// OffsetBeginning represents the earliest offset (logical) +const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING) + +// OffsetEnd represents the latest offset (logical) +const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END) + +// OffsetInvalid represents an invalid/unspecified offset +const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID) + +// OffsetStored represents a stored offset +const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED) + +func (o Offset) String() string { + switch o { + case OffsetBeginning: + return "beginning" + case OffsetEnd: + return "end" + case OffsetInvalid: + return "unset" + case OffsetStored: + return "stored" + default: + return fmt.Sprintf("%d", int64(o)) + } +} + +// Set offset value, see NewOffset() +func (o *Offset) Set(offset interface{}) error { + n, err := NewOffset(offset) + + if err == nil { + *o = n + } + + return err +} + +// NewOffset creates a new Offset using the provided logical string, an +// absolute int64 offset value, or a concrete Offset type. +// Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored" +func NewOffset(offset interface{}) (Offset, error) { + + switch v := offset.(type) { + case string: + switch v { + case "beginning": + fallthrough + case "earliest": + return Offset(OffsetBeginning), nil + + case "end": + fallthrough + case "latest": + return Offset(OffsetEnd), nil + + case "unset": + fallthrough + case "invalid": + return Offset(OffsetInvalid), nil + + case "stored": + return Offset(OffsetStored), nil + + default: + off, err := strconv.Atoi(v) + return Offset(off), err + } + + case int: + return Offset((int64)(v)), nil + case int64: + return Offset(v), nil + case Offset: + return Offset(v), nil + default: + return OffsetInvalid, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid offset type: %t", v)) + } +} + +// OffsetTail returns the logical offset relativeOffset from current end of partition +func OffsetTail(relativeOffset Offset) Offset { + return Offset(C._c_rdkafka_offset_tail(C.int64_t(relativeOffset))) +} + +// offsetsForTimes looks up offsets by timestamp for the given partitions. +// +// The returned offset for each partition is the earliest offset whose +// timestamp is greater than or equal to the given timestamp in the +// corresponding partition. If the provided timestamp exceeds that of the +// last message in the partition, a value of -1 will be returned. +// +// The timestamps to query are represented as `.Offset` in the `times` +// argument and the looked up offsets are represented as `.Offset` in the returned +// `offsets` list. +// +// The function will block for at most timeoutMs milliseconds. +// +// Duplicate Topic+Partitions are not supported. +// Per-partition errors may be returned in the `.Error` field. 
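A small example of the offset helpers defined above (logical names, absolute values and tail offsets), using only the constructors and constants shown in this file:

package main

import (
	"fmt"
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// Logical names resolve to the librdkafka offset constants.
	begin, err := kafka.NewOffset("earliest")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(begin) // prints "beginning"

	// Absolute offsets may be given as int, int64 or a numeric string.
	abs, err := kafka.NewOffset("12345")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(abs) // prints "12345"

	// Offset.Set accepts the same inputs in place.
	var o kafka.Offset
	if err := o.Set(kafka.OffsetEnd); err != nil {
		log.Fatal(err)
	}
	fmt.Println(o) // prints "end"

	// OffsetTail(5): five messages before the current end of the partition.
	fmt.Println(kafka.OffsetTail(5))
}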
+func offsetsForTimes(H Handle, times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + cparts := newCPartsFromTopicPartitions(times) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_offsets_for_times(H.gethandle().rk, cparts, C.int(timeoutMs)) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cerr) + } + + return newTopicPartitionsFromCparts(cparts), nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/producer.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/producer.go new file mode 100644 index 000000000..04003c9c7 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/producer.go @@ -0,0 +1,1039 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "context" + "fmt" + "sync/atomic" + "time" + "unsafe" +) + +/* +#include +#include "select_rdkafka.h" +#include "glue_rdkafka.h" + + +#ifdef RD_KAFKA_V_HEADERS +// Convert tmphdrs to chdrs (created by this function). +// If tmphdr.size == -1: value is considered Null +// tmphdr.size == 0: value is considered empty (ignored) +// tmphdr.size > 0: value is considered non-empty +// +// WARNING: The header keys and values will be freed by this function. +void tmphdrs_to_chdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt, + rd_kafka_headers_t **chdrs) { + size_t i; + + *chdrs = rd_kafka_headers_new(tmphdrsCnt); + + for (i = 0 ; i < tmphdrsCnt ; i++) { + rd_kafka_header_add(*chdrs, + tmphdrs[i].key, -1, + tmphdrs[i].size == -1 ? NULL : + (tmphdrs[i].size == 0 ? "" : tmphdrs[i].val), + tmphdrs[i].size == -1 ? 0 : tmphdrs[i].size); + if (tmphdrs[i].size > 0) + free((void *)tmphdrs[i].val); + free((void *)tmphdrs[i].key); + } +} + +#else +void free_tmphdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt) { + size_t i; + for (i = 0 ; i < tmphdrsCnt ; i++) { + if (tmphdrs[i].size > 0) + free((void *)tmphdrs[i].val); + free((void *)tmphdrs[i].key); + } +} +#endif + + +rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, + rd_kafka_topic_t *rkt, int32_t partition, + int msgflags, + int valIsNull, void *val, size_t val_len, + int keyIsNull, void *key, size_t key_len, + int64_t timestamp, + tmphdr_t *tmphdrs, size_t tmphdrsCnt, + uintptr_t cgoid) { + void *valp = valIsNull ? NULL : val; + void *keyp = keyIsNull ? 
NULL : key; +#ifdef RD_KAFKA_V_TIMESTAMP +rd_kafka_resp_err_t err; +#ifdef RD_KAFKA_V_HEADERS + rd_kafka_headers_t *hdrs = NULL; +#endif +#endif + + + if (tmphdrsCnt > 0) { +#ifdef RD_KAFKA_V_HEADERS + tmphdrs_to_chdrs(tmphdrs, tmphdrsCnt, &hdrs); +#else + free_tmphdrs(tmphdrs, tmphdrsCnt); + return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; +#endif + } + + +#ifdef RD_KAFKA_V_TIMESTAMP + err = rd_kafka_producev(rk, + RD_KAFKA_V_RKT(rkt), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(valp, val_len), + RD_KAFKA_V_KEY(keyp, key_len), + RD_KAFKA_V_TIMESTAMP(timestamp), +#ifdef RD_KAFKA_V_HEADERS + RD_KAFKA_V_HEADERS(hdrs), +#endif + RD_KAFKA_V_OPAQUE((void *)cgoid), + RD_KAFKA_V_END); +#ifdef RD_KAFKA_V_HEADERS + if (err && hdrs) + rd_kafka_headers_destroy(hdrs); +#endif + return err; +#else + if (timestamp) + return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; + if (rd_kafka_produce(rkt, partition, msgflags, + valp, val_len, + keyp, key_len, + (void *)cgoid) == -1) + return rd_kafka_last_error(); + else + return RD_KAFKA_RESP_ERR_NO_ERROR; +#endif +} +*/ +import "C" + +// minInt64 finds the minimum of two int64s. +// Required until we start using Go 1.18 with generic functions. +func minInt64(a int64, b int64) int64 { + if a > b { + return b + } + return a +} + +// Producer implements a High-level Apache Kafka Producer instance +type Producer struct { + events chan Event + produceChannel chan *Message + handle handle + + // Terminates the poller() goroutine + pollerTermChan chan bool + + // checks if Producer has been closed or not. + isClosed uint32 +} + +// IsClosed returns boolean representing if client is closed or not +func (p *Producer) IsClosed() bool { + return atomic.LoadUint32(&p.isClosed) == 1 +} + +func (p *Producer) verifyClient() error { + if p.IsClosed() { + return getOperationNotAllowedErrorForClosedClient() + } + return nil +} + +// String returns a human readable name for a Producer instance +func (p *Producer) String() string { + return p.handle.String() +} + +// get_handle implements the Handle interface +func (p *Producer) gethandle() *handle { + return &p.handle +} + +func (p *Producer) produce(msg *Message, msgFlags int, deliveryChan chan Event) error { + if msg == nil || msg.TopicPartition.Topic == nil || len(*msg.TopicPartition.Topic) == 0 { + return newErrorFromString(ErrInvalidArg, "") + } + + crkt := p.handle.getRkt(*msg.TopicPartition.Topic) + + // Three problems: + // 1) There's a difference between an empty Value or Key (length 0, proper pointer) and + // a null Value or Key (length 0, null pointer). + // 2) we need to be able to send a null Value or Key, but the unsafe.Pointer(&slice[0]) + // dereference can't be performed on a nil slice. + // 3) cgo's pointer checking requires the unsafe.Pointer(slice..) call to be made + // in the call to the C function. + // + // Solution: + // Keep track of whether the Value or Key were nil (1), but let the valp and keyp pointers + // point to a 1-byte slice (but the length to send is still 0) so that the dereference (2) + // works. + // Then perform the unsafe.Pointer() on the valp and keyp pointers (which now either point + // to the original msg.Value and msg.Key or to the 1-byte slices) in the call to C (3). 
+ // + var valp []byte + var keyp []byte + oneByte := []byte{0} + var valIsNull C.int + var keyIsNull C.int + var valLen int + var keyLen int + + if msg.Value == nil { + valIsNull = 1 + valLen = 0 + valp = oneByte + } else { + valLen = len(msg.Value) + if valLen > 0 { + valp = msg.Value + } else { + valp = oneByte + } + } + + if msg.Key == nil { + keyIsNull = 1 + keyLen = 0 + keyp = oneByte + } else { + keyLen = len(msg.Key) + if keyLen > 0 { + keyp = msg.Key + } else { + keyp = oneByte + } + } + + var cgoid int + + // Per-message state that needs to be retained through the C code: + // delivery channel (if specified) + // message opaque (if specified) + // Since these cant be passed as opaque pointers to the C code, + // due to cgo constraints, we add them to a per-producer map for lookup + // when the C code triggers the callbacks or events. + if deliveryChan != nil || msg.Opaque != nil { + cgoid = p.handle.cgoPut(cgoDr{deliveryChan: deliveryChan, opaque: msg.Opaque}) + } + + var timestamp int64 + if !msg.Timestamp.IsZero() { + timestamp = msg.Timestamp.UnixNano() / 1000000 + } + + // Convert headers to C-friendly tmphdrs + var tmphdrs []C.tmphdr_t + tmphdrsCnt := len(msg.Headers) + + if tmphdrsCnt > 0 { + tmphdrs = make([]C.tmphdr_t, tmphdrsCnt) + + for n, hdr := range msg.Headers { + // Make a copy of the key + // to avoid runtime panic with + // foreign Go pointers in cgo. + tmphdrs[n].key = C.CString(hdr.Key) + if hdr.Value != nil { + tmphdrs[n].size = C.ssize_t(len(hdr.Value)) + if tmphdrs[n].size > 0 { + // Make a copy of the value + // to avoid runtime panic with + // foreign Go pointers in cgo. + tmphdrs[n].val = C.CBytes(hdr.Value) + } + } else { + // null value + tmphdrs[n].size = C.ssize_t(-1) + } + } + } else { + // no headers, need a dummy tmphdrs of size 1 to avoid index + // out of bounds panic in do_produce() call below. + // tmphdrsCnt will be 0. + tmphdrs = []C.tmphdr_t{{nil, nil, 0}} + } + + cErr := C.do_produce(p.handle.rk, crkt, + C.int32_t(msg.TopicPartition.Partition), + C.int(msgFlags)|C.RD_KAFKA_MSG_F_COPY, + valIsNull, unsafe.Pointer(&valp[0]), C.size_t(valLen), + keyIsNull, unsafe.Pointer(&keyp[0]), C.size_t(keyLen), + C.int64_t(timestamp), + (*C.tmphdr_t)(unsafe.Pointer(&tmphdrs[0])), C.size_t(tmphdrsCnt), + (C.uintptr_t)(cgoid)) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + if cgoid != 0 { + p.handle.cgoGet(cgoid) + } + return newError(cErr) + } + + return nil +} + +// Produce single message. +// This is an asynchronous call that enqueues the message on the internal +// transmit queue, thus returning immediately. +// The delivery report will be sent on the provided deliveryChan if specified, +// or on the Producer object's Events() channel if not. +// msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented), +// api.version.request=true, and broker >= 0.10.0.0. +// msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented), +// api.version.request=true, and broker >= 0.11.0.0. +// Returns an error if message could not be enqueued. +func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error { + err := p.verifyClient() + if err != nil { + return err + } + return p.produce(msg, 0, deliveryChan) +} + +// Produce a batch of messages. +// These batches do not relate to the message batches sent to the broker, the latter +// are collected on the fly internally in librdkafka. +// WARNING: This is an experimental API. +// NOTE: timestamps and headers are not supported with this API. 
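To make the asynchronous contract of Produce above concrete, a minimal sketch that waits for the per-message delivery report; the broker address and topic name are placeholders, and error handling is abbreviated:

package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	topic := "example-topic" // placeholder
	deliveryChan := make(chan kafka.Event, 1)

	// Produce only enqueues the message; it returns before delivery.
	err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Key:            []byte("key"),
		Value:          []byte("value"),
	}, deliveryChan)
	if err != nil {
		log.Fatal(err)
	}

	// The delivery report is posted to deliveryChan as a *kafka.Message.
	if m, ok := (<-deliveryChan).(*kafka.Message); ok && m.TopicPartition.Error != nil {
		log.Printf("delivery failed: %v", m.TopicPartition.Error)
	}

	// Flush any remaining queued messages before shutting down (15s cap).
	p.Flush(15 * 1000)
}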
+func (p *Producer) produceBatch(topic string, msgs []*Message, msgFlags int) error { + crkt := p.handle.getRkt(topic) + + cmsgs := make([]C.rd_kafka_message_t, len(msgs)) + for i, m := range msgs { + p.handle.messageToC(m, &cmsgs[i]) + } + r := C.rd_kafka_produce_batch(crkt, C.RD_KAFKA_PARTITION_UA, C.int(msgFlags)|C.RD_KAFKA_MSG_F_FREE, + (*C.rd_kafka_message_t)(&cmsgs[0]), C.int(len(msgs))) + if r == -1 { + return newError(C.rd_kafka_last_error()) + } + + return nil +} + +// Events returns the Events channel (read) +func (p *Producer) Events() chan Event { + return p.events +} + +// Logs returns the Log channel (if enabled), else nil +func (p *Producer) Logs() chan LogEvent { + return p.handle.logs +} + +// ProduceChannel returns the produce *Message channel (write) +// +// Deprecated: ProduceChannel (channel based producer) is deprecated in favour +// of Produce(). +// Flush() and Len() are not guaranteed to be reliable with ProduceChannel. +func (p *Producer) ProduceChannel() chan *Message { + return p.produceChannel +} + +// Len returns the number of messages and requests waiting to be transmitted to the broker +// as well as delivery reports queued for the application. +// BUG: Tries to include messages on ProduceChannel, but it's not guaranteed to be reliable. +func (p *Producer) Len() int { + return len(p.produceChannel) + len(p.events) + int(C.rd_kafka_outq_len(p.handle.rk)) +} + +// Flush and wait for outstanding messages and requests to complete delivery. +// Runs until value reaches zero or on timeoutMs. +// Returns the number of outstanding events still un-flushed. +// BUG: Tries to include messages on ProduceChannel, but it's not guaranteed to be reliable. +func (p *Producer) Flush(timeoutMs int) int { + termChan := make(chan bool) // unused stand-in termChan + // used to specify timeout for the underlying flush + flushIntervalChan := make(chan int64) + + // Keep calling rd_kafka_flush to ignore queue.buffering.max.ms, and + // account for any other state changes that the underlying library + // might do in case it is flushing. + go func() { + for flushInterval := range flushIntervalChan { + C.rd_kafka_flush(p.handle.rk, C.int(flushInterval)) + } + }() + + defer close(flushIntervalChan) + + timeoutDuration := time.Duration(timeoutMs) * time.Millisecond + tEnd := time.Now().Add(timeoutDuration) + for p.Len() > 0 { + remain := time.Until(tEnd).Milliseconds() + if remain <= 0 { + return p.Len() + } + + tWait := minInt64(100, remain) + + // If the previous eventPoll returned immediately, and the previous + // rd_kafka_flush did not, we'll end up in a situation where this + // channel blocks. However, this is acceptable as it may block for + // a maximum of 100ms. + flushIntervalChan <- tWait + p.handle.eventPoll(p.events, + int(tWait), 1000, termChan) + } + + return 0 +} + +// Close a Producer instance. +// The Producer object or its channels are no longer usable after this call. +func (p *Producer) Close() { + if !atomic.CompareAndSwapUint32(&p.isClosed, 0, 1) { + return + } + + // Wait for poller() (signaled by closing pollerTermChan) + // and channel_producer() (signaled by closing ProduceChannel) + close(p.pollerTermChan) + close(p.produceChannel) + p.handle.waitGroup.Wait() + + close(p.events) + + p.handle.cleanup() + + C.rd_kafka_destroy(p.handle.rk) +} + +const ( + // PurgeInFlight purges messages in-flight to or from the broker. 
+ // Purging these messages will void any future acknowledgements from the + // broker, making it impossible for the application to know if these + // messages were successfully delivered or not. + // Retrying these messages may lead to duplicates. + PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT) + + // PurgeQueue Purge messages in internal queues. + PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE) + + // PurgeNonBlocking Don't wait for background thread queue purging to finish. + PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING) +) + +// Purge messages currently handled by this producer instance. +// +// flags is a combination of PurgeQueue, PurgeInFlight and PurgeNonBlocking. +// +// The application will need to call Poll(), Flush() or read the Events() channel +// after this call to serve delivery reports for the purged messages. +// +// Messages purged from internal queues fail with the delivery report +// error code set to ErrPurgeQueue, while purged messages that +// are in-flight to or from the broker will fail with the error code set to +// ErrPurgeInflight. +// +// Warning: Purging messages that are in-flight to or from the broker +// will ignore any sub-sequent acknowledgement for these messages +// received from the broker, effectively making it impossible +// for the application to know if the messages were successfully +// produced or not. This may result in duplicate messages if the +// application retries these messages at a later time. +// +// Note: This call may block for a short time while background thread +// queues are purged. +// +// Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown. +func (p *Producer) Purge(flags int) error { + err := p.verifyClient() + if err != nil { + return err + } + cErr := C.rd_kafka_purge(p.handle.rk, C.int(flags)) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cErr) + } + + return nil +} + +// NewProducer creates a new high-level Producer instance. +// +// conf is a *ConfigMap with standard librdkafka configuration properties. +// +// Supported special configuration properties (type, default): +// +// go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance). +// These batches do not relate to Kafka message batches in any way. +// Note: timestamps and headers are not supported with this interface. +// go.delivery.reports (bool, true) - Forward per-message delivery reports to the +// Events() channel. +// go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports. +// Allowed values: all, none (or empty string), key, value, headers +// Warning: There is a performance penalty to include headers in the delivery report. +// go.events.channel.size (int, 1000000) - Events(). +// go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages) +// go.logs.channel.enable (bool, false) - Forward log to Logs() channel. +// go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true. +func NewProducer(conf *ConfigMap) (*Producer, error) { + + err := versionCheck() + if err != nil { + return nil, err + } + + p := &Producer{} + + // before we do anything with the configuration, create a copy such that + // the original is not mutated. 
+ confCopy := conf.clone() + + v, err := confCopy.extract("delivery.report.only.error", false) + if v == true { + // FIXME: The filtering of successful DRs must be done in + // the Go client to avoid cgoDr memory leaks. + return nil, newErrorFromString(ErrUnsupportedFeature, + "delivery.report.only.error=true is not currently supported by the Go client") + } + + v, err = confCopy.extract("go.batch.producer", false) + if err != nil { + return nil, err + } + batchProducer := v.(bool) + + v, err = confCopy.extract("go.delivery.reports", true) + if err != nil { + return nil, err + } + p.handle.fwdDr = v.(bool) + + v, err = confCopy.extract("go.delivery.report.fields", "key,value") + if err != nil { + return nil, err + } + + p.handle.msgFields, err = newMessageFieldsFrom(v) + if err != nil { + return nil, err + } + + v, err = confCopy.extract("go.events.channel.size", 1000000) + if err != nil { + return nil, err + } + eventsChanSize := v.(int) + + v, err = confCopy.extract("go.produce.channel.size", 1000000) + if err != nil { + return nil, err + } + produceChannelSize := v.(int) + + logsChanEnable, logsChan, err := confCopy.extractLogConfig() + if err != nil { + return nil, err + } + + if int(C.rd_kafka_version()) < 0x01000000 { + // produce.offset.report is no longer used in librdkafka >= v1.0.0 + v, _ = confCopy.extract("{topic}.produce.offset.report", nil) + if v == nil { + // Enable offset reporting by default, unless overriden. + confCopy.SetKey("{topic}.produce.offset.report", true) + } + } + + // Convert ConfigMap to librdkafka conf_t + cConf, err := confCopy.convert() + if err != nil { + return nil, err + } + + cErrstr := (*C.char)(C.malloc(C.size_t(256))) + defer C.free(unsafe.Pointer(cErrstr)) + + C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_DR|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH) + + // Create librdkafka producer instance + p.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256) + if p.handle.rk == nil { + return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + p.handle.p = p + p.handle.setup() + p.handle.rkq = C.rd_kafka_queue_get_main(p.handle.rk) + p.events = make(chan Event, eventsChanSize) + p.produceChannel = make(chan *Message, produceChannelSize) + p.pollerTermChan = make(chan bool) + p.isClosed = 0 + + if logsChanEnable { + p.handle.setupLogQueue(logsChan, p.pollerTermChan) + } + + p.handle.waitGroup.Add(1) + go func() { + poller(p, p.pollerTermChan) + p.handle.waitGroup.Done() + }() + + // non-batch or batch producer, only one must be used + var producer func(*Producer) + if batchProducer { + producer = channelBatchProducer + } else { + producer = channelProducer + } + + p.handle.waitGroup.Add(1) + go func() { + producer(p) + p.handle.waitGroup.Done() + }() + + return p, nil +} + +// channel_producer serves the ProduceChannel channel +func channelProducer(p *Producer) { + for m := range p.produceChannel { + err := p.produce(m, C.RD_KAFKA_MSG_F_BLOCK, nil) + if err != nil { + m.TopicPartition.Error = err + p.events <- m + } + } +} + +// channelBatchProducer serves the ProduceChannel channel and attempts to +// improve cgo performance by using the produceBatch() interface. 
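The special go.* properties listed above sit alongside regular librdkafka keys in the same ConfigMap; a hedged configuration sketch follows, with illustrative values rather than recommendations and a placeholder broker address:

package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	logs := make(chan kafka.LogEvent, 100)

	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers":         "localhost:9092", // placeholder
		"go.delivery.reports":       true,             // forward delivery reports to Events()
		"go.delivery.report.fields": "key,value",      // fields included in each report
		"go.events.channel.size":    100000,           // Events() buffer size
		"go.logs.channel.enable":    true,             // forward librdkafka logs
		"go.logs.channel":           logs,             // application-provided log channel
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	// Drain the log channel so logging never blocks.
	go func() {
		for le := range logs {
			log.Printf("librdkafka: %v", le)
		}
	}()
}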
+func channelBatchProducer(p *Producer) { + var buffered = make(map[string][]*Message) + bufferedCnt := 0 + const batchSize int = 1000000 + totMsgCnt := 0 + totBatchCnt := 0 + + for m := range p.produceChannel { + buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m) + bufferedCnt++ + + loop2: + for true { + select { + case m, ok := <-p.produceChannel: + if !ok { + break loop2 + } + if m == nil { + panic("nil message received on ProduceChannel") + } + if m.TopicPartition.Topic == nil { + panic(fmt.Sprintf("message without Topic received on ProduceChannel: %v", m)) + } + buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m) + bufferedCnt++ + if bufferedCnt >= batchSize { + break loop2 + } + default: + break loop2 + } + } + + totBatchCnt++ + totMsgCnt += len(buffered) + + for topic, buffered2 := range buffered { + err := p.produceBatch(topic, buffered2, C.RD_KAFKA_MSG_F_BLOCK) + if err != nil { + for _, m = range buffered2 { + m.TopicPartition.Error = err + p.events <- m + } + } + } + + buffered = make(map[string][]*Message) + bufferedCnt = 0 + } +} + +// poller polls the rd_kafka_t handle for events until signalled for termination +func poller(p *Producer, termChan chan bool) { + for { + select { + case _ = <-termChan: + return + + default: + _, term := p.handle.eventPoll(p.events, 100, 1000, termChan) + if term { + return + } + break + } + } +} + +// GetMetadata queries broker for cluster and topic metadata. +// If topic is non-nil only information about that topic is returned, else if +// allTopics is false only information about locally used topics is returned, +// else information about all topics is returned. +// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. +func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + err := p.verifyClient() + if err != nil { + return nil, err + } + return getMetadata(p, topic, allTopics, timeoutMs) +} + +// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic +// and partition. +func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) { + err = p.verifyClient() + if err != nil { + return -1, -1, err + } + return queryWatermarkOffsets(p, topic, partition, timeoutMs) +} + +// OffsetsForTimes looks up offsets by timestamp for the given partitions. +// +// The returned offset for each partition is the earliest offset whose +// timestamp is greater than or equal to the given timestamp in the +// corresponding partition. If the provided timestamp exceeds that of the +// last message in the partition, a value of -1 will be returned. +// +// The timestamps to query are represented as `.Offset` in the `times` +// argument and the looked up offsets are represented as `.Offset` in the returned +// `offsets` list. +// +// The function will block for at most timeoutMs milliseconds. +// +// Duplicate Topic+Partitions are not supported. +// Per-partition errors may be returned in the `.Error` field. +func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + err = p.verifyClient() + if err != nil { + return nil, err + } + return offsetsForTimes(p, times, timeoutMs) +} + +// GetFatalError returns an Error object if the client instance has raised a fatal error, else nil. 
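A brief sketch combining GetMetadata and QueryWatermarkOffsets from above; it assumes an already-connected Producer, and the package and function names are illustrative only:

package kafkautil // illustrative package name

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// DumpTopicState prints per-partition leadership and watermark offsets.
func DumpTopicState(p *kafka.Producer, topic string) error {
	// Fetch metadata for this one topic, with a 5 second timeout.
	md, err := p.GetMetadata(&topic, false, 5000)
	if err != nil {
		return err
	}

	for _, part := range md.Topics[topic].Partitions {
		// Broker-reported low and high watermarks per partition.
		low, high, err := p.QueryWatermarkOffsets(topic, part.ID, 5000)
		if err != nil {
			return err
		}
		fmt.Printf("partition %d: leader=%d replicas=%v isrs=%v low=%d high=%d\n",
			part.ID, part.Leader, part.Replicas, part.Isrs, low, high)
	}
	return nil
}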
+func (p *Producer) GetFatalError() error { + err := p.verifyClient() + if err != nil { + return err + } + return getFatalError(p) +} + +// TestFatalError triggers a fatal error in the underlying client. +// This is to be used strictly for testing purposes. +func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode { + return testFatalError(p, code, str) +} + +// SetOAuthBearerToken sets the the data to be transmitted +// to a broker during SASL/OAUTHBEARER authentication. It will return nil +// on success, otherwise an error if: +// 1) the token data is invalid (meaning an expiration time in the past +// or either a token value or an extension key or value that does not meet +// the regular expression requirements as per +// https://tools.ietf.org/html/rfc7628#section-3.1); +// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +// 3) SASL/OAUTHBEARER is supported but is not configured as the client's +// authentication mechanism. +func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { + err := p.verifyClient() + if err != nil { + return err + } + return p.handle.setOAuthBearerToken(oauthBearerToken) +} + +// SetOAuthBearerTokenFailure sets the error message describing why token +// retrieval/setting failed; it also schedules a new token refresh event for 10 +// seconds later so the attempt may be retried. It will return nil on +// success, otherwise an error if: +// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +// 2) SASL/OAUTHBEARER is supported but is not configured as the client's +// authentication mechanism. +func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error { + err := p.verifyClient() + if err != nil { + return err + } + return p.handle.setOAuthBearerTokenFailure(errstr) +} + +// Transactional API + +// InitTransactions Initializes transactions for the producer instance. +// +// This function ensures any transactions initiated by previous instances +// of the producer with the same `transactional.id` are completed. +// If the previous instance failed with a transaction in progress the +// previous transaction will be aborted. +// This function needs to be called before any other transactional or +// produce functions are called when the `transactional.id` is configured. +// +// If the last transaction had begun completion (following transaction commit) +// but not yet finished, this function will await the previous transaction's +// completion. +// +// When any previous transactions have been fenced this function +// will acquire the internal producer id and epoch, used in all future +// transactional messages issued by this producer instance. +// +// Upon successful return from this function the application has to perform at +// least one of the following operations within `transaction.timeout.ms` to +// avoid timing out the transaction on the broker: +// - `Produce()` (et.al) +// - `SendOffsetsToTransaction()` +// - `CommitTransaction()` +// - `AbortTransaction()` +// +// Parameters: +// - `ctx` - The maximum time to block, or nil for indefinite. +// On timeout the operation may continue in the background, +// depending on state, and it is okay to call `InitTransactions()` +// again. +// Providing a nil context or a context without a deadline uses +// the timeout 2*transaction.timeout.ms. +// +// Returns nil on success or an error on failure. 
+// Check whether the returned error object permits retrying +// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal +// error has been raised by calling `err.(kafka.Error).IsFatal()`. +func (p *Producer) InitTransactions(ctx context.Context) error { + err := p.verifyClient() + if err != nil { + return err + } + cError := C.rd_kafka_init_transactions(p.handle.rk, + cTimeoutFromContext(ctx)) + if cError != nil { + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// BeginTransaction starts a new transaction. +// +// `InitTransactions()` must have been called successfully (once) +// before this function is called. +// +// Any messages produced, offsets sent (`SendOffsetsToTransaction()`), +// etc, after the successful return of this function will be part of +// the transaction and committed or aborted atomatically. +// +// Finish the transaction by calling `CommitTransaction()` or +// abort the transaction by calling `AbortTransaction()`. +// +// Returns nil on success or an error object on failure. +// Check whether a fatal error has been raised by +// calling `err.(kafka.Error).IsFatal()`. +// +// Note: With the transactional producer, `Produce()`, et.al, are only +// allowed during an on-going transaction, as started with this function. +// Any produce call outside an on-going transaction, or for a failed +// transaction, will fail. +func (p *Producer) BeginTransaction() error { + err := p.verifyClient() + if err != nil { + return err + } + cError := C.rd_kafka_begin_transaction(p.handle.rk) + if cError != nil { + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SendOffsetsToTransaction sends a list of topic partition offsets to the +// consumer group coordinator for `consumerMetadata`, and marks the offsets +// as part part of the current transaction. +// These offsets will be considered committed only if the transaction is +// committed successfully. +// +// The offsets should be the next message your application will consume, +// i.e., the last processed message's offset + 1 for each partition. +// Either track the offsets manually during processing or use +// `consumer.Position()` (on the consumer) to get the current offsets for +// the partitions assigned to the consumer. +// +// Use this method at the end of a consume-transform-produce loop prior +// to committing the transaction with `CommitTransaction()`. +// +// Parameters: +// - `ctx` - The maximum amount of time to block, or nil for indefinite. +// - `offsets` - List of offsets to commit to the consumer group upon +// successful commit of the transaction. Offsets should be +// the next message to consume, e.g., last processed message + 1. +// - `consumerMetadata` - The current consumer group metadata as returned by +// `consumer.GetConsumerGroupMetadata()` on the consumer +// instance the provided offsets were consumed from. +// +// Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer). +// +// Note: Logical and invalid offsets (e.g., OffsetInvalid) in +// `offsets` will be ignored. If there are no valid offsets in +// `offsets` the function will return nil and no action will be taken. +// +// Returns nil on success or an error object on failure. +// Check whether the returned error object permits retrying +// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable +// or fatal error has been raised by calling +// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` +// respectively. 
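The transactional methods documented here compose into the usual init/begin/commit cycle; the following is a hedged end-to-end sketch (transactional.id, broker address and topic are placeholders, and error handling is abbreviated):

package main

import (
	"context"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092", // placeholder
		"transactional.id":  "example-txn-id", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	// CommitTransaction requires delivery reports to be served concurrently.
	go func() {
		for range p.Events() {
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Fence older instances with the same transactional.id and acquire a PID.
	if err := p.InitTransactions(ctx); err != nil {
		log.Fatal(err)
	}
	if err := p.BeginTransaction(); err != nil {
		log.Fatal(err)
	}

	topic := "example-topic" // placeholder
	err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("transactional payload"),
	}, nil)
	if err != nil {
		_ = p.AbortTransaction(ctx) // do not leave the transaction open
		log.Fatal(err)
	}

	// Flushes outstanding messages and commits them atomically.
	if err := p.CommitTransaction(ctx); err != nil {
		if kerr, ok := err.(kafka.Error); ok && kerr.TxnRequiresAbort() {
			_ = p.AbortTransaction(ctx)
		}
		log.Fatal(err)
	}
}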
+func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error { + err := p.verifyClient() + if err != nil { + return err + } + var cOffsets *C.rd_kafka_topic_partition_list_t + if offsets != nil { + cOffsets = newCPartsFromTopicPartitions(offsets) + defer C.rd_kafka_topic_partition_list_destroy(cOffsets) + } + + cgmd, err := deserializeConsumerGroupMetadata(consumerMetadata.serialized) + if err != nil { + return err + } + defer C.rd_kafka_consumer_group_metadata_destroy(cgmd) + + cError := C.rd_kafka_send_offsets_to_transaction( + p.handle.rk, + cOffsets, + cgmd, + cTimeoutFromContext(ctx)) + if cError != nil { + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// CommitTransaction commits the current transaction. +// +// Any outstanding messages will be flushed (delivered) before actually +// committing the transaction. +// +// If any of the outstanding messages fail permanently the current +// transaction will enter the abortable error state and this +// function will return an abortable error, in this case the application +// must call `AbortTransaction()` before attempting a new +// transaction with `BeginTransaction()`. +// +// Parameters: +// - `ctx` - The maximum amount of time to block, or nil for indefinite. +// +// Note: This function will block until all outstanding messages are +// delivered and the transaction commit request has been successfully +// handled by the transaction coordinator, or until the `ctx` expires, +// which ever comes first. On timeout the application may +// call the function again. +// +// Note: Will automatically call `Flush()` to ensure all queued +// messages are delivered before attempting to commit the transaction. +// The application MUST serve the `producer.Events()` channel for delivery +// reports in a separate go-routine during this time. +// +// Returns nil on success or an error object on failure. +// Check whether the returned error object permits retrying +// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable +// or fatal error has been raised by calling +// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` +// respectively. +func (p *Producer) CommitTransaction(ctx context.Context) error { + err := p.verifyClient() + if err != nil { + return err + } + cError := C.rd_kafka_commit_transaction(p.handle.rk, + cTimeoutFromContext(ctx)) + if cError != nil { + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// AbortTransaction aborts the ongoing transaction. +// +// This function should also be used to recover from non-fatal abortable +// transaction errors. +// +// Any outstanding messages will be purged and fail with +// `ErrPurgeInflight` or `ErrPurgeQueue`. +// +// Parameters: +// - `ctx` - The maximum amount of time to block, or nil for indefinite. +// +// Note: This function will block until all outstanding messages are purged +// and the transaction abort request has been successfully +// handled by the transaction coordinator, or until the `ctx` expires, +// which ever comes first. On timeout the application may +// call the function again. +// +// Note: Will automatically call `Purge()` and `Flush()` to ensure all queued +// and in-flight messages are purged before attempting to abort the transaction. +// The application MUST serve the `producer.Events()` channel for delivery +// reports in a separate go-routine during this time. 
+// +// Returns nil on success or an error object on failure. +// Check whether the returned error object permits retrying +// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error +// has been raised by calling `err.(kafka.Error).IsFatal()`. +func (p *Producer) AbortTransaction(ctx context.Context) error { + err := p.verifyClient() + if err != nil { + return err + } + cError := C.rd_kafka_abort_transaction(p.handle.rk, + cTimeoutFromContext(ctx)) + if cError != nil { + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SetSaslCredentials sets the SASL credentials used for this producer. The new credentials +// will overwrite the old ones (which were set when creating the producer or by a previous +// call to SetSaslCredentials). The new credentials will be used the next time this +// producer needs to authenticate to a broker. This method will not disconnect +// existing broker connections that were established with the old credentials. +// This method applies only to the SASL PLAIN and SCRAM mechanisms. +func (p *Producer) SetSaslCredentials(username, password string) error { + err := p.verifyClient() + if err != nil { + return err + } + return setSaslCredentials(p.handle.rk, username, password) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/select_rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/select_rdkafka.h new file mode 100644 index 000000000..3cfd095b2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/select_rdkafka.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file uses a preprocessor macro defined by the various build_*.go +// files to determine whether to import the bundled librdkafka header, or +// the system one. +// This is needed because cgo will automatically add -I. 
to the include +// path, so would find a bundled header instead of +// the system one if it were called librdkafka/rdkafka.h instead of +// librdkafka_vendor/rdkafka.h + +#ifdef USE_VENDORED_LIBRDKAFKA +#include "librdkafka_vendor/rdkafka.h" +#include "librdkafka_vendor/rdkafka_mock.h" +#else +#include +#include +#endif diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/testconf-example.json b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/testconf-example.json new file mode 100644 index 000000000..349cdd379 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/testconf-example.json @@ -0,0 +1,12 @@ +{ + "Brokers": "mybroker or $BROKERS env", + "BrokersSasl": "mybroker or $BROKERSSASL env", + "SaslUsername": "testuser", + "SaslPassword": "testpass", + "SaslMechanism": "PLAIN", + "TopicName": "test", + "GroupID": "testgroup", + "PerfMsgCount": 1000000, + "PerfMsgSize": 100, + "Config": ["api.version.request=true"] +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/time.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/time.go new file mode 100644 index 000000000..ff93f0ad2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/time.go @@ -0,0 +1,55 @@ +/** + * Copyright 2019 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import "C" + +import ( + "context" + "time" +) + +const ( + cTimeoutInfinite = C.int(-1) // Blocks indefinitely until completion. + cTimeoutNoWait = C.int(0) // Returns immediately without blocking. +) + +// cTimeoutFromContext returns the remaining time after which work done on behalf of this context +// should be canceled, in milliseconds. +// +// If no deadline/timeout is set, or if the timeout does not fit in an int32, it returns +// cTimeoutInfinite; +// If there is no time left in this context, it returns cTimeoutNoWait. 
+func cTimeoutFromContext(ctx context.Context) C.int { + if ctx == nil { + return cTimeoutInfinite + } + timeout, hasTimeout := timeout(ctx) + if !hasTimeout { + return cTimeoutInfinite + } + if timeout <= 0 { + return cTimeoutNoWait + } + + timeoutMs := int64(timeout / time.Millisecond) + if int64(int32(timeoutMs)) < timeoutMs { + return cTimeoutInfinite + } + + return C.int(timeoutMs) +} diff --git a/vendor/github.com/openshift/library-go/pkg/certs/pem.go b/vendor/github.com/openshift/library-go/pkg/certs/pem.go deleted file mode 100644 index c3f7ff306..000000000 --- a/vendor/github.com/openshift/library-go/pkg/certs/pem.go +++ /dev/null @@ -1,57 +0,0 @@ -package certs - -import ( - "bytes" - "encoding/pem" - "io/ioutil" - "os" - "path/filepath" -) - -const ( - // StringSourceEncryptedBlockType is the PEM block type used to store an encrypted string - StringSourceEncryptedBlockType = "ENCRYPTED STRING" - // StringSourceKeyBlockType is the PEM block type used to store an encrypting key - StringSourceKeyBlockType = "ENCRYPTING KEY" -) - -func BlockFromFile(path string, blockType string) (*pem.Block, bool, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return nil, false, err - } - block, ok := BlockFromBytes(data, blockType) - return block, ok, nil -} - -func BlockFromBytes(data []byte, blockType string) (*pem.Block, bool) { - for { - block, remaining := pem.Decode(data) - if block == nil { - return nil, false - } - if block.Type == blockType { - return block, true - } - data = remaining - } -} - -func BlockToFile(path string, block *pem.Block, mode os.FileMode) error { - b, err := BlockToBytes(block) - if err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { - return err - } - return ioutil.WriteFile(path, b, mode) -} - -func BlockToBytes(block *pem.Block) ([]byte, error) { - b := bytes.Buffer{} - if err := pem.Encode(&b, block); err != nil { - return nil, err - } - return b.Bytes(), nil -} diff --git a/vendor/github.com/openshift/library-go/pkg/certs/util.go b/vendor/github.com/openshift/library-go/pkg/certs/util.go deleted file mode 100644 index 5ec6354a5..000000000 --- a/vendor/github.com/openshift/library-go/pkg/certs/util.go +++ /dev/null @@ -1,70 +0,0 @@ -package certs - -import ( - "crypto/x509" - "fmt" - "strings" - "time" -) - -const defaultOutputTimeFormat = "Jan 2 15:04:05 2006" - -// nowFn is used in unit test to freeze time. -var nowFn = time.Now().UTC - -// CertificateToString converts a certificate into a human readable string. -// This function should guarantee consistent output format for must-gather tooling and any code -// that prints the certificate details. 
-func CertificateToString(certificate *x509.Certificate) string { - humanName := certificate.Subject.CommonName - signerHumanName := certificate.Issuer.CommonName - - if certificate.Subject.CommonName == certificate.Issuer.CommonName { - signerHumanName = "" - } - - usages := []string{} - for _, curr := range certificate.ExtKeyUsage { - if curr == x509.ExtKeyUsageClientAuth { - usages = append(usages, "client") - continue - } - if curr == x509.ExtKeyUsageServerAuth { - usages = append(usages, "serving") - continue - } - - usages = append(usages, fmt.Sprintf("%d", curr)) - } - - validServingNames := []string{} - for _, ip := range certificate.IPAddresses { - validServingNames = append(validServingNames, ip.String()) - } - for _, dnsName := range certificate.DNSNames { - validServingNames = append(validServingNames, dnsName) - } - - servingString := "" - if len(validServingNames) > 0 { - servingString = fmt.Sprintf(" validServingFor=[%s]", strings.Join(validServingNames, ",")) - } - - groupString := "" - if len(certificate.Subject.Organization) > 0 { - groupString = fmt.Sprintf(" groups=[%s]", strings.Join(certificate.Subject.Organization, ",")) - } - - return fmt.Sprintf("%q [%s]%s%s issuer=%q (%v to %v (now=%v))", humanName, strings.Join(usages, ","), groupString, - servingString, signerHumanName, certificate.NotBefore.UTC().Format(defaultOutputTimeFormat), - certificate.NotAfter.UTC().Format(defaultOutputTimeFormat), nowFn().Format(defaultOutputTimeFormat)) -} - -// CertificateBundleToString converts a certificate bundle into a human readable string. -func CertificateBundleToString(bundle []*x509.Certificate) string { - output := []string{} - for i, cert := range bundle { - output = append(output, fmt.Sprintf("[#%d]: %s", i, CertificateToString(cert))) - } - return strings.Join(output, "\n") -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5deeb..b3c1699bf 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e2b298d85..43557ab7e 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1564,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { 
var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984fd..3b9f06b96 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c6408..ce2e8b40e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. 
- if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 000000000..61075bd16 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. 
+ // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. + blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// broadcast is sync.Cond.Broadcast. +func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// broadcast is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. 
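
// ---- illustrative sketch (annotation; not part of the vendored upstream code) ----
// testSyncHooks (above) and the timer interface defined further below exist so the
// transport can swap real timers for synthetic ones driven by fake time in tests.
// The same pattern in miniature, using only the standard time package; the names
// waker, realWaker, and newWaker are hypothetical, not from this patch:
type waker interface {
	C() <-chan time.Time
	Stop() bool
}

type realWaker struct{ t *time.Timer }

func (w realWaker) C() <-chan time.Time { return w.t.C }
func (w realWaker) Stop() bool          { return w.t.Stop() }

// Production code calls newWaker; a test hook could return a fake implementation instead.
func newWaker(d time.Duration) waker { return realWaker{t: time.NewTimer(d)} }
// ---- end sketch ----
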
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. +type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. +type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c..ce375c8c7 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. 
StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. +func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. 
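
// ---- illustrative sketch (annotation; not part of the vendored upstream code) ----
// The new Transport.IdleConnTimeout field above lets idle HTTP/2 keep-alive connections be
// closed independently of any wrapped http1 transport; per the idleConnTimeout hunk later in
// this file, a non-zero value here takes precedence over t1.IdleConnTimeout. Sketch (assumes
// imports net/http, time, golang.org/x/net/http2; the 30s value is an example):
func newIdleBoundedClient() *http.Client {
	return &http.Client{
		Transport: &http2.Transport{
			// zero falls back to the wrapped http1 transport's setting (none here)
			IdleConnTimeout: 30 * time.Second,
		},
	}
}
// ---- end sketch ----
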
- cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) 
(*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy 
of the logic in net/http. Unify somewhere? if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
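
// ---- illustrative sketch (annotation; not part of the vendored upstream code) ----
// encodeHeaders now runs the same validateHeaders check over req.Trailer as over req.Header,
// so a malformed trailer is rejected before any HPACK encoder state is touched. Hedged
// example of a request that now fails up front (assumes import net/http; built only to
// illustrate the validation, the URL is a placeholder):
func badTrailerSketch() (*http.Request, error) {
	req, err := http.NewRequest("POST", "https://example.invalid/", nil)
	if err != nil {
		return nil, err
	}
	req.Trailer = http.Header{"Bad Name": {"v"}} // the space makes the field name invalid
	// Sending this through the HTTP/2 transport now returns an
	// `invalid HTTP trailer name "Bad Name"` error per the hunk above.
	return req, nil
}
// ---- end sketch ----
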
- cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2911,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2955,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go index 69a4ac7ee..1e64157f3 100644 --- a/vendor/golang.org/x/net/websocket/client.go +++ b/vendor/golang.org/x/net/websocket/client.go @@ -6,10 +6,12 @@ package websocket import ( "bufio" + "context" "io" "net" "net/http" "net/url" + "time" ) // DialError is an error that occurs while dialling a websocket server. 
@@ -79,28 +81,59 @@ func parseAuthority(location *url.URL) string { // DialConfig opens a new client connection to a WebSocket with a config. func DialConfig(config *Config) (ws *Conn, err error) { - var client net.Conn + return config.DialContext(context.Background()) +} + +// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation. +func (config *Config) DialContext(ctx context.Context) (*Conn, error) { if config.Location == nil { return nil, &DialError{config, ErrBadWebSocketLocation} } if config.Origin == nil { return nil, &DialError{config, ErrBadWebSocketOrigin} } + dialer := config.Dialer if dialer == nil { dialer = &net.Dialer{} } - client, err = dialWithDialer(dialer, config) - if err != nil { - goto Error - } - ws, err = NewClient(config, client) + + client, err := dialWithDialer(ctx, dialer, config) if err != nil { - client.Close() - goto Error + return nil, &DialError{config, err} } - return -Error: - return nil, &DialError{config, err} + // Cleanup the connection if we fail to create the websocket successfully + success := false + defer func() { + if !success { + _ = client.Close() + } + }() + + var ws *Conn + var wsErr error + doneConnecting := make(chan struct{}) + go func() { + defer close(doneConnecting) + ws, err = NewClient(config, client) + if err != nil { + wsErr = &DialError{config, err} + } + }() + + // The websocket.NewClient() function can block indefinitely, make sure that we + // respect the deadlines specified by the context. + select { + case <-ctx.Done(): + // Force the pending operations to fail, terminating the pending connection attempt + _ = client.SetDeadline(time.Now()) + <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish + return nil, &DialError{config, ctx.Err()} + case <-doneConnecting: + if wsErr == nil { + success = true // Disarm the deferred connection cleanup + } + return ws, wsErr + } } diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go index 2dab943a4..8a2d83c47 100644 --- a/vendor/golang.org/x/net/websocket/dial.go +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -5,18 +5,23 @@ package websocket import ( + "context" "crypto/tls" "net" ) -func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { +func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) { switch config.Location.Scheme { case "ws": - conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) case "wss": - conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + tlsDialer := &tls.Dialer{ + NetDialer: dialer, + Config: config.TlsConfig, + } + conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) default: err = ErrBadScheme } diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4bd..b0e419857 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
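
// ---- illustrative sketch (annotation; not part of the vendored upstream code) ----
// The websocket change above adds Config.DialContext, so handshakes now honour context
// deadlines and cancellation (a stuck handshake is unblocked by forcing the connection
// deadline). Usage sketch (assumes imports context, time, golang.org/x/net/websocket;
// the URL and origin are placeholders):
func dialWithDeadline() (*websocket.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cfg, err := websocket.NewConfig("wss://example.invalid/ws", "https://example.invalid/")
	if err != nil {
		return nil, err
	}
	return cfg.DialContext(ctx)
}
// ---- end sketch ----
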
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc69937..2f0fa76e4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4db..2b57e0f73 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e6..5682e2628 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. 
+func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. 
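
// ---- illustrative sketch (annotation; not part of the vendored upstream code) ----
// The Fsconfig* helpers above wrap fsconfig(2) and compose with the existing Fsopen/Fsmount
// wrappers for the Linux new-mount API. Hedged sketch of the typical flow (Linux only;
// assumes the usual unix.FSOPEN_CLOEXEC and unix.FSMOUNT_CLOEXEC constants; the tmpfs
// parameters are examples and fd cleanup is minimal):
func mountTmpfsSketch() (int, error) {
	fsfd, err := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
	if err != nil {
		return -1, err
	}
	defer unix.Close(fsfd)
	if err := unix.FsconfigSetString(fsfd, "size", "16m"); err != nil {
		return -1, err
	}
	if err := unix.FsconfigCreate(fsfd); err != nil {
		return -1, err
	}
	// The returned mount fd can then be attached to the tree with move_mount(2).
	return unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, 0)
}
// ---- end sketch ----
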
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d2712..87d8612a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index dc0c955ee..eff6bcdef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -836,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1550,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1565,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1612,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1649,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1670,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1693,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO 
= 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1726,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1740,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1752,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1781,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1796,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1854,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1885,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( diff --git a/vendor/modules.txt b/vendor/modules.txt index a25ed314b..4c439d666 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -35,10 +35,13 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 # github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 ## explicit; go 1.18 github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 -# github.com/cloudevents/sdk-go/v2 v2.15.2 +# github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf ## explicit; go 1.18 github.com/cloudevents/sdk-go/v2 github.com/cloudevents/sdk-go/v2/binding @@ -54,6 +57,10 @@ github.com/cloudevents/sdk-go/v2/event/datacodec/xml github.com/cloudevents/sdk-go/v2/protocol github.com/cloudevents/sdk-go/v2/protocol/http github.com/cloudevents/sdk-go/v2/types +# github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 +## explicit; go 1.17 +github.com/confluentinc/confluent-kafka-go/v2/kafka +github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor # github.com/coreos/go-semver v0.3.1 ## explicit; 
go 1.8 github.com/coreos/go-semver/semver @@ -220,6 +227,10 @@ github.com/mitchellh/copystructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk +# github.com/moby/patternmatcher v0.6.0 +## explicit; go 1.19 +# github.com/moby/sys/sequential v0.5.0 +## explicit; go 1.17 # github.com/mochi-mqtt/server/v2 v2.4.6 ## explicit; go 1.21 github.com/mochi-mqtt/server/v2 @@ -291,7 +302,6 @@ github.com/openshift/build-machinery-go/make/targets/openshift/operator github.com/openshift/build-machinery-go/scripts # github.com/openshift/library-go v0.0.0-20240116081341-964bcb3f545c ## explicit; go 1.21 -github.com/openshift/library-go/pkg/certs github.com/openshift/library-go/pkg/crypto github.com/openshift/library-go/pkg/operator/events # github.com/pkg/errors v0.9.1 @@ -444,7 +454,7 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.19.0 +# golang.org/x/crypto v0.21.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -461,7 +471,7 @@ golang.org/x/crypto/scrypt ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.21.0 +# golang.org/x/net v0.23.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -484,14 +494,14 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.17.0 +# golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.17.0 +# golang.org/x/term v0.18.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -1391,31 +1401,36 @@ open-cluster-management.io/api/utils/work/v1/workapplier open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 -# open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 +# open-cluster-management.io/sdk-go v0.13.1-0.20240614070053-a01091a14da7 ## explicit; go 1.21 open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator +open-cluster-management.io/sdk-go/pkg/basecontroller/events +open-cluster-management.io/sdk-go/pkg/basecontroller/factory +open-cluster-management.io/sdk-go/pkg/cloudevents/constants open-cluster-management.io/sdk-go/pkg/cloudevents/generic open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/cert open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1 open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types open-cluster-management.io/sdk-go/pkg/cloudevents/work open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec -open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler 
+open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister open-cluster-management.io/sdk-go/pkg/cloudevents/work/common open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec -open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister +open-cluster-management.io/sdk-go/pkg/cloudevents/work/store open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils -open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher open-cluster-management.io/sdk-go/pkg/patcher # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 ## explicit; go 1.20 diff --git a/pkg/basecontroller/events/recorder.go b/vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/events/recorder.go similarity index 96% rename from pkg/basecontroller/events/recorder.go rename to vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/events/recorder.go index 7c67b03a8..dc23b1dff 100644 --- a/pkg/basecontroller/events/recorder.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/events/recorder.go @@ -76,12 +76,12 @@ func (r *recorder) WithComponentSuffix(suffix string) Recorder { return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) } -// Event emits the normal type event and allow formatting of message. +// Eventf emits the normal type event and allow formatting of message. func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) { r.Event(reason, fmt.Sprintf(messageFmt, args...)) } -// Warning emits the warning type event and allow formatting of message. +// Warningf emits the warning type event and allow formatting of message. 
func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) { r.Warning(reason, fmt.Sprintf(messageFmt, args...)) } diff --git a/pkg/basecontroller/factory/base_controller.go b/vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/base_controller.go similarity index 100% rename from pkg/basecontroller/factory/base_controller.go rename to vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/base_controller.go diff --git a/pkg/basecontroller/factory/controller_context.go b/vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/controller_context.go similarity index 100% rename from pkg/basecontroller/factory/controller_context.go rename to vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/controller_context.go diff --git a/pkg/basecontroller/factory/factory.go b/vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/factory.go similarity index 100% rename from pkg/basecontroller/factory/factory.go rename to vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/factory.go diff --git a/pkg/basecontroller/factory/interfaces.go b/vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/interfaces.go similarity index 100% rename from pkg/basecontroller/factory/interfaces.go rename to vendor/open-cluster-management.io/sdk-go/pkg/basecontroller/factory/interfaces.go diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/constants/constants.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/constants/constants.go new file mode 100644 index 000000000..28c0a6a6c --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/constants/constants.go @@ -0,0 +1,7 @@ +package constants + +const ( + ConfigTypeMQTT = "mqtt" + ConfigTypeGRPC = "grpc" + ConfigTypeKafka = "kafka" +) diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go index 417b31665..86237d8c6 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go @@ -213,7 +213,8 @@ func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents. // received resource status hash with the current resource status hash. If they are not equal, the agent sends the // resource status message. func (c *CloudEventAgentClient[T]) respondResyncStatusRequest( - ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event) error { + ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event, +) error { objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: evt.Source()}) if err != nil { return err diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go index 0ba5c7187..9c5316a4e 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go @@ -22,6 +22,15 @@ const ( stopReceiverSignal ) +// the reconnect backoff will stop at [1,5) min interval. If we don't backoff for 10min, we reset the backoff. 
+var DelayFn = wait.Backoff{ + Duration: 5 * time.Second, + Cap: 1 * time.Minute, + Steps: 12, // now a required argument + Factor: 5.0, + Jitter: 1.0, +}.DelayWithReset(&clock.RealClock{}, 10*time.Minute) + type receiveFn func(ctx context.Context, evt cloudevents.Event) type baseClient struct { @@ -43,32 +52,19 @@ func (c *baseClient) connect(ctx context.Context) error { // start a go routine to handle cloudevents client connection errors go func() { - var err error - - // the reconnect backoff will stop at [1,5) min interval. If we don't backoff for 10min, we reset the backoff. - delayFn := wait.Backoff{ - Duration: 5 * time.Second, - Cap: 1 * time.Minute, - Steps: 12, // now a required argument - Factor: 5.0, - Jitter: 1.0, - }.DelayWithReset(&clock.RealClock{}, 10*time.Minute) - cloudEventsClient := c.cloudEventsClient - for { - if cloudEventsClient == nil { + if c.currentClient() == nil { klog.V(4).Infof("reconnecting the cloudevents client") - c.cloudEventsClient, err = c.newCloudEventsClient(ctx) + cloudEventsClient, err := c.newCloudEventsClient(ctx) // TODO enhance the cloudevents SKD to avoid wrapping the error type to distinguish the net connection // errors if err != nil { // failed to reconnect, try agin runtime.HandleError(fmt.Errorf("the cloudevents client reconnect failed, %v", err)) - <-wait.RealTimer(delayFn()).C() + <-wait.RealTimer(DelayFn()).C() continue } - // the cloudevents network connection is back, refresh the current cloudevents client and send the // receiver restart signal klog.V(4).Infof("the cloudevents client is reconnected") @@ -99,11 +95,9 @@ func (c *baseClient) connect(ctx context.Context) error { if err != nil { runtime.HandleError(fmt.Errorf("failed to close the cloudevents protocol, %v", err)) } - cloudEventsClient = nil - - c.resetClient(cloudEventsClient) + c.resetClient(nil) - <-wait.RealTimer(delayFn()).C() + <-wait.RealTimer(DelayFn()).C() } } }() @@ -129,17 +123,14 @@ func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error { return err } - klog.V(4).Infof("Sent event: %v\n%s", ctx, evt) - - // make sure the current client is the newest - c.RLock() - defer c.RUnlock() + cloudEventsClient := c.currentClient() - if c.cloudEventsClient == nil { + if cloudEventsClient == nil { return fmt.Errorf("the cloudevents client is not ready") } - if result := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) { + klog.V(4).Infof("Sending event: %v\n%s", sendingCtx, evt) + if result := cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) { return fmt.Errorf("failed to send event %s, %v", evt, result) } @@ -161,12 +152,13 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { // start a go routine to handle cloudevents subscription go func() { receiverCtx, receiverCancel := context.WithCancel(context.TODO()) - cloudEventsClient := c.cloudEventsClient + cloudEventsClient := c.currentClient() for { if cloudEventsClient != nil { go func() { if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) { + klog.V(4).Infof("Received event: %s", evt) receive(receiverCtx, evt) }); err != nil { runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err)) @@ -188,11 +180,8 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { switch signal { case restartReceiverSignal: klog.V(4).Infof("restart the cloudevents receiver") - // make sure the current client is the newest - c.RLock() - cloudEventsClient = c.cloudEventsClient - 
c.RUnlock() - + // refresh the client + cloudEventsClient = c.currentClient() // rebuild the receiver context receiverCtx, receiverCancel = context.WithCancel(context.TODO()) case stopReceiverSignal: @@ -207,6 +196,14 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { }() } +func (c *baseClient) currentClient() cloudevents.Client { + // make sure the current client is the newest + c.RLock() + defer c.RUnlock() + + return c.cloudEventsClient +} + func (c *baseClient) resetClient(client cloudevents.Client) { c.Lock() defer c.Unlock() @@ -235,9 +232,10 @@ func (c *baseClient) newCloudEventsClient(ctx context.Context) (cloudevents.Clie if err != nil { return nil, err } - c.cloudEventsClient, err = cloudevents.NewClient(c.cloudEventsProtocol) + + cloudEventsClient, err := cloudevents.NewClient(c.cloudEventsProtocol) if err != nil { return nil, err } - return c.cloudEventsClient, nil + return cloudEventsClient, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/cert/cert_rotation.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/cert/cert_rotation.go new file mode 100644 index 000000000..10a98b6bf --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/cert/cert_rotation.go @@ -0,0 +1,214 @@ +package cert + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "reflect" + "sync" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +const workItemKey = "key" + +type Connection interface { + Close() error +} + +type reloadFunc func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// CertCallbackRefreshDuration is exposed so that integration tests can crank up the reload speed. +var CertCallbackRefreshDuration = 5 * time.Minute + +type clientCertRotating struct { + sync.RWMutex + + clientCert *tls.Certificate + + reload reloadFunc + conn Connection + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +func StartClientCertRotating(reload reloadFunc, conn Connection) { + r := &clientCertRotating{ + reload: reload, + conn: conn, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClientCertRotator"), + } + + go r.run(context.Background()) +} + +// run starts the controller and blocks until context is closed. 
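
// ---- illustrative sketch (annotation; not part of the vendored upstream code) ----
// StartClientCertRotating (above) polls the reload callback every CertCallbackRefreshDuration
// and closes the supplied connection when the client certificate changes, forcing a reconnect
// with the new credentials. Wiring sketch from a consumer's point of view (the file paths and
// the conn argument are placeholders; CachingCertificateLoader is defined further down in this
// file):
func startRotationSketch(conn cert.Connection) *tls.Config {
	getCert := cert.CachingCertificateLoader("/var/run/tls/tls.crt", "/var/run/tls/tls.key")
	reload := func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { return getCert() }
	cert.StartClientCertRotating(reload, conn)
	return &tls.Config{GetClientCertificate: reload}
}
// ---- end sketch ----
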
+func (c *clientCertRotating) run(ctx context.Context) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.V(3).Infof("Starting client certificate rotation controller") + defer klog.V(3).Infof("Shutting down client certificate rotation controller") + + go wait.Until(c.runWorker, time.Second, ctx.Done()) + + go func() { + if err := wait.PollUntilContextCancel( + ctx, + CertCallbackRefreshDuration, + true, + func(ctx context.Context) (bool, error) { + c.queue.Add(workItemKey) + return false, nil + }, + ); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to poll client certs: %w", err)) + } + }() + + <-ctx.Done() +} + +func (c *clientCertRotating) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *clientCertRotating) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadClientCert() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// loadClientCert calls the callback and rotates connections if needed +func (c *clientCertRotating) loadClientCert() error { + cert, err := c.reload(nil) + if err != nil { + return err + } + + // check to see if we have a change. If the values are the same, do nothing. + c.RLock() + haveCert := c.clientCert != nil + if certsEqual(c.clientCert, cert) { + c.RUnlock() + return nil + } + c.RUnlock() + + c.Lock() + c.clientCert = cert + c.Unlock() + + // The first certificate requested is not a rotation that is worth closing connections for + if !haveCert { + return nil + } + + if c.conn == nil { + return fmt.Errorf("no connection close function set") + } + + klog.V(1).Infof("certificate rotation detected, shutting down client connections to start using new credentials") + c.conn.Close() + + return nil +} + +// certsEqual compares tls Certificates, ignoring the Leaf which may get filled in dynamically +func certsEqual(left, right *tls.Certificate) bool { + if left == nil || right == nil { + return left == right + } + + if !byteMatrixEqual(left.Certificate, right.Certificate) { + return false + } + + if !reflect.DeepEqual(left.PrivateKey, right.PrivateKey) { + return false + } + + if !byteMatrixEqual(left.SignedCertificateTimestamps, right.SignedCertificateTimestamps) { + return false + } + + if !bytes.Equal(left.OCSPStaple, right.OCSPStaple) { + return false + } + + return true +} + +func byteMatrixEqual(left, right [][]byte) bool { + if len(left) != len(right) { + return false + } + + for i := range left { + if !bytes.Equal(left[i], right[i]) { + return false + } + } + return true +} + +type certificateCacheEntry struct { + cert *tls.Certificate + err error + birth time.Time +} + +// isStale returns true when this cache entry is too old to be usable +func (c *certificateCacheEntry) isStale() bool { + return time.Since(c.birth) > time.Second +} + +func newCertificateCacheEntry(certFile, keyFile string) certificateCacheEntry { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + return certificateCacheEntry{cert: &cert, err: err, birth: time.Now()} +} + +// CachingCertificateLoader ensures that we don't hammer the filesystem when opening many connections +// the underlying cert files are read at most once every second +func CachingCertificateLoader(certFile, keyFile string) func() (*tls.Certificate, error) { + current := newCertificateCacheEntry(certFile, keyFile) + var currentMtx sync.RWMutex + + return 
func() (*tls.Certificate, error) { + currentMtx.RLock() + if current.isStale() { + currentMtx.RUnlock() + + currentMtx.Lock() + defer currentMtx.Unlock() + + if current.isStale() { + current = newCertificateCacheEntry(certFile, keyFile) + } + } else { + defer currentMtx.RUnlock() + } + + return current.cert, current.err + } +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/agentoptions.go new file mode 100644 index 000000000..1b79b4b4c --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/agentoptions.go @@ -0,0 +1,99 @@ +//go:build kafka + +package kafka + +import ( + "context" + "fmt" + "strings" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/confluentinc/confluent-kafka-go/v2/kafka" + + confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type kafkaAgentOptions struct { + KafkaOptions + clusterName string + agentID string + errorChan chan error +} + +func NewAgentOptions(kafkaOptions *KafkaOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + kafkaAgentOptions := &kafkaAgentOptions{ + KafkaOptions: *kafkaOptions, + clusterName: clusterName, + agentID: agentID, + errorChan: make(chan error), + } + + groupID, err := kafkaOptions.ConfigMap.Get("group.id", "") + if groupID == "" || err != nil { + _ = kafkaOptions.ConfigMap.SetKey("group.id", agentID) + } + + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: kafkaAgentOptions, + AgentID: agentID, + ClusterName: clusterName, + } +} + +// encode the source and agent to the message key +func (o *kafkaAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, err + } + + // agent publishes event to status topic to send the resource status from a specified cluster + originalSource, err := evtCtx.GetExtension(types.ExtensionOriginalSource) + if err != nil { + return nil, err + } + + if eventType.Action == types.ResyncRequestAction && originalSource == types.SourceAll { + // TODO support multiple sources, agent may need a source list instead of the broadcast + topic := strings.Replace(agentBroadcastTopic, "*", o.clusterName, 1) + return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), o.clusterName), nil + } + + topic := strings.Replace(agentEventsTopic, "*", fmt.Sprintf("%s", originalSource), 1) + topic = strings.Replace(topic, "*", o.clusterName, 1) + messageKey := fmt.Sprintf("%s@%s", originalSource, o.clusterName) + return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), messageKey), nil +} + +func (o *kafkaAgentOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) { + protocol, err := confluent.New(confluent.WithConfigMap(&o.KafkaOptions.ConfigMap), + confluent.WithReceiverTopics([]string{ + fmt.Sprintf("^%s", replaceLast(sourceEventsTopic, "*", o.clusterName)), + fmt.Sprintf("^%s", sourceBroadcastTopic), + }), + confluent.WithSenderTopic("agentevents"), + confluent.WithErrorHandler(func(ctx context.Context, err kafka.Error) { + o.errorChan <- err + })) + if err != nil { + return nil, err 
+	}
+	producerEvents, _ := protocol.Events()
+	handleProduceEvents(producerEvents, o.errorChan)
+	return protocol, nil
+}
+
+func (o *kafkaAgentOptions) ErrorChan() <-chan error {
+	return o.errorChan
+}
+
+func replaceLast(str, old, new string) string {
+	last := strings.LastIndex(str, old)
+	if last == -1 {
+		return str
+	}
+	return str[:last] + new + str[last+len(old):]
+}
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/options.go
new file mode 100644
index 000000000..ca5c0d82f
--- /dev/null
+++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/options.go
@@ -0,0 +1,153 @@
+//go:build kafka
+
+package kafka
+
+import (
+	"fmt"
+	"os"
+
+	"gopkg.in/yaml.v2"
+	"k8s.io/klog/v2"
+
+	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
+)
+
+const (
+	// sourceEventsTopic is a topic for sources to publish their resource create/update/delete events, the first
+	// asterisk is a wildcard for source, the second asterisk is a wildcard for cluster.
+	sourceEventsTopic = "sourceevents.*.*"
+	// agentEventsTopic is a topic for agents to publish their resource status update events, the first
+	// asterisk is a wildcard for source, the second asterisk is a wildcard for cluster.
+	agentEventsTopic = "agentevents.*.*"
+	// sourceBroadcastTopic is for a source to publish its events to all agents, the asterisk is a wildcard for source.
+	sourceBroadcastTopic = "sourcebroadcast.*"
+	// agentBroadcastTopic is for an agent to publish its events to all sources, the asterisk is a wildcard for cluster.
+	agentBroadcastTopic = "agentbroadcast.*"
+)
+
+type KafkaOptions struct {
+	ConfigMap kafka.ConfigMap
+}
+
+type KafkaConfig struct {
+	// BootstrapServer is the host of the Kafka broker (hostname:port).
+	BootstrapServer string `json:"bootstrapServer" yaml:"bootstrapServer"`
+
+	// CAFile is the file path to a cert file for the Kafka broker certificate authority.
+	CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"`
+	// ClientCertFile is the file path to a client cert file for TLS.
+	ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"`
+	// ClientKeyFile is the file path to a client key file for TLS.
+	ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"`
+
+	// GroupID is a string that uniquely identifies the group of consumer processes to which this consumer belongs.
+	// Each application has a unique consumer GroupID. The default value is the agentID for an agent and the sourceID for a source.
+	GroupID string `json:"groupID,omitempty" yaml:"groupID,omitempty"`
+}
+
+// Listen to all the events on the default events channel.
+// It's important to read these events, otherwise the events channel will eventually fill up.
+// Detail: https://github.com/cloudevents/sdk-go/blob/main/protocol/kafka_confluent/v2/protocol.go#L90
+func handleProduceEvents(producerEvents chan kafka.Event, errChan chan error) {
+	if producerEvents == nil {
+		return
+	}
+	go func() {
+		for e := range producerEvents {
+			switch ev := e.(type) {
+			case *kafka.Message:
+				// The message delivery report, indicating success or failure when sending a message
+				if ev.TopicPartition.Error != nil {
+					klog.Errorf("Delivery failed: %v", ev.TopicPartition.Error)
+				}
+			case kafka.Error:
+				// Generic client instance-level errors, such as
+				// broker connection failures, authentication issues, etc.
+				if ev.Code() == kafka.ErrAllBrokersDown {
+					// ALL_BROKERS_DOWN doesn't really mean anything to librdkafka, it is just a friendly indication
+					// to the application that currently there are no brokers to communicate with.
+					// But librdkafka will continue to try to reconnect indefinitely,
+					// and it will attempt to re-send messages until message.timeout.ms or message.max.retries are exceeded.
+					klog.V(4).Infof("Producer received the error %v", ev)
+				} else {
+					errChan <- fmt.Errorf("client error %w", ev)
+				}
+			}
+		}
+	}()
+}
+
+// BuildKafkaOptionsFromFlags builds configs from a config filepath.
+func BuildKafkaOptionsFromFlags(configPath string) (*KafkaOptions, error) {
+	configData, err := os.ReadFile(configPath)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: failed to unmarshal the data to kafka.ConfigMap directly.
+	// Further investigation is required to understand the reasons behind it.
+	config := &KafkaConfig{}
+	if err := yaml.Unmarshal(configData, config); err != nil {
+		return nil, err
+	}
+
+	if config.BootstrapServer == "" {
+		return nil, fmt.Errorf("bootstrapServer is required")
+	}
+
+	if (config.ClientCertFile == "" && config.ClientKeyFile != "") ||
+		(config.ClientCertFile != "" && config.ClientKeyFile == "") {
+		return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set")
+	}
+	if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" {
+		return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile")
+	}
+
+	configMap := kafka.ConfigMap{
+		"bootstrap.servers":       config.BootstrapServer,
+		"socket.keepalive.enable": true,
+		// silence spontaneous disconnection logs, kafka recovers by itself.
+		"log.connection.close": false,
+		// https://github.com/confluentinc/librdkafka/issues/4349
+		"ssl.endpoint.identification.algorithm": "none",
+		// the events channel server for both producer and consumer
+		"go.events.channel.size": 1000,
+
+		// producer
+		"acks":    "1",
+		"retries": "0",
+
+		// consumer
+		"group.id": config.GroupID,
+
+		// If true the consumer’s offset will be periodically committed in the background.
+		"enable.auto.commit": true,
+
+		// If true (default) the client will automatically store the offset+1 of the message just prior to passing the message to the application.
+		// The offset is stored in memory and will be used by the next call to commit() (without explicit offsets specified) or the next auto commit.
+		// If false and enable.auto.commit=true, the application will manually have to call rd_kafka_offset_store() to store the offset to auto commit. (optional)
+		"enable.auto.offset.store":   true,
+		"queued.max.messages.kbytes": 32768, // 32 MB
+
+		// earliest: automatically reset the offset to the earliest offset
+		// latest: automatically reset the offset to the latest offset
+		// We must use earliest because the source client may not have started watching a new topic
+		// when the agent is sending events to that topic;
+		// the source client may lose events if we set it to latest.
+ "auto.offset.reset": "earliest", + + // The frequency in milliseconds that the consumer offsets are commited (written) to offset storage + "auto.commit.interval.ms": 5000, + } + + if config.ClientCertFile != "" { + _ = configMap.SetKey("security.protocol", "ssl") + _ = configMap.SetKey("ssl.ca.location", config.CAFile) + _ = configMap.SetKey("ssl.certificate.location", config.ClientCertFile) + _ = configMap.SetKey("ssl.key.location", config.ClientKeyFile) + } + + return &KafkaOptions{ + ConfigMap: configMap, + }, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/options_noop.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/options_noop.go new file mode 100644 index 000000000..2851097cc --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/options_noop.go @@ -0,0 +1,28 @@ +//go:build !kafka + +// This is the dummy code to pass the compile when Kafka is not enabled. +// Cannot enable Kafka by default due to confluent-kafka-go not supporting cross-compilation. +// Try adding -tags=kafka to build when you need Kafka. +package kafka + +import ( + "fmt" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" +) + +type KafkaOptions struct { + ConfigMap map[string]interface{} +} + +func NewSourceOptions(configMap *KafkaOptions, sourceID string) *options.CloudEventsSourceOptions { + return nil +} + +func NewAgentOptions(configMap *KafkaOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + return nil +} + +func BuildKafkaOptionsFromFlags(configPath string) (*KafkaOptions, error) { + return nil, fmt.Errorf("try adding -tags=kafka to build") +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/sourceoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/sourceoptions.go new file mode 100644 index 000000000..0d9bfc59b --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka/sourceoptions.go @@ -0,0 +1,89 @@ +//go:build kafka + +package kafka + +import ( + "context" + "fmt" + "strings" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/confluentinc/confluent-kafka-go/v2/kafka" + + confluent "github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type kafkaSourceOptions struct { + KafkaOptions + sourceID string + errorChan chan error +} + +func NewSourceOptions(kafkaOptions *KafkaOptions, sourceID string) *options.CloudEventsSourceOptions { + sourceOptions := &kafkaSourceOptions{ + KafkaOptions: *kafkaOptions, + sourceID: sourceID, + errorChan: make(chan error), + } + + groupID, err := kafkaOptions.ConfigMap.Get("group.id", "") + if groupID == "" || err != nil { + _ = kafkaOptions.ConfigMap.SetKey("group.id", sourceID) + } + + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: sourceOptions, + SourceID: sourceID, + } +} + +func (o *kafkaSourceOptions) WithContext(ctx context.Context, + evtCtx cloudevents.EventContext, +) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, err + } + + clusterName, err := evtCtx.GetExtension(types.ExtensionClusterName) + if err != nil { + return nil, err + } + + if eventType.Action 
== types.ResyncRequestAction && clusterName == types.ClusterAll { + // source request to get resources status from all agents + topic := strings.Replace(sourceBroadcastTopic, "*", o.sourceID, 1) + return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), o.sourceID), nil + } + + // source publishes event to source topic to send the resource spec to a specified cluster + messageKey := fmt.Sprintf("%s@%s", o.sourceID, clusterName) + topic := strings.Replace(sourceEventsTopic, "*", o.sourceID, 1) + topic = strings.Replace(topic, "*", fmt.Sprintf("%s", clusterName), 1) + return confluent.WithMessageKey(cloudeventscontext.WithTopic(ctx, topic), messageKey), nil +} + +func (o *kafkaSourceOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) { + protocol, err := confluent.New(confluent.WithConfigMap(&o.KafkaOptions.ConfigMap), + confluent.WithReceiverTopics([]string{ + fmt.Sprintf("^%s", strings.Replace(agentEventsTopic, "*", o.sourceID, 1)), + fmt.Sprintf("^%s", agentBroadcastTopic), + }), + confluent.WithSenderTopic("sourceevents"), + confluent.WithErrorHandler(func(ctx context.Context, err kafka.Error) { + o.errorChan <- err + })) + if err != nil { + return nil, err + } + producerEvents, _ := protocol.Events() + handleProduceEvents(producerEvents, o.errorChan) + return protocol, nil +} + +func (o *kafkaSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go index cfce3004c..d0ed8ff26 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go @@ -16,7 +16,9 @@ import ( "github.com/eclipse/paho.golang/paho" "gopkg.in/yaml.v2" "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/cert" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" ) @@ -28,19 +30,53 @@ const ( MQTT_AGENT_PUB_TOPIC_KEY TopicKey = "mqtt_agent_pub_topic" ) +type MQTTDialer struct { + TLSConfig *tls.Config + BrokerHost string + Timeout time.Duration + + conn net.Conn +} + +func (d *MQTTDialer) Dial() (net.Conn, error) { + if d.TLSConfig != nil { + conn, err := tls.DialWithDialer(&net.Dialer{Timeout: d.Timeout}, "tcp", d.BrokerHost, d.TLSConfig) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", d.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + d.conn = packets.NewThreadSafeConn(conn) + return d.conn, nil + } + + conn, err := net.DialTimeout("tcp", d.BrokerHost, d.Timeout) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", d.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + d.conn = packets.NewThreadSafeConn(conn) + return d.conn, nil +} + +func (d *MQTTDialer) Close() error { + if d.conn != nil { + return d.conn.Close() + } + return nil +} + // MQTTOptions holds the options that are used to build MQTT client. 
type MQTTOptions struct { - Topics types.Topics - BrokerHost string - Username string - Password string - CAFile string - ClientCertFile string - ClientKeyFile string - KeepAlive uint16 - DialTimeout time.Duration - PubQoS int - SubQoS int + Topics types.Topics + Username string + Password string + KeepAlive uint16 + PubQoS int + SubQoS int + + Dialer *MQTTDialer } // MQTTConfig holds the information needed to build connect to MQTT broker as a given user. @@ -95,36 +131,24 @@ func BuildMQTTOptionsFromFlags(configPath string) (*MQTTOptions, error) { (config.ClientCertFile != "" && config.ClientKeyFile == "") { return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set") } - if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { - return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") - } if err := validateTopics(config.Topics); err != nil { return nil, err } options := &MQTTOptions{ - BrokerHost: config.BrokerHost, - Username: config.Username, - Password: config.Password, - CAFile: config.CAFile, - ClientCertFile: config.ClientCertFile, - ClientKeyFile: config.ClientKeyFile, - KeepAlive: 60, - PubQoS: 1, - SubQoS: 1, - DialTimeout: 60 * time.Second, - Topics: *config.Topics, + Username: config.Username, + Password: config.Password, + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + Topics: *config.Topics, } if config.KeepAlive != nil { options.KeepAlive = *config.KeepAlive } - if config.DialTimeout != nil { - options.DialTimeout = *config.DialTimeout - } - if config.PubQoS != nil { options.PubQoS = *config.PubQoS } @@ -133,49 +157,40 @@ func BuildMQTTOptionsFromFlags(configPath string) (*MQTTOptions, error) { options.SubQoS = *config.SubQoS } - return options, nil -} - -func (o *MQTTOptions) GetNetConn() (net.Conn, error) { - if len(o.CAFile) != 0 { - certPool, err := x509.SystemCertPool() - if err != nil { - return nil, err - } + dialTimeout := 60 * time.Second + if config.DialTimeout != nil { + dialTimeout = *config.DialTimeout + } - caPEM, err := os.ReadFile(o.CAFile) + if config.ClientCertFile != "" && config.ClientKeyFile != "" { + certPool, err := rootCAs(config.CAFile) if err != nil { return nil, err } - if ok := certPool.AppendCertsFromPEM(caPEM); !ok { - return nil, fmt.Errorf("invalid CA %s", o.CAFile) - } - - clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) - if err != nil { - return nil, err + tlsConfig := &tls.Config{ + RootCAs: certPool, + GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return cert.CachingCertificateLoader(config.ClientCertFile, config.ClientKeyFile)() + }, } - conn, err := tls.DialWithDialer(&net.Dialer{Timeout: o.DialTimeout}, "tcp", o.BrokerHost, &tls.Config{ - RootCAs: certPool, - Certificates: []tls.Certificate{clientCerts}, - }) - if err != nil { - return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + options.Dialer = &MQTTDialer{ + BrokerHost: config.BrokerHost, + TLSConfig: tlsConfig, + Timeout: dialTimeout, } - // ensure parallel writes are thread-Safe - return packets.NewThreadSafeConn(conn), nil + // start a goroutine to periodically refresh client certificates for this connection + cert.StartClientCertRotating(tlsConfig.GetClientCertificate, options.Dialer) + return options, nil } - conn, err := net.DialTimeout("tcp", o.BrokerHost, o.DialTimeout) - if err != nil { - return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + 
options.Dialer = &MQTTDialer{ + BrokerHost: config.BrokerHost, + Timeout: dialTimeout, } - - // ensure parallel writes are thread-Safe - return packets.NewThreadSafeConn(conn), nil + return options, nil } func (o *MQTTOptions) GetMQTTConnectOption(clientID string) *paho.Connect { @@ -204,7 +219,7 @@ func (o *MQTTOptions) GetCloudEventsProtocol( errorHandler func(error), clientOpts ...cloudeventsmqtt.Option, ) (options.CloudEventsProtocol, error) { - netConn, err := o.GetNetConn() + netConn, err := o.Dialer.Dial() if err != nil { return nil, err } @@ -319,3 +334,29 @@ func getAgentPubTopic(ctx context.Context) (*PubTopic, error) { return nil, fmt.Errorf("invalid agent pub topic") } + +// rootCAs returns a cert pool to verify the TLS connection. +// If the caFile is not provided, the default system certificate pool will be returned +// If the caFile is provided, the provided CA will be appended to the system certificate pool +func rootCAs(caFile string) (*x509.CertPool, error) { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + if len(caFile) == 0 { + klog.Warningf("CA file is not provided, TLS connection will be verified with the system cert pool") + return certPool, nil + } + + caPEM, err := os.ReadFile(caFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", caFile) + } + + return certPool, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go index 9bd231c89..388b5506c 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go @@ -11,6 +11,8 @@ import ( // // Available implementations: // - MQTT +// - KAFKA +// - gRPC type CloudEventsOptions interface { // WithContext returns back a new context with the given cloudevent context. The new context will be used when // sending a cloudevent.The new context is protocol-dependent, for example, for MQTT, the new context should contain diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/optionsbuilder.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/optionsbuilder.go new file mode 100644 index 000000000..b839c86af --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/optionsbuilder.go @@ -0,0 +1,95 @@ +package generic + +import ( + "fmt" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/constants" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/kafka" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" +) + +// ConfigLoader loads a configuration object with a configuration file. +type ConfigLoader struct { + configType string + configPath string +} + +// NewConfigLoader returns a ConfigLoader with the given configuration type and configuration file path. 
+// +// Available configuration types: +// - mqtt +// - grpc +// - kafka +func NewConfigLoader(configType, configPath string) *ConfigLoader { + return &ConfigLoader{ + configType: configType, + configPath: configPath, + } +} + +// TODO using a specified config instead of any +func (l *ConfigLoader) LoadConfig() (string, any, error) { + switch l.configType { + case constants.ConfigTypeMQTT: + mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(l.configPath) + if err != nil { + return "", nil, err + } + + return mqttOptions.Dialer.BrokerHost, mqttOptions, nil + case constants.ConfigTypeGRPC: + grpcOptions, err := grpc.BuildGRPCOptionsFromFlags(l.configPath) + if err != nil { + return "", nil, err + } + + return grpcOptions.URL, grpcOptions, nil + + case constants.ConfigTypeKafka: + kafkaOptions, err := kafka.BuildKafkaOptionsFromFlags(l.configPath) + if err != nil { + return "", nil, err + } + val, found := kafkaOptions.ConfigMap["bootstrap.servers"] + if found { + server, ok := val.(string) + if !ok { + return "", nil, fmt.Errorf("failed to get kafka bootstrap.servers from configMap") + } + return server, kafkaOptions, nil + } + return "", nil, fmt.Errorf("failed to get kafka bootstrap.servers from configMap") + } + + return "", nil, fmt.Errorf("unsupported config type %s", l.configType) +} + +// BuildCloudEventsSourceOptions builds the cloudevents source options based on the broker type +func BuildCloudEventsSourceOptions(config any, clientId, sourceId string) (*options.CloudEventsSourceOptions, error) { + switch config := config.(type) { + case *mqtt.MQTTOptions: + return mqtt.NewSourceOptions(config, clientId, sourceId), nil + case *grpc.GRPCOptions: + return grpc.NewSourceOptions(config, sourceId), nil + case *kafka.KafkaOptions: + return kafka.NewSourceOptions(config, sourceId), nil + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} + +// BuildCloudEventsAgentOptions builds the cloudevents agent options based on the broker type +func BuildCloudEventsAgentOptions(config any, clusterName, clientId string) (*options.CloudEventsAgentOptions, error) { + switch config := config.(type) { + case *mqtt.MQTTOptions: + return mqtt.NewAgentOptions(config, clusterName, clientId), nil + case *grpc.GRPCOptions: + return grpc.NewAgentOptions(config, clusterName, clientId), nil + case *kafka.KafkaOptions: + return kafka.NewAgentOptions(config, clusterName, clientId), nil + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go index 58da81556..b27c1adf7 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go @@ -176,31 +176,14 @@ func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents return } - clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) - if err != nil { - klog.Errorf("failed to find cluster name, %v", err) - return - } - obj, err := codec.Decode(&evt) if err != nil { klog.Errorf("failed to decode status, %v", err) return } - action, err := c.statusAction(fmt.Sprintf("%s", clusterName), obj) - if err != nil { - klog.Errorf("failed to generate status event %s, %v", evt, err) - return - } - - if len(action) == 0 { - // no action is required, ignore - return - } - for _, 
handler := range handlers {
-		if err := handler(action, obj); err != nil {
+		if err := handler(types.StatusModified, obj); err != nil {
 			klog.Errorf("failed to handle status event %s, %v", evt, err)
 		}
 	}
@@ -215,7 +198,8 @@ func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents
 // - If the requested resource version is older than the source's current maintained resource version, the source
 // sends the resource.
 func (c *CloudEventSourceClient[T]) respondResyncSpecRequest(
-	ctx context.Context, evtDataType types.CloudEventsDataType, evt cloudevents.Event) error {
+	ctx context.Context, evtDataType types.CloudEventsDataType, evt cloudevents.Event,
+) error {
 	resourceVersions, err := payload.DecodeSpecResyncRequest(evt)
 	if err != nil {
 		return err
@@ -237,14 +221,32 @@ func (c *CloudEventSourceClient[T]) respondResyncSpecRequest(
 		return err
 	}
+	// TODO we cannot list objs now, the lister may not be ready, we may need to add HasSynced
+	// for the lister
+	if len(objs) == 0 {
+		klog.V(4).Infof("there are no objs from the list, do nothing")
+		return nil
+	}
+
 	for _, obj := range objs {
+		// respond with the deleting resource regardless of the resource version
+		if !obj.GetDeletionTimestamp().IsZero() {
+			if err := c.Publish(ctx, eventType, obj); err != nil {
+				return err
+			}
+			continue
+		}
+
 		lastResourceVersion := findResourceVersion(string(obj.GetUID()), resourceVersions.Versions)
 		currentResourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64)
 		if err != nil {
+			klog.V(4).Infof("ignore the obj %v since it has an invalid resourceVersion, %v", obj, err)
 			continue
 		}
-		if currentResourceVersion > lastResourceVersion {
+		// the version of the work is not maintained on the source or the source's work is newer than the agent's, send
+		// the newer work to the agent
+		if currentResourceVersion == 0 || currentResourceVersion > lastResourceVersion {
 			if err := c.Publish(ctx, eventType, obj); err != nil {
 				return err
 			}
@@ -273,36 +275,6 @@ func (c *CloudEventSourceClient[T]) respondResyncSpecRequest(
 	return nil
 }

-func (c *CloudEventSourceClient[T]) statusAction(clusterName string, obj T) (evt types.ResourceAction, err error) {
-	objs, err := c.lister.List(types.ListOptions{ClusterName: clusterName, Source: c.sourceID})
-	if err != nil {
-		return evt, err
-	}
-
-	lastObj, exists := getObj(string(obj.GetUID()), objs)
-	if !exists {
-		return evt, nil
-	}
-
-	lastStatusHash, err := c.statusHashGetter(lastObj)
-	if err != nil {
-		klog.Warningf("failed to hash object %s status, %v", lastObj.GetUID(), err)
-		return evt, err
-	}
-
-	currentStatusHash, err := c.statusHashGetter(obj)
-	if err != nil {
-		klog.Warningf("failed to hash object %s status, %v", obj.GetUID(), err)
-		return evt, nil
-	}
-
-	if lastStatusHash == currentStatusHash {
-		return evt, nil
-	}
-
-	return types.StatusModified, nil
-}
-
 func findResourceVersion(id string, versions []payload.ResourceVersion) int64 {
 	for _, version := range versions {
 		if id == version.ResourceID {
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go
index b39361162..1c0ff503c 100644
--- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go
+++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go
@@ -186,7 +186,7 @@ func ParseCloudEventsType(cloudEventsType string) (*CloudEventsType, error) {
 	types := strings.Split(cloudEventsType, ".")
 	length := len(types)
 	if
length < 5 { - return nil, fmt.Errorf("unsupported cloudevents type format") + return nil, fmt.Errorf("unsupported cloudevents type format: %s", cloudEventsType) } subResource := EventSubResource(types[length-2]) diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go index 2b0624241..47754f67f 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go @@ -12,36 +12,37 @@ import ( "k8s.io/klog/v2" workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" - workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" ) // ManifestWorkAgentClient implements the ManifestWorkInterface. It sends the manifestworks status back to source by // CloudEventAgentClient. type ManifestWorkAgentClient struct { cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork] - watcher *watcher.ManifestWorkWatcher - lister workv1lister.ManifestWorkNamespaceLister + watcherStore store.WorkClientWatcherStore + clusterName string } var _ workv1client.ManifestWorkInterface = &ManifestWorkAgentClient{} -func NewManifestWorkAgentClient(cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork], watcher *watcher.ManifestWorkWatcher) *ManifestWorkAgentClient { +func NewManifestWorkAgentClient( + cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork], + watcherStore store.WorkClientWatcherStore, + clusterName string, +) *ManifestWorkAgentClient { return &ManifestWorkAgentClient{ cloudEventsClient: cloudEventsClient, - watcher: watcher, + watcherStore: watcherStore, + clusterName: clusterName, } } -func (c *ManifestWorkAgentClient) SetLister(lister workv1lister.ManifestWorkNamespaceLister) { - c.lister = lister -} - func (c *ManifestWorkAgentClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "create") } @@ -64,27 +65,32 @@ func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts met func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { klog.V(4).Infof("getting manifestwork %s", name) - return c.lister.Get(name) + return c.watcherStore.Get(c.clusterName, name) } func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { - klog.V(4).Infof("sync manifestworks") - // send resync request to fetch manifestworks from source when the ManifestWorkInformer starts - if err := c.cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + klog.V(4).Infof("list manifestworks") + works, err := c.watcherStore.List(opts) + if err != nil { return nil, err } - return &workv1.ManifestWorkList{}, nil + items := []workv1.ManifestWork{} + for _, work := range 
works { + items = append(items, *work) + } + + return &workv1.ManifestWorkList{Items: items}, nil } func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.watcher, nil + return c.watcherStore, nil } func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { klog.V(4).Infof("patching manifestwork %s", name) - lastWork, err := c.lister.Get(name) + lastWork, err := c.watcherStore.Get(c.clusterName, name) if err != nil { return nil, err } @@ -117,8 +123,10 @@ func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kub return nil, err } - // refresh the work status in the ManifestWorkInformer local cache with patched work. - c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + if err := c.watcherStore.Update(newWork); err != nil { + return nil, err + + } return newWork, nil } @@ -137,13 +145,17 @@ func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kub return nil, err } - // delete the manifestwork from the ManifestWorkInformer local cache. - c.watcher.Receive(watch.Event{Type: watch.Deleted, Object: newWork}) + if err := c.watcherStore.Delete(newWork); err != nil { + return nil, err + } + return newWork, nil } - // refresh the work in the ManifestWorkInformer local cache with patched work. - c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + if err := c.watcherStore.Update(newWork); err != nil { + return nil, err + } + return newWork, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go index bb0f4f9ad..5ffc5726a 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go @@ -65,8 +65,12 @@ func (c *ManifestCodec) Encode(source string, eventType types.CloudEventsType, w return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) } - if len(work.Spec.Workload.Manifests) != 1 { - return nil, fmt.Errorf("too many manifests in the work %s", work.UID) + // for the manifest deletion case: no manifest in the spec will be rebuilt in the cache upon agent restart. + // for status update cases other than manifest deletion case, there should be only one manifest in the work. + if !meta.IsStatusConditionTrue(work.Status.Conditions, common.ManifestsDeleted) { + if len(work.Spec.Workload.Manifests) != 1 { + return nil, fmt.Errorf("too many manifests in the work %s", work.UID) + } } evt := types.NewEventBuilder(source, eventType). @@ -142,6 +146,9 @@ func (c *ManifestCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, er return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) } + // In the case of an agent restart, the manifestwork finalizer is cleared. + // Explicitly re-add the finalizer to ensure proper cleanup of the manifestwork. 
+ work.Finalizers = []string{workv1.ManifestWorkFinalizer} work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} return work, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go index fc17ffaa4..ec2a0cd1f 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go @@ -115,6 +115,9 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) } + // In the case of an agent restart, the manifestwork finalizer is cleared. + // Explicitly re-add the finalizer to ensure proper cleanup of the manifestwork. + work.Finalizers = []string{workv1.ManifestWorkFinalizer} work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} return work, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go deleted file mode 100644 index 983c7634c..000000000 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go +++ /dev/null @@ -1,59 +0,0 @@ -package handler - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/watch" - workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" - - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" -) - -// NewManifestWorkAgentHandler returns a ResourceHandler for a ManifestWork on managed cluster. It sends the kube events -// with ManifestWorWatcher after CloudEventAgentClient received the ManifestWork specs from source, then the -// ManifestWorkInformer handles the kube events in its local cache. -func NewManifestWorkAgentHandler(lister workv1lister.ManifestWorkNamespaceLister, watcher *watcher.ManifestWorkWatcher) generic.ResourceHandler[*workv1.ManifestWork] { - return func(action types.ResourceAction, work *workv1.ManifestWork) error { - switch action { - case types.Added: - watcher.Receive(watch.Event{Type: watch.Added, Object: work}) - case types.Modified: - lastWork, err := lister.Get(work.Name) - if err != nil { - return err - } - - updatedWork := work.DeepCopy() - - // restore the fields that are maintained by local agent - updatedWork.Labels = lastWork.Labels - updatedWork.Annotations = lastWork.Annotations - updatedWork.Finalizers = lastWork.Finalizers - updatedWork.Status = lastWork.Status - - watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) - case types.Deleted: - // the manifestwork is deleting on the source, we just update its deletion timestamp. 
- lastWork, err := lister.Get(work.Name) - if errors.IsNotFound(err) { - return nil - } - - if err != nil { - return err - } - - updatedWork := lastWork.DeepCopy() - updatedWork.DeletionTimestamp = work.DeletionTimestamp - watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) - default: - return fmt.Errorf("unsupported resource action %s", action) - } - - return nil - } -} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister/lister.go new file mode 100644 index 000000000..e67bd9c1b --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister/lister.go @@ -0,0 +1,35 @@ +package lister + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + workv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" +) + +// WatcherStoreLister list the ManifestWorks from WorkClientWatcherStore +type WatcherStoreLister struct { + store store.WorkClientWatcherStore +} + +func NewWatcherStoreLister(store store.WorkClientWatcherStore) *WatcherStoreLister { + return &WatcherStoreLister{ + store: store, + } +} + +// List returns the ManifestWorks from a WorkClientWatcherStore with list options +func (l *WatcherStoreLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { + opts := metav1.ListOptions{} + + if options.Source != types.SourceAll { + opts.LabelSelector = fmt.Sprintf("%s=%s", common.CloudEventsOriginalSourceLabelKey, options.Source) + } + + return l.store.List(opts) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go index c75dd34c4..4d2a96f26 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go @@ -3,38 +3,28 @@ package work import ( "context" "fmt" - "time" - "k8s.io/client-go/rest" "k8s.io/klog/v2" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" - workinformers "open-cluster-management.io/api/client/work/informers/externalversions" - workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" agentclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client" - agenthandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler" + agentlister "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal" sourceclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client" - sourcehandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler" - 
"open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" + sourcelister "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" ) -const defaultInformerResyncTime = 10 * time.Minute - // ClientHolder holds a manifestwork client that implements the ManifestWorkInterface based on different configuration -// and a ManifestWorkInformer that is built with the manifestWork client. // // ClientHolder also implements the ManifestWorksGetter interface. type ClientHolder struct { - workClientSet workclientset.Interface - manifestWorkInformer workv1informers.ManifestWorkInformer + workClientSet workclientset.Interface } var _ workv1client.ManifestWorksGetter = &ClientHolder{} @@ -49,34 +39,29 @@ func (h *ClientHolder) ManifestWorks(namespace string) workv1client.ManifestWork return h.workClientSet.WorkV1().ManifestWorks(namespace) } -// ManifestWorkInformer returns a ManifestWorkInformer -func (h *ClientHolder) ManifestWorkInformer() workv1informers.ManifestWorkInformer { - return h.manifestWorkInformer -} - // ClientHolderBuilder builds the ClientHolder with different configuration. type ClientHolderBuilder struct { - config any - codecs []generic.Codec[*workv1.ManifestWork] - informerOptions []workinformers.SharedInformerOption - informerResyncTime time.Duration - sourceID string - clusterName string - clientID string + config any + watcherStore store.WorkClientWatcherStore + codecs []generic.Codec[*workv1.ManifestWork] + sourceID string + clusterName string + clientID string + resync bool } // NewClientHolderBuilder returns a ClientHolderBuilder with a given configuration. // // Available configurations: -// - Kubeconfig (*rest.Config): builds a manifestwork client with kubeconfig // - MQTTOptions (*mqtt.MQTTOptions): builds a manifestwork client based on cloudevents with MQTT // - GRPCOptions (*grpc.GRPCOptions): builds a manifestwork client based on cloudevents with GRPC +// - KafkaOptions (*kafka.KafkaOptions): builds a manifestwork client based on cloudevents with Kafka // // TODO using a specified config instead of any func NewClientHolderBuilder(config any) *ClientHolderBuilder { return &ClientHolderBuilder{ - config: config, - informerResyncTime: defaultInformerResyncTime, + config: config, + resync: true, } } @@ -104,58 +89,44 @@ func (b *ClientHolderBuilder) WithCodecs(codecs ...generic.Codec[*workv1.Manifes return b } -// WithInformerConfig set the ManifestWorkInformer configs. If the resync time is not set, the default time (10 minutes) -// will be used when building the ManifestWorkInformer. -func (b *ClientHolderBuilder) WithInformerConfig( - resyncTime time.Duration, options ...workinformers.SharedInformerOption) *ClientHolderBuilder { - b.informerResyncTime = resyncTime - b.informerOptions = options +// WithWorkClientWatcherStore set the WorkClientWatcherStore. The client will use this store to caches the works and +// watch the work events. +func (b *ClientHolderBuilder) WithWorkClientWatcherStore(store store.WorkClientWatcherStore) *ClientHolderBuilder { + b.watcherStore = store return b } -// NewSourceClientHolder returns a ClientHolder for source +// WithResyncEnabled control the client resync (Default is true), if it's true, the resync happens when +// 1. after the client's store is initiated +// 2. 
the client reconnected +func (b *ClientHolderBuilder) WithResyncEnabled(resync bool) *ClientHolderBuilder { + b.resync = resync + return b +} + +// NewSourceClientHolder returns a ClientHolder for a source func (b *ClientHolderBuilder) NewSourceClientHolder(ctx context.Context) (*ClientHolder, error) { - switch config := b.config.(type) { - case *rest.Config: - return b.newKubeClients(config) - case *mqtt.MQTTOptions: - return b.newSourceClients(ctx, mqtt.NewSourceOptions(config, b.clientID, b.sourceID)) - case *grpc.GRPCOptions: - return b.newSourceClients(ctx, grpc.NewSourceOptions(config, b.sourceID)) - default: - return nil, fmt.Errorf("unsupported client configuration type %T", config) + if len(b.clientID) == 0 { + return nil, fmt.Errorf("client id is required") } -} -// NewAgentClientHolder returns a ClientHolder for agent -func (b *ClientHolderBuilder) NewAgentClientHolder(ctx context.Context) (*ClientHolder, error) { - switch config := b.config.(type) { - case *rest.Config: - return b.newKubeClients(config) - case *mqtt.MQTTOptions: - return b.newAgentClients(ctx, mqtt.NewAgentOptions(config, b.clusterName, b.clientID)) - case *grpc.GRPCOptions: - return b.newAgentClients(ctx, grpc.NewAgentOptions(config, b.clusterName, b.clientID)) - default: - return nil, fmt.Errorf("unsupported client configuration type %T", config) + if len(b.sourceID) == 0 { + return nil, fmt.Errorf("source id is required") } -} -func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, agentOptions *options.CloudEventsAgentOptions) (*ClientHolder, error) { - if len(b.clientID) == 0 { - return nil, fmt.Errorf("client id is required") + if b.watcherStore == nil { + return nil, fmt.Errorf("a watcher store is required") } - if len(b.clusterName) == 0 { - return nil, fmt.Errorf("cluster name is required") + options, err := generic.BuildCloudEventsSourceOptions(b.config, b.clientID, b.sourceID) + if err != nil { + return nil, err } - workLister := &ManifestWorkLister{} - watcher := watcher.NewManifestWorkWatcher() - cloudEventsClient, err := generic.NewCloudEventAgentClient[*workv1.ManifestWork]( + cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork]( ctx, - agentOptions, - workLister, + options, + sourcelister.NewWatcherStoreLister(b.watcherStore), ManifestWorkStatusHash, b.codecs..., ) @@ -163,58 +134,67 @@ func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, agentOptions return nil, err } - manifestWorkClient := agentclient.NewManifestWorkAgentClient(cloudEventsClient, watcher) + // start to subscribe + cloudEventsClient.Subscribe(ctx, b.watcherStore.HandleReceivedWork) + + manifestWorkClient := sourceclient.NewManifestWorkSourceClient(b.sourceID, cloudEventsClient, b.watcherStore) workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} - factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) - informers := factory.Work().V1().ManifestWorks() - manifestWorkLister := informers.Lister() - namespacedLister := manifestWorkLister.ManifestWorks(b.clusterName) - - // Set informer lister back to work lister and client. - workLister.Lister = manifestWorkLister - // TODO the work client and informer share a same store in the current implementation, ideally, the store should be - // only written from the server. we may need to revisit the implementation in the future. 
- manifestWorkClient.SetLister(namespacedLister) - cloudEventsClient.Subscribe(ctx, agenthandler.NewManifestWorkAgentHandler(namespacedLister, watcher)) + if !b.resync { + return &ClientHolder{workClientSet: workClientSet}, nil + } + // start a go routine to resync the works when this client reconnected go func() { for { select { case <-ctx.Done(): return case <-cloudEventsClient.ReconnectedChan(): - // when receiving a client reconnected signal, we resync all sources for this agent - // TODO after supporting multiple sources, we should only resync agent known sources - if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + // when receiving a client reconnected signal, we resync all clusters for this source + if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { klog.Errorf("failed to send resync request, %v", err) } } } }() - return &ClientHolder{ - workClientSet: workClientSet, - manifestWorkInformer: informers, - }, nil + // start a go routine to resync the works after this client's store is initiated + go func() { + if store.WaitForStoreInit(ctx, b.watcherStore.HasInitiated) { + if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + }() + + return &ClientHolder{workClientSet: workClientSet}, nil } -func (b *ClientHolderBuilder) newSourceClients(ctx context.Context, sourceOptions *options.CloudEventsSourceOptions) (*ClientHolder, error) { +// NewAgentClientHolder returns a ClientHolder for an agent +func (b *ClientHolderBuilder) NewAgentClientHolder(ctx context.Context) (*ClientHolder, error) { if len(b.clientID) == 0 { return nil, fmt.Errorf("client id is required") } - if len(b.sourceID) == 0 { - return nil, fmt.Errorf("source id is required") + if len(b.clusterName) == 0 { + return nil, fmt.Errorf("cluster name is required") } - workLister := &ManifestWorkLister{} - watcher := watcher.NewManifestWorkWatcher() - cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork]( + if b.watcherStore == nil { + return nil, fmt.Errorf("watcher store is required") + } + + options, err := generic.BuildCloudEventsAgentOptions(b.config, b.clusterName, b.clientID) + if err != nil { + return nil, err + } + + cloudEventsClient, err := generic.NewCloudEventAgentClient[*workv1.ManifestWork]( ctx, - sourceOptions, - workLister, + options, + agentlister.NewWatcherStoreLister(b.watcherStore), ManifestWorkStatusHash, b.codecs..., ) @@ -222,49 +202,40 @@ func (b *ClientHolderBuilder) newSourceClients(ctx context.Context, sourceOption return nil, err } - manifestWorkClient := sourceclient.NewManifestWorkSourceClient(b.sourceID, cloudEventsClient, watcher) + // start to subscribe + cloudEventsClient.Subscribe(ctx, b.watcherStore.HandleReceivedWork) + + manifestWorkClient := agentclient.NewManifestWorkAgentClient(cloudEventsClient, b.watcherStore, b.clusterName) workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} - factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) - informers := factory.Work().V1().ManifestWorks() - manifestWorkLister := informers.Lister() - // Set informer lister back to work lister and client. 
- workLister.Lister = manifestWorkLister - manifestWorkClient.SetLister(manifestWorkLister) - sourceHandler := sourcehandler.NewManifestWorkSourceHandler(manifestWorkLister, watcher) - cloudEventsClient.Subscribe(ctx, sourceHandler.HandlerFunc()) + if !b.resync { + return &ClientHolder{workClientSet: workClientSet}, nil + } - go sourceHandler.Run(ctx.Done()) go func() { for { select { case <-ctx.Done(): return case <-cloudEventsClient.ReconnectedChan(): - // when receiving a client reconnected signal, we resync all clusters for this source - if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + // when receiving a client reconnected signal, we resync all sources for this agent + // TODO after supporting multiple sources, we should only resync agent known sources + if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { klog.Errorf("failed to send resync request, %v", err) } } } }() - return &ClientHolder{ - workClientSet: workClientSet, - manifestWorkInformer: informers, - }, nil -} - -func (b *ClientHolderBuilder) newKubeClients(config *rest.Config) (*ClientHolder, error) { - kubeWorkClientSet, err := workclientset.NewForConfig(config) - if err != nil { - return nil, err - } + // start a go routine to resync the works after this client's store is initiated + go func() { + if store.WaitForStoreInit(ctx, b.watcherStore.HasInitiated) { + if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + }() - factory := workinformers.NewSharedInformerFactoryWithOptions(kubeWorkClientSet, b.informerResyncTime, b.informerOptions...) - return &ClientHolder{ - workClientSet: kubeWorkClientSet, - manifestWorkInformer: factory.Work().V1().ManifestWorks(), - }, nil + return &ClientHolder{workClientSet: workClientSet}, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go index 53134eb89..3c06d9b97 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go @@ -10,8 +10,11 @@ const ( // CloudEventsDataTypeAnnotationKey is the key of the cloudevents data type annotation. CloudEventsDataTypeAnnotationKey = "cloudevents.open-cluster-management.io/datatype" - // CloudEventsGenerationAnnotationKey is the key of the manifestwork generation annotation. - CloudEventsGenerationAnnotationKey = "cloudevents.open-cluster-management.io/generation" + // CloudEventsResourceVersionAnnotationKey is the key of the manifestwork resourceversion annotation. + // + // This annotation is used for tracing the ManifestWork specific changes, the value of this annotation + // should be a sequence number representing the ManifestWork specific generation. + CloudEventsResourceVersionAnnotationKey = "cloudevents.open-cluster-management.io/resourceversion" ) // CloudEventsOriginalSourceLabelKey is the key of the cloudevents original source label. 
@@ -26,4 +29,5 @@ const ( DeleteRequestAction = "delete_request" ) +var ManifestWorkGK = schema.GroupKind{Group: workv1.GroupName, Kind: "ManifestWork"} var ManifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go deleted file mode 100644 index 724143c33..000000000 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go +++ /dev/null @@ -1,81 +0,0 @@ -package work - -import ( - "fmt" - - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" -) - -const ( - ConfigTypeKube = "kube" - ConfigTypeMQTT = "mqtt" - ConfigTypeGRPC = "grpc" -) - -// ConfigLoader loads a configuration object with a configuration file. -type ConfigLoader struct { - configType string - configPath string - - kubeConfig *rest.Config -} - -// NewConfigLoader returns a ConfigLoader with the given configuration type and configuration file path. -// -// Available configuration types: -// - kube -// - mqtt -// - grpc -func NewConfigLoader(configType, configPath string) *ConfigLoader { - return &ConfigLoader{ - configType: configType, - configPath: configPath, - } -} - -// WithKubeConfig sets a kube config, this config will be used when configuration type is kube and the kube -// configuration file path not set. -func (l *ConfigLoader) WithKubeConfig(kubeConfig *rest.Config) *ConfigLoader { - l.kubeConfig = kubeConfig - return l -} - -// TODO using a specified config instead of any -func (l *ConfigLoader) LoadConfig() (string, any, error) { - switch l.configType { - case ConfigTypeKube: - if l.configPath == "" { - if l.kubeConfig == nil { - return "", nil, fmt.Errorf("neither the kube config path nor kube config object was specified") - } - - return l.kubeConfig.Host, l.kubeConfig, nil - } - - kubeConfig, err := clientcmd.BuildConfigFromFlags("", l.configPath) - if err != nil { - return "", nil, err - } - - return kubeConfig.Host, kubeConfig, nil - case ConfigTypeMQTT: - mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(l.configPath) - if err != nil { - return "", nil, err - } - - return mqttOptions.BrokerHost, mqttOptions, nil - case ConfigTypeGRPC: - grpcOptions, err := grpc.BuildGRPCOptionsFromFlags(l.configPath) - if err != nil { - return "", nil, err - } - - return grpcOptions.URL, grpcOptions, nil - } - - return "", nil, fmt.Errorf("unsupported config type %s", l.configType) -} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go deleted file mode 100644 index 0911ee381..000000000 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go +++ /dev/null @@ -1,31 +0,0 @@ -package work - -import ( - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - - workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" -) - -// ManifestWorkLister list the ManifestWorks from a ManifestWorkInformer's local cache. 
-type ManifestWorkLister struct { - Lister workv1lister.ManifestWorkLister -} - -// List returns the ManifestWorks from a ManifestWorkInformer's local cache. -func (l *ManifestWorkLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { - selector := labels.Everything() - if options.Source != types.SourceAll { - req, err := labels.NewRequirement(common.CloudEventsOriginalSourceLabelKey, selection.Equals, []string{options.Source}) - if err != nil { - return nil, err - } - - selector = labels.NewSelector().Add(*req) - } - - return l.Lister.ManifestWorks(options.ClusterName).List(selector) -} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/mainfiest.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifest.go similarity index 100% rename from vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/mainfiest.go rename to vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifest.go diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go index f7ca9fa98..2b744b425 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go @@ -28,6 +28,11 @@ type ManifestBundle struct { // ManifestBundleStatus represents the data in a cloudevent, it contains the status of a ManifestBundle on a managed // cluster. type ManifestBundleStatus struct { + // ManifestBundle represents the specific of this status. + // This is an optional field, it can be used by a source work client without local cache to + // rebuild a whole work when received the work's status update. + ManifestBundle *ManifestBundle `json:"manifestBundle,omitempty"` + // Conditions contains the different condition statuses for a ManifestBundle on managed cluster. // Valid condition types are: // 1. Applied represents the manifests in a ManifestBundle are applied successfully on a managed cluster. 
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go index ed6eb8a46..26bf55e32 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go @@ -3,56 +3,63 @@ package client import ( "context" "fmt" - "strconv" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/watch" "k8s.io/klog/v2" workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" - workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" ) // ManifestWorkSourceClient implements the ManifestWorkInterface. type ManifestWorkSourceClient struct { cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork] - watcher *watcher.ManifestWorkWatcher - lister workv1lister.ManifestWorkLister + watcherStore store.WorkClientWatcherStore namespace string sourceID string } var _ workv1client.ManifestWorkInterface = &ManifestWorkSourceClient{} -func NewManifestWorkSourceClient(sourceID string, +func NewManifestWorkSourceClient( + sourceID string, cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork], - watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceClient { + watcherStore store.WorkClientWatcherStore, +) *ManifestWorkSourceClient { return &ManifestWorkSourceClient{ cloudEventsClient: cloudEventsClient, - watcher: watcher, + watcherStore: watcherStore, sourceID: sourceID, } } -func (c *ManifestWorkSourceClient) SetLister(lister workv1lister.ManifestWorkLister) { - c.lister = lister -} - -func (mw *ManifestWorkSourceClient) SetNamespace(namespace string) { - mw.namespace = namespace +func (c *ManifestWorkSourceClient) SetNamespace(namespace string) { + c.namespace = namespace } func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { - _, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) + if manifestWork.Namespace != "" && manifestWork.Namespace != c.namespace { + return nil, errors.NewInvalid(common.ManifestWorkGK, "namespace", field.ErrorList{ + field.Invalid( + field.NewPath("metadata").Child("namespace"), + manifestWork.Namespace, + fmt.Sprintf("does not match the namespace %s", c.namespace), + ), + }) + } + + _, err := c.watcherStore.Get(c.namespace, manifestWork.Name) if err == nil { return nil, errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name) } @@ -69,21 +76,23 @@ func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *wor Action: common.CreateRequestAction, } - generation, err := getWorkGeneration(manifestWork) - if err != nil { + newWork := manifestWork.DeepCopy() + newWork.UID 
= kubetypes.UID(utils.UID(c.sourceID, c.namespace, newWork.Name)) + newWork.Namespace = c.namespace + newWork.ResourceVersion = getWorkResourceVersion(manifestWork) + + if err := utils.Validate(newWork); err != nil { + return nil, err } - newWork := manifestWork.DeepCopy() - newWork.UID = kubetypes.UID(utils.UID(c.sourceID, c.namespace, newWork.Name)) - newWork.Generation = generation - ensureSourceLabel(c.sourceID, newWork) if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { return nil, err } - // add the new work to the ManifestWorkInformer local cache. - c.watcher.Receive(watch.Event{Type: watch.Added, Object: newWork}) + // add the new work to the local cache. + if err := c.watcherStore.Add(newWork); err != nil { + return nil, err + } return newWork.DeepCopy(), nil } @@ -96,7 +105,7 @@ func (c *ManifestWorkSourceClient) UpdateStatus(ctx context.Context, manifestWor } func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - work, err := c.lister.ManifestWorks(c.namespace).Get(name) + work, err := c.watcherStore.Get(c.namespace, name) if errors.IsNotFound(err) { return nil } @@ -120,9 +129,16 @@ func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts return err } - // update the deleting work in the ManifestWorkInformer local cache. - c.watcher.Receive(watch.Event{Type: watch.Modified, Object: deletingWork}) - return nil + if len(work.Finalizers) == 0 { + // the work has no finalizers, there are two cases in this scenario: + // 1) the agent has not started yet, we delete this work from the local cache directly. + // 2) the agent is running, but the status response has not been handled by the source yet; + // after the deleted status comes back, this work needs to be ignored by the work status processor. + return c.watcherStore.Delete(deletingWork) + } + + // update the work with a deletion timestamp in the local cache. 
+ return c.watcherStore.Update(deletingWork) } func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { @@ -131,21 +147,26 @@ func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts me func (c *ManifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { klog.V(4).Infof("getting manifestwork %s", name) - return c.lister.ManifestWorks(c.namespace).Get(name) + return c.watcherStore.Get(c.namespace, name) } func (c *ManifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { klog.V(4).Infof("list manifestworks") - // send resync request to fetch manifestwork status from agents when the ManifestWorkInformer starts - if err := c.cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + works, err := c.watcherStore.List(opts) + if err != nil { return nil, err } - return &workv1.ManifestWorkList{}, nil + items := []workv1.ManifestWork{} + for _, work := range works { + items = append(items, *work) + } + + return &workv1.ManifestWorkList{Items: items}, nil } func (c *ManifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.watcher, nil + return c.watcherStore, nil } func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { @@ -155,7 +176,7 @@ func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt ku return nil, fmt.Errorf("unsupported to update subresources %v", subresources) } - lastWork, err := c.lister.ManifestWorks(c.namespace).Get(name) + lastWork, err := c.watcherStore.Get(c.namespace, name) if err != nil { return nil, err } @@ -165,11 +186,6 @@ func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt ku return nil, err } - generation, err := getWorkGeneration(patchedWork) - if err != nil { - return nil, err - } - // TODO if we support multiple data type in future, we may need to get the data type from // the cloudevents data type annotation eventType := types.CloudEventsType{ @@ -179,38 +195,32 @@ func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt ku } newWork := patchedWork.DeepCopy() - newWork.Generation = generation - if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + newWork.ResourceVersion = getWorkResourceVersion(patchedWork) + + if err := utils.Validate(newWork); err != nil { return nil, err } - // refresh the work in the ManifestWorkInformer local cache with patched work. - c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) - return newWork.DeepCopy(), nil -} - -// getWorkGeneration retrieves the work generation from the annotation with the key -// "cloudevents.open-cluster-management.io/generation". -// if no generation is set in the annotation, then 0 is returned, which means the message -// broker guarantees the message order. 
-func getWorkGeneration(work *workv1.ManifestWork) (int64, error) { - generation, ok := work.Annotations[common.CloudEventsGenerationAnnotationKey] - if !ok { - return 0, nil + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err } - generationInt, err := strconv.Atoi(generation) - if err != nil { - return 0, fmt.Errorf("failed to convert generation %s to int: %v", generation, err) + // modify the updated work in the local cache. + if err := c.watcherStore.Update(newWork); err != nil { + return nil, err } - - return int64(generationInt), nil + return newWork.DeepCopy(), nil } -func ensureSourceLabel(sourceID string, work *workv1.ManifestWork) { - if work.Labels == nil { - work.Labels = map[string]string{} +// getWorkResourceVersion retrieves the work resource version from the annotation with the key +// "cloudevents.open-cluster-management.io/resourceversion". +// If no value is set in this annotation, then "0" is returned, which means the version of the work will not be +// maintained on the source; the message broker guarantees the work update order. +func getWorkResourceVersion(work *workv1.ManifestWork) string { + resourceVersion, ok := work.Annotations[common.CloudEventsResourceVersionAnnotationKey] + if !ok { + return "0" } - work.Labels[common.CloudEventsOriginalSourceLabelKey] = sourceID + return resourceVersion } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go index f19e70b11..8ff7dc7de 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go @@ -1,7 +1,9 @@ package codec import ( + "encoding/json" "fmt" + "strconv" cloudevents "github.com/cloudevents/sdk-go/v2" cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" @@ -14,6 +16,9 @@ import ( "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" ) +// ExtensionWorkMeta is an extension attribute for work meta data. +const ExtensionWorkMeta = "metadata" + // ManifestBundleCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for a source. type ManifestBundleCodec struct{} @@ -32,11 +37,24 @@ func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) } + resourceVersion, err := strconv.Atoi(work.ResourceVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert resource version %s to int: %v", work.ResourceVersion, err) + } + evt := types.NewEventBuilder(source, eventType). WithClusterName(work.Namespace). WithResourceID(string(work.UID)). - WithResourceVersion(work.Generation). + WithResourceVersion(int64(resourceVersion)). 
NewEvent() + + // set the work's meta data to its cloud event + metaJson, err := json.Marshal(work.ObjectMeta) + if err != nil { + return nil, err + } + evt.SetExtension(ExtensionWorkMeta, string(metaJson)) + if !work.DeletionTimestamp.IsZero() { evt.SetExtension(types.ExtensionDeletionTimestamp, work.DeletionTimestamp.Time) return &evt, nil @@ -77,12 +95,28 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) } + metaObj := metav1.ObjectMeta{} + + // the agent sends the work meta data back, restore the meta to the received work, otherwise only set the + // UID and ResourceVersion to the received work, for the work's other meta data will be got from the work + // client local cache. + if workMetaExtension, ok := evtExtensions[ExtensionWorkMeta]; ok { + metaJson, err := cloudeventstypes.ToString(workMetaExtension) + if err != nil { + return nil, err + } + + if err := json.Unmarshal([]byte(metaJson), &metaObj); err != nil { + return nil, err + } + } + + metaObj.UID = kubetypes.UID(resourceID) + metaObj.ResourceVersion = fmt.Sprintf("%d", resourceVersion) + work := &workv1.ManifestWork{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - UID: kubetypes.UID(resourceID), - ResourceVersion: fmt.Sprintf("%d", resourceVersion), - }, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metaObj, } manifestStatus := &payload.ManifestBundleStatus{} @@ -90,6 +124,13 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) } + // the agent sends the work spec back, restore it + if manifestStatus.ManifestBundle != nil { + work.Spec.Workload.Manifests = manifestStatus.ManifestBundle.Manifests + work.Spec.DeleteOption = manifestStatus.ManifestBundle.DeleteOption + work.Spec.ManifestConfigs = manifestStatus.ManifestBundle.ManifestConfigs + } + work.Status = workv1.ManifestWorkStatus{ Conditions: manifestStatus.Conditions, ResourceStatus: workv1.ManifestResourceStatus{ diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go deleted file mode 100644 index 0ad31f15d..000000000 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go +++ /dev/null @@ -1,167 +0,0 @@ -package handler - -import ( - "fmt" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - kubetypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" - - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" -) - -const ManifestWorkFinalizer = "cloudevents.open-cluster-management.io/manifest-work-cleanup" - -type ManifestWorkSourceHandler struct { - works workqueue.RateLimitingInterface - lister workv1lister.ManifestWorkLister - watcher *watcher.ManifestWorkWatcher -} - -// 
NewManifestWorkSourceHandler returns a ResourceHandler for a ManifestWork source client. It sends the kube events -// with ManifestWorWatcher after CloudEventSourceClient received the ManifestWork status from agent, then the -// ManifestWorkInformer handles the kube events in its local cache. -func NewManifestWorkSourceHandler(lister workv1lister.ManifestWorkLister, watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceHandler { - return &ManifestWorkSourceHandler{ - works: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "manifestwork-source-handler"), - lister: lister, - watcher: watcher, - } -} - -func (h *ManifestWorkSourceHandler) Run(stopCh <-chan struct{}) { - defer h.works.ShutDown() - - // start a goroutine to handle the works from the queue - // the .Until will re-kick the runWorker one second after the runWorker completes - go wait.Until(h.runWorker, time.Second, stopCh) - - // wait until we're told to stop - <-stopCh -} - -func (h *ManifestWorkSourceHandler) HandlerFunc() generic.ResourceHandler[*workv1.ManifestWork] { - return func(action types.ResourceAction, obj *workv1.ManifestWork) error { - switch action { - case types.StatusModified: - h.works.Add(obj) - default: - return fmt.Errorf("unsupported resource action %s", action) - } - return nil - } -} - -func (h *ManifestWorkSourceHandler) runWorker() { - // hot loop until we're told to stop. processNextEvent will automatically wait until there's work available, so - // we don't worry about secondary waits - for h.processNextWork() { - } -} - -// processNextWork deals with one key off the queue. -func (h *ManifestWorkSourceHandler) processNextWork() bool { - // pull the next event item from queue. - // events queue blocks until it can return an item to be processed - key, quit := h.works.Get() - if quit { - // the current queue is shutdown and becomes empty, quit this process - return false - } - defer h.works.Done(key) - - if err := h.handleWork(key.(*workv1.ManifestWork)); err != nil { - // we failed to handle the work, we should requeue the item to work on later - // this method will add a backoff to avoid hotlooping on particular items - h.works.AddRateLimited(key) - return true - } - - // we handle the event successfully, tell the queue to stop tracking history for this event - h.works.Forget(key) - return true -} - -func (h *ManifestWorkSourceHandler) handleWork(work *workv1.ManifestWork) error { - lastWork := h.getWorkByUID(work.UID) - if lastWork == nil { - // the work is not found, this may be the client is restarted and the local cache is not ready, requeue this - // work - return errors.NewNotFound(common.ManifestWorkGR, string(work.UID)) - } - - updatedWork := lastWork.DeepCopy() - if meta.IsStatusConditionTrue(work.Status.Conditions, common.ManifestsDeleted) { - updatedWork.Finalizers = []string{} - h.watcher.Receive(watch.Event{Type: watch.Deleted, Object: updatedWork}) - return nil - } - - resourceVersion, err := strconv.Atoi(work.ResourceVersion) - if err != nil { - klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err) - return nil - } - - if int64(resourceVersion) > lastWork.Generation { - klog.Warningf("the work %s/%s resource version %d is great than its generation %d, ignore", - lastWork.Namespace, lastWork.Name, resourceVersion, work.Generation) - return nil - } - - // no status change - if equality.Semantic.DeepEqual(lastWork.Status, work.Status) { - return nil - } - - // the work has been handled by agent, we ensure a 
finalizer on the work - updatedWork.Finalizers = ensureFinalizers(updatedWork.Finalizers) - updatedWork.Status = work.Status - h.watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) - return nil -} - -func (h *ManifestWorkSourceHandler) getWorkByUID(uid kubetypes.UID) *workv1.ManifestWork { - works, err := h.lister.List(labels.Everything()) - if err != nil { - klog.Errorf("failed to lists works, %v", err) - return nil - } - - for _, work := range works { - if work.UID == uid { - return work - } - } - - return nil -} - -func ensureFinalizers(workFinalizers []string) []string { - has := false - for _, f := range workFinalizers { - if f == ManifestWorkFinalizer { - has = true - break - } - } - - if !has { - workFinalizers = append(workFinalizers, ManifestWorkFinalizer) - } - - return workFinalizers -} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister/lister.go new file mode 100644 index 000000000..f939beac7 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister/lister.go @@ -0,0 +1,32 @@ +package lister + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" +) + +// WatcherStoreLister list the ManifestWorks from the WorkClientWatcherStore. +type WatcherStoreLister struct { + store store.WorkClientWatcherStore +} + +func NewWatcherStoreLister(store store.WorkClientWatcherStore) *WatcherStoreLister { + return &WatcherStoreLister{ + store: store, + } +} + +// List returns the ManifestWorks from the WorkClientWatcherCache with list options. +func (l *WatcherStoreLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { + opts := metav1.ListOptions{} + if options.ClusterName != types.ClusterAll { + opts.FieldSelector = fmt.Sprintf("metadata.namespace=%s", options.ClusterName) + } + + return l.store.List(opts) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/base.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/base.go new file mode 100644 index 000000000..5e21e7167 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/base.go @@ -0,0 +1,256 @@ +package store + +import ( + "fmt" + "strconv" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + workv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" +) + +const ManifestWorkFinalizer = "cloudevents.open-cluster-management.io/manifest-work-cleanup" + +type baseStore struct { + sync.RWMutex + + result chan watch.Event + done chan struct{} + + store cache.Store + + initiated bool + + // a queue to save the received work events + receivedWorks workqueue.RateLimitingInterface +} + +// ResultChan implements watch interface. 
+func (b *baseStore) ResultChan() <-chan watch.Event { + return b.result +} + +// Stop implements watch interface. +func (b *baseStore) Stop() { + // Call Close() exactly once by locking and setting a flag. + b.Lock() + defer b.Unlock() + + // closing a closed channel always panics, therefore check before closing + select { + case <-b.done: + close(b.result) + default: + close(b.done) + } +} + +// List the works from the store with the list options +func (b *baseStore) List(opts metav1.ListOptions) ([]*workv1.ManifestWork, error) { + b.RLock() + defer b.RUnlock() + + return utils.ListWorksWithOptions(b.store, opts) +} + +// Get a works from the store +func (b *baseStore) Get(namespace, name string) (*workv1.ManifestWork, error) { + b.RLock() + defer b.RUnlock() + + obj, exists, err := b.store.GetByKey(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return nil, err + } + + if !exists { + return nil, errors.NewNotFound(common.ManifestWorkGR, name) + } + + work, ok := obj.(*workv1.ManifestWork) + if !ok { + return nil, fmt.Errorf("unknown type %T", obj) + } + + return work, nil +} + +// List all of works from the store +func (b *baseStore) ListAll() ([]*workv1.ManifestWork, error) { + b.RLock() + defer b.RUnlock() + + works := []*workv1.ManifestWork{} + for _, obj := range b.store.List() { + if work, ok := obj.(*workv1.ManifestWork); ok { + works = append(works, work) + } + } + + return works, nil +} + +func (b *baseStore) HandleReceivedWork(action types.ResourceAction, work *workv1.ManifestWork) error { + switch action { + case types.StatusModified: + b.receivedWorks.Add(work) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + return nil +} + +// workProcessor process the received works from given work queue with a specific store +type workProcessor struct { + works workqueue.RateLimitingInterface + store WorkClientWatcherStore +} + +func newWorkProcessor(works workqueue.RateLimitingInterface, store WorkClientWatcherStore) *workProcessor { + return &workProcessor{ + works: works, + store: store, + } +} + +func (b *workProcessor) run(stopCh <-chan struct{}) { + defer b.works.ShutDown() + + // start a goroutine to handle the works from the queue + // the .Until will re-kick the runWorker one second after the runWorker completes + go wait.Until(b.runWorker, time.Second, stopCh) + + // wait until we're told to stop + <-stopCh +} + +func (b *workProcessor) runWorker() { + // hot loop until we're told to stop. processNextEvent will automatically wait until there's work available, so + // we don't worry about secondary waits + for b.processNextWork() { + } +} + +// processNextWork deals with one key off the queue. +func (b *workProcessor) processNextWork() bool { + // pull the next event item from queue. 
+ // events queue blocks until it can return an item to be processed + key, quit := b.works.Get() + if quit { + // the current queue is shut down and empty, quit this process + return false + } + defer b.works.Done(key) + + if err := b.handleWork(key.(*workv1.ManifestWork)); err != nil { + // we failed to handle the work, we should requeue the item to work on later + // this method will add a backoff to avoid hotlooping on particular items + b.works.AddRateLimited(key) + return true + } + + // we handled the event successfully, tell the queue to stop tracking history for this event + b.works.Forget(key) + return true +} + +func (b *workProcessor) handleWork(work *workv1.ManifestWork) error { + lastWork := b.getWork(work.UID) + if lastWork == nil { + // the work is not found in the local cache and it has been deleted by the agent, + // ignore this work. + if meta.IsStatusConditionTrue(work.Status.Conditions, common.ManifestsDeleted) { + return nil + } + + // the work is not found, there are two cases: + // 1) the source is restarted and the local cache is not ready, requeue this work. + // 2) (TODO) during the source restart, the work is deleted forcibly, we may need an + // eviction mechanism for this. + return errors.NewNotFound(common.ManifestWorkGR, string(work.UID)) + } + + updatedWork := lastWork.DeepCopy() + if meta.IsStatusConditionTrue(work.Status.Conditions, common.ManifestsDeleted) { + updatedWork.Finalizers = []string{} + // delete the work from the local cache. + return b.store.Delete(updatedWork) + } + + lastResourceVersion, err := strconv.Atoi(lastWork.ResourceVersion) + if err != nil { + klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err) + return nil + } + + resourceVersion, err := strconv.Atoi(work.ResourceVersion) + if err != nil { + klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err) + return nil + } + + // the work's version is maintained by the source; if the agent reports a newer version than the source has, ignore it + if lastResourceVersion != 0 && resourceVersion > lastResourceVersion { + klog.Warningf("the work %s/%s resource version %d is greater than the source resource version %d, ignore", + lastWork.Namespace, lastWork.Name, resourceVersion, lastResourceVersion) + return nil + } + + // no status change + if equality.Semantic.DeepEqual(lastWork.Status, work.Status) { + return nil + } + + // the work has been handled by the agent, ensure a finalizer on the work + updatedWork.Finalizers = ensureFinalizers(updatedWork.Finalizers) + updatedWork.Status = work.Status + // update the work with status in the local cache. 
+ return b.store.Update(updatedWork) +} + +func (b *workProcessor) getWork(uid kubetypes.UID) *workv1.ManifestWork { + works, err := b.store.ListAll() + if err != nil { + klog.Errorf("failed to lists works, %v", err) + return nil + } + + for _, work := range works { + if work.UID == uid { + return work + } + } + + return nil +} + +func ensureFinalizers(workFinalizers []string) []string { + has := false + for _, f := range workFinalizers { + if f == ManifestWorkFinalizer { + has = true + break + } + } + + if !has { + workFinalizers = append(workFinalizers, ManifestWorkFinalizer) + } + + return workFinalizers +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/informer.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/informer.go new file mode 100644 index 000000000..f00f2f617 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/informer.go @@ -0,0 +1,148 @@ +package store + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +// SourceInformerWatcherStore extends the baseStore. +// It gets/lists the works from the given informer store and send +// the work add/update/delete event to the watch channel directly. +// +// It is used for building ManifestWork source client. +type SourceInformerWatcherStore struct { + baseStore +} + +var _ watch.Interface = &SourceInformerWatcherStore{} +var _ WorkClientWatcherStore = &SourceInformerWatcherStore{} + +func NewSourceInformerWatcherStore(ctx context.Context) *SourceInformerWatcherStore { + s := &SourceInformerWatcherStore{ + baseStore: baseStore{ + result: make(chan watch.Event), + done: make(chan struct{}), + receivedWorks: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "informer-watcher-store"), + }, + } + + // start a goroutine to process the received work events from the work queue with current store. + go newWorkProcessor(s.baseStore.receivedWorks, s).run(ctx.Done()) + + return s +} + +func (s *SourceInformerWatcherStore) Add(work *workv1.ManifestWork) error { + s.result <- watch.Event{Type: watch.Added, Object: work} + return nil +} + +func (s *SourceInformerWatcherStore) Update(work *workv1.ManifestWork) error { + s.result <- watch.Event{Type: watch.Modified, Object: work} + return nil +} + +func (s *SourceInformerWatcherStore) Delete(work *workv1.ManifestWork) error { + s.result <- watch.Event{Type: watch.Deleted, Object: work} + return nil +} + +func (s *SourceInformerWatcherStore) HasInitiated() bool { + return s.initiated +} + +func (s *SourceInformerWatcherStore) SetStore(store cache.Store) { + s.store = store + s.initiated = true +} + +// AgentInformerWatcherStore extends the baseStore. +// It gets/lists the works from the given informer store and send +// the work add/update/delete event to the watch channel directly. +// +// It is used for building ManifestWork agent client. 
+type AgentInformerWatcherStore struct { + baseStore +} + +var _ watch.Interface = &AgentInformerWatcherStore{} +var _ WorkClientWatcherStore = &AgentInformerWatcherStore{} + +func NewAgentInformerWatcherStore() *AgentInformerWatcherStore { + return &AgentInformerWatcherStore{ + baseStore: baseStore{ + result: make(chan watch.Event), + done: make(chan struct{}), + }, + } +} + +func (s *AgentInformerWatcherStore) Add(work *workv1.ManifestWork) error { + s.result <- watch.Event{Type: watch.Added, Object: work} + return nil +} + +func (s *AgentInformerWatcherStore) Update(work *workv1.ManifestWork) error { + s.result <- watch.Event{Type: watch.Modified, Object: work} + return nil +} + +func (s *AgentInformerWatcherStore) Delete(work *workv1.ManifestWork) error { + s.result <- watch.Event{Type: watch.Deleted, Object: work} + return nil +} + +func (s *AgentInformerWatcherStore) HandleReceivedWork(action types.ResourceAction, work *workv1.ManifestWork) error { + switch action { + case types.Added: + return s.Add(work.DeepCopy()) + case types.Modified: + lastWork, err := s.Get(work.Namespace, work.Name) + if err != nil { + return err + } + + updatedWork := work.DeepCopy() + + // restore the fields that are maintained by local agent + updatedWork.Labels = lastWork.Labels + updatedWork.Annotations = lastWork.Annotations + updatedWork.Finalizers = lastWork.Finalizers + updatedWork.Status = lastWork.Status + + return s.Update(updatedWork) + case types.Deleted: + // the manifestwork is deleting on the source, we just update its deletion timestamp. + lastWork, err := s.Get(work.Namespace, work.Name) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + updatedWork := lastWork.DeepCopy() + updatedWork.DeletionTimestamp = work.DeletionTimestamp + return s.Update(updatedWork) + default: + return fmt.Errorf("unsupported resource action %s", action) + } +} + +func (s *AgentInformerWatcherStore) HasInitiated() bool { + return s.initiated +} + +func (s *AgentInformerWatcherStore) SetStore(store cache.Store) { + s.store = store + s.initiated = true +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/interface.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/interface.go new file mode 100644 index 000000000..f5f72d1e3 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/interface.go @@ -0,0 +1,75 @@ +package store + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +const syncedPollPeriod = 100 * time.Millisecond + +// StoreInitiated is a function that can be used to determine if an informer has synced. +// This is useful for determining if caches have synced. +type StoreInitiated func() bool + +// WorkClientWatcherStore extends the watch interface with a work store. +type WorkClientWatcherStore interface { + watch.Interface + + // HandleReceivedWork handles the client received work events. + HandleReceivedWork(action types.ResourceAction, work *workv1.ManifestWork) error + + // Add will be called by work client when adding works. The implementation is based on the specific + // watcher store, in some case, it does not need to update a store, but just send a watch event. 
+ Add(work *workv1.ManifestWork) error + + // Update will be called by work client when updating works. The implementation is based on the specific + // watcher store, in some case, it does not need to update a store, but just send a watch event. + Update(work *workv1.ManifestWork) error + + // Delete will be called by work client when deleting works. The implementation is based on the specific + // watcher store, in some case, it does not need to update a store, but just send a watch event. + Delete(work *workv1.ManifestWork) error + + // List returns the works from store with list options + List(opts metav1.ListOptions) ([]*workv1.ManifestWork, error) + + // ListAll list all of the works from store + ListAll() ([]*workv1.ManifestWork, error) + + // Get returns a work from store with work namespace and name + Get(namespace, name string) (*workv1.ManifestWork, error) + + // HasInitiated marks the store has been initiated, A resync may be required after the store is initiated + // when building a work client. + HasInitiated() bool +} + +func WaitForStoreInit(ctx context.Context, cacheSyncs ...StoreInitiated) bool { + err := wait.PollUntilContextCancel( + ctx, + syncedPollPeriod, + true, + func(ctx context.Context) (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + ) + if err != nil { + klog.Errorf("stop WaitForStoreInit, %v", err) + return false + } + + return true +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/local.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/local.go new file mode 100644 index 000000000..17e6579a9 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/local.go @@ -0,0 +1,211 @@ +package store + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + workv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" +) + +// ListLocalWorksFunc loads the works from the local environment. +type ListLocalWorksFunc func(ctx context.Context) ([]*workv1.ManifestWork, error) + +type watchEvent struct { + Key string + Type watch.EventType +} + +var _ watch.Interface = &SourceLocalWatcherStore{} +var _ WorkClientWatcherStore = &SourceLocalWatcherStore{} + +// SourceLocalWatcherStore caches the works in this local store and provide the watch ability by watch event channel. +// +// It is used for building ManifestWork source client. 
+type SourceLocalWatcherStore struct { + baseStore + eventQueue cache.Queue +} + +// NewSourceLocalWatcherStore returns a LocalWatcherStore with works that list by ListLocalWorksFunc +func NewSourceLocalWatcherStore(ctx context.Context, listFunc ListLocalWorksFunc) (*SourceLocalWatcherStore, error) { + works, err := listFunc(ctx) + if err != nil { + return nil, err + } + + // A local store to cache the works + store := cache.NewStore(cache.MetaNamespaceKeyFunc) + for _, work := range works { + if err := utils.Validate(work); err != nil { + return nil, err + } + + if err := store.Add(work.DeepCopy()); err != nil { + return nil, err + } + } + + s := &SourceLocalWatcherStore{ + baseStore: baseStore{ + // A channel for watcher, it's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, so nonbuffered is better. + result: make(chan watch.Event), + // If the watcher is externally stopped there is no receiver anymore + // and the send operations on the result channel, especially the + // error reporting might block forever. + // Therefore a dedicated stop channel is used to resolve this blocking. + done: make(chan struct{}), + + store: store, + initiated: true, + + // A queue to save the received work events, it helps us retry events + // where errors occurred while processing + receivedWorks: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "local-watcher-store"), + }, + + // A queue to save the work client send events, if run a client without a watcher, + // it will block the client, this queue helps to resolve this blocking. + // Only save the latest event for a work. + eventQueue: cache.NewFIFO(func(obj interface{}) (string, error) { + evt, ok := obj.(*watchEvent) + if !ok { + return "", fmt.Errorf("unknown object type %T", obj) + } + + return evt.Key, nil + }), + } + + // start a goroutine to process the received work events from the work queue with current store. + go newWorkProcessor(s.baseStore.receivedWorks, s).run(ctx.Done()) + + // start a goroutine to handle the events that are produced by work client + go wait.Until(s.processLoop, time.Second, ctx.Done()) + + return s, nil +} + +// Add a work to the cache and send an event to the event queue +func (s *SourceLocalWatcherStore) Add(work *workv1.ManifestWork) error { + s.Lock() + defer s.Unlock() + + if err := s.store.Add(work); err != nil { + return err + } + + return s.eventQueue.Add(&watchEvent{Key: key(work), Type: watch.Added}) +} + +// Update a work in the cache and send an event to the event queue +func (s *SourceLocalWatcherStore) Update(work *workv1.ManifestWork) error { + s.Lock() + defer s.Unlock() + + if err := s.store.Update(work); err != nil { + return err + } + + return s.eventQueue.Update(&watchEvent{Key: key(work), Type: watch.Modified}) +} + +// Delete a work from the cache and send an event to the event queue +func (s *SourceLocalWatcherStore) Delete(work *workv1.ManifestWork) error { + s.Lock() + defer s.Unlock() + + if err := s.store.Delete(work); err != nil { + return err + } + + return s.eventQueue.Update(&watchEvent{Key: key(work), Type: watch.Deleted}) +} + +// processLoop drains the work event queue and send the event to the watch channel. 
+func (s *SourceLocalWatcherStore) processLoop() { + for { + // this will be blocked until the event queue has events + obj, err := s.eventQueue.Pop(func(interface{}, bool) error { + // do nothing + return nil + }) + if err != nil { + if err == cache.ErrFIFOClosed { + return + } + + klog.Warningf("failed to pop the %v requeue it, %v", obj, err) + // this is the safe way to re-enqueue. + if err := s.eventQueue.AddIfNotPresent(obj); err != nil { + klog.Errorf("failed to requeue the obj %v, %v", obj, err) + return + } + } + + evt, ok := obj.(*watchEvent) + if !ok { + klog.Errorf("unknown the object type %T from the event queue", obj) + return + } + + obj, exists, err := s.store.GetByKey(evt.Key) + if err != nil { + klog.Errorf("failed to get the work %s, %v", evt.Key, err) + return + } + + if !exists { + if evt.Type == watch.Deleted { + namespace, name, err := cache.SplitMetaNamespaceKey(evt.Key) + if err != nil { + klog.Errorf("unexpected event key %s, %v", evt.Key, err) + return + } + + // the work has been deleted, return a work only with its namespace and name + // this will be blocked until this event is consumed + s.result <- watch.Event{ + Type: watch.Deleted, + Object: &workv1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + }, + } + return + } + + klog.Errorf("the work %s does not exist in the cache", evt.Key) + return + } + + work, ok := obj.(*workv1.ManifestWork) + if !ok { + klog.Errorf("unknown the object type %T from the cache", obj) + return + } + + // this will be blocked until this event is consumed + s.result <- watch.Event{Type: evt.Type, Object: work} + } +} + +func (c *SourceLocalWatcherStore) HasInitiated() bool { + return c.initiated +} + +func key(work *workv1.ManifestWork) string { + return work.Namespace + "/" + work.Name +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go index 51b0b3354..886cab552 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go @@ -6,8 +6,19 @@ import ( jsonpatch "github.com/evanphx/json-patch" "github.com/google/uuid" + + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/tools/cache" + workv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" ) @@ -53,3 +64,92 @@ func UID(sourceID, namespace, name string) string { id := fmt.Sprintf("%s-%s-%s-%s", sourceID, common.ManifestWorkGR.String(), namespace, name) return uuid.NewSHA1(uuid.NameSpaceOID, []byte(id)).String() } + +// ListWorksWithOptions retrieves the manifestworks from store which matches the options. 
+func ListWorksWithOptions(store cache.Store, opts metav1.ListOptions) ([]*workv1.ManifestWork, error) { + var err error + + labelSelector := labels.Everything() + fieldSelector := fields.Everything() + + if len(opts.LabelSelector) != 0 { + labelSelector, err = labels.Parse(opts.LabelSelector) + if err != nil { + return nil, fmt.Errorf("invalid labels selector %q: %v", opts.LabelSelector, err) + } + } + + if len(opts.FieldSelector) != 0 { + fieldSelector, err = fields.ParseSelector(opts.FieldSelector) + if err != nil { + return nil, fmt.Errorf("invalid fields selector %q: %v", opts.FieldSelector, err) + } + } + + works := []*workv1.ManifestWork{} + // list with labels + if err := cache.ListAll(store, labelSelector, func(obj interface{}) { + work, ok := obj.(*workv1.ManifestWork) + if !ok { + return + } + + workFieldSet := fields.Set{ + "metadata.name": work.Name, + "metadata.namespace": work.Namespace, + } + + if !fieldSelector.Matches(workFieldSet) { + return + } + + works = append(works, work) + }); err != nil { + return nil, err + } + + return works, nil +} + +func Validate(work *workv1.ManifestWork) error { + fldPath := field.NewPath("metadata") + errs := field.ErrorList{} + + if work.UID == "" { + errs = append(errs, field.Required(fldPath.Child("uid"), "field not set")) + } + + if work.ResourceVersion == "" { + errs = append(errs, field.Required(fldPath.Child("resourceVersion"), "field not set")) + } + + if work.Name == "" { + errs = append(errs, field.Required(fldPath.Child("name"), "field not set")) + } + + for _, msg := range validation.ValidateNamespaceName(work.Name, false) { + errs = append(errs, field.Invalid(fldPath.Child("name"), work.Name, msg)) + } + + if work.Namespace == "" { + errs = append(errs, field.Required(fldPath.Child("namespace"), "field not set")) + } + + for _, msg := range validation.ValidateNamespaceName(work.Namespace, false) { + errs = append(errs, field.Invalid(fldPath.Child("namespace"), work.Namespace, msg)) + } + + errs = append(errs, validation.ValidateAnnotations(work.Annotations, fldPath.Child("annotations"))...) + errs = append(errs, validation.ValidateFinalizers(work.Finalizers, fldPath.Child("finalizers"))...) + errs = append(errs, metav1validation.ValidateLabels(work.Labels, fldPath.Child("labels"))...) + + if err := validator.ManifestValidator.ValidateManifests(work.Spec.Workload.Manifests); err != nil { + errs = append(errs, field.Invalid(field.NewPath("spec"), "spec", err.Error())) + } + + if len(errs) == 0 { + return nil + } + + return fmt.Errorf(errs.ToAggregate().Error()) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go deleted file mode 100644 index d22a7512a..000000000 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go +++ /dev/null @@ -1,64 +0,0 @@ -package watcher - -import ( - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/klog/v2" -) - -// ManifestWorkWatcher implements the watch.Interface. It returns a chan which will receive all the events. -type ManifestWorkWatcher struct { - sync.Mutex - - result chan watch.Event - done chan struct{} -} - -var _ watch.Interface = &ManifestWorkWatcher{} - -func NewManifestWorkWatcher() *ManifestWorkWatcher { - mw := &ManifestWorkWatcher{ - // It's easy for a consumer to add buffering via an extra - // goroutine/channel, but impossible for them to remove it, - // so nonbuffered is better. 
- result: make(chan watch.Event), - // If the watcher is externally stopped there is no receiver anymore - // and the send operations on the result channel, especially the - // error reporting might block forever. - // Therefore a dedicated stop channel is used to resolve this blocking. - done: make(chan struct{}), - } - - return mw -} - -// ResultChan implements Interface. -func (mw *ManifestWorkWatcher) ResultChan() <-chan watch.Event { - return mw.result -} - -// Stop implements Interface. -func (mw *ManifestWorkWatcher) Stop() { - // Call Close() exactly once by locking and setting a flag. - mw.Lock() - defer mw.Unlock() - // closing a closed channel always panics, therefore check before closing - select { - case <-mw.done: - close(mw.result) - default: - close(mw.done) - } -} - -// Receive a event from the work client and sends down the result channel. -func (mw *ManifestWorkWatcher) Receive(evt watch.Event) { - if klog.V(4).Enabled() { - obj, _ := meta.Accessor(evt.Object) - klog.V(4).Infof("Receive the event %v for %v", evt.Type, obj.GetName()) - } - - mw.result <- evt -}
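Note for reviewers: in this sdk-go bump the informer-backed lister/watcher pair is replaced by a pluggable WorkClientWatcherStore, resync is now gated by WithResyncEnabled, and work ordering moves from the generation annotation to the resourceversion annotation. Below is a minimal source-side sketch (not part of this change set) that only uses APIs visible in the hunks above; loadWorksFromStorage is a hypothetical loader, and the wiring of the store into ClientHolderBuilder (which NewSourceClientHolder now requires) is omitted because those setters are outside these hunks.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	workv1 "open-cluster-management.io/api/work/v1"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/common"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/store"
)

// loadWorksFromStorage is a hypothetical loader standing in for a source's own persistence.
// Every returned work must pass utils.Validate, i.e. it needs a UID, ResourceVersion, Name
// and Namespace, otherwise NewSourceLocalWatcherStore returns an error.
func loadWorksFromStorage(ctx context.Context) ([]*workv1.ManifestWork, error) {
	return []*workv1.ManifestWork{}, nil
}

func runSource(ctx context.Context) error {
	// Cache the source's works locally; the store also implements watch.Interface, so the
	// work client built on top of it can serve Watch() directly from this cache.
	watcherStore, err := store.NewSourceLocalWatcherStore(ctx, loadWorksFromStorage)
	if err != nil {
		return err
	}

	// The store is then handed to the ClientHolderBuilder (NewSourceClientHolder requires it);
	// with WithResyncEnabled(true) the holder resyncs all clusters once HasInitiated reports
	// true and again whenever the CloudEvents client reconnects.
	if !store.WaitForStoreInit(ctx, watcherStore.HasInitiated) {
		return fmt.Errorf("watcher store was not initiated")
	}

	// Works published through the source client carry the resourceversion annotation; the
	// value is a sequence number maintained by the source, and "0" (or no annotation) means
	// the broker's ordering is trusted instead.
	work := &workv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "cluster1",
			Name:      "example-work",
			Annotations: map[string]string{
				common.CloudEventsResourceVersionAnnotationKey: "2",
			},
		},
	}

	// The store can be queried with the usual list options.
	works, err := watcherStore.List(metav1.ListOptions{FieldSelector: "metadata.namespace=cluster1"})
	if err != nil {
		return err
	}
	fmt.Printf("cached works: %d, next to publish: %s\n", len(works), work.Name)
	return nil
}

func main() {
	if err := runSource(context.Background()); err != nil {
		panic(err)
	}
}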
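A second sketch for the agent side, again limited to APIs shown in the hunks above: NewAgentClientHolder now subscribes the watcher store's HandleReceivedWork, and AgentInformerWatcherStore is intended to be backed by an informer store through SetStore (that wiring is outside these hunks). A plain cache.Store stands in for the informer store here so the example stays self-contained; it shows how a received spec update has the agent-side labels, annotations, finalizers and status restored before the watch event is emitted. The finalizer string is illustrative only.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	workv1 "open-cluster-management.io/api/work/v1"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/store"
)

func main() {
	watcherStore := store.NewAgentInformerWatcherStore()

	// In the real flow the builder subscribes watcherStore.HandleReceivedWork and an informer
	// built on the returned work clientset supplies this backing store via SetStore; a plain
	// cache.Store is used here to keep the sketch self-contained.
	informerStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	watcherStore.SetStore(informerStore)

	// Consume one watch event the way the informer's Watch() would.
	done := make(chan struct{})
	go func() {
		defer close(done)
		evt := <-watcherStore.ResultChan()
		updated := evt.Object.(*workv1.ManifestWork)
		fmt.Printf("%s event for %s/%s, finalizers kept: %v\n",
			evt.Type, updated.Namespace, updated.Name, updated.Finalizers)
	}()

	// A work the agent already applied, with locally maintained finalizers/status.
	existing := &workv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  "cluster1",
			Name:       "example-work",
			Finalizers: []string{"example.io/agent-cleanup"}, // illustrative finalizer
		},
	}
	if err := informerStore.Add(existing); err != nil {
		panic(err)
	}

	// A spec update received from the source: the store restores the agent-side labels,
	// annotations, finalizers and status onto the incoming copy before emitting the event.
	received := existing.DeepCopy()
	received.Finalizers = nil
	received.ResourceVersion = "2"
	if err := watcherStore.HandleReceivedWork(types.Modified, received); err != nil {
		panic(err)
	}
	<-done
}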