diff --git a/.clang-tidy b/.clang-tidy index 446bc659f..71c2b9b31 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -25,6 +25,8 @@ Checks: "*,\ -cppcoreguidelines-macro-usage,\ -cppcoreguidelines-avoid-magic-numbers,\ -cppcoreguidelines-avoid-non-const-global-variables,\ + -cppcoreguidelines-avoid-const-or-ref-data-members,\ + -cppcoreguidelines-avoid-do-while,\ -cppcoreguidelines-pro-type-vararg,\ -cppcoreguidelines-pro-bounds-array-to-pointer-decay,\ -cppcoreguidelines-pro-bounds-pointer-arithmetic,\ diff --git a/.github/workflows/kafka_api_bazel_build.yml b/.github/workflows/kafka_api_bazel_build.yml index 47c37ddc2..f32ec5959 100644 --- a/.github/workflows/kafka_api_bazel_build.yml +++ b/.github/workflows/kafka_api_bazel_build.yml @@ -7,9 +7,9 @@ on: - main env: - KAFKA_SRC_LINK: https://downloads.apache.org/kafka/3.3.1/kafka_2.13-3.3.1.tgz + KAFKA_SRC_LINK: https://archive.apache.org/dist/kafka/3.3.1/kafka_2.13-3.3.1.tgz CPU_CORE_NUM: 2 - LIBRDKAFKA_TAG: v1.9.2 + LIBRDKAFKA_TAG: v2.0.2 jobs: kafka-api-bazel-build: diff --git a/.github/workflows/kafka_api_ci_tests.yml b/.github/workflows/kafka_api_ci_tests.yml index db9e8debb..1ca2a5e40 100644 --- a/.github/workflows/kafka_api_ci_tests.yml +++ b/.github/workflows/kafka_api_ci_tests.yml @@ -7,9 +7,9 @@ on: - main env: - KAFKA_SRC_LINK: https://downloads.apache.org/kafka/3.3.1/kafka_2.13-3.3.1.tgz + KAFKA_SRC_LINK: https://archive.apache.org/dist/kafka/3.3.1/kafka_2.13-3.3.1.tgz CPU_CORE_NUM: 2 - LIBRDKAFKA_TAG: v1.9.2 + LIBRDKAFKA_TAG: v2.0.2 BUILD_SUB_DIR: builds/sub-build jobs: diff --git a/.github/workflows/kafka_api_demo_conan_build.yml b/.github/workflows/kafka_api_demo_conan_build.yml index 7192d3c8c..8d13a0cd4 100644 --- a/.github/workflows/kafka_api_demo_conan_build.yml +++ b/.github/workflows/kafka_api_demo_conan_build.yml @@ -26,19 +26,9 @@ jobs: steps: - uses: actions/checkout@v2 - - name: Prepare (non-windows) - if: ${{!contains(matrix.os, 'windows')}} - run: | - if [[ ${OS_VERSION} == 'macos'* ]]; then - brew install conan - else - pip3 install conan - fi - - - name: Prepare (windows) - if: ${{contains(matrix.os, 'windows')}} + - name: Prepare run: | - pip3 install conan + pip3 install conan==1.59.0 - name: Build (non-windows) if: ${{!contains(matrix.os, 'windows')}} @@ -52,11 +42,8 @@ jobs: cmake .. -G "Unix Makefiles" cmake --build . - bin/kafka_sync_producer - bin/kafka_async_producer_copy_payload - bin/kafka_async_producer_not_copy_payload - bin/kafka_auto_commit_consumer - bin/kafka_manual_commit_consumer + bin/kafka_producer + bin/kafka_consumer - name: Build (windows) if: contains(matrix.os, 'windows') @@ -70,9 +57,6 @@ jobs: cmake .. cmake --build . 
- bin/kafka_sync_producer.exe - bin/kafka_async_producer_copy_payload.exe - bin/kafka_async_producer_not_copy_payload.exe - bin/kafka_auto_commit_consumer.exe - bin/kafka_manual_commit_consumer.exe + bin/kafka_producer.exe + bin/kafka_consumer.exe diff --git a/CMakeLists.txt b/CMakeLists.txt index 4de8a6003..11f81aaeb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION "3.8") +cmake_minimum_required(VERSION "3.20") project("Modern C++ Kafka API" VERSION 1.0.0) diff --git a/README.md b/README.md index b21c280d7..dd0f6a7bf 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,13 @@ -# About the *Modern C++ Kafka API* +About the *Modern C++ Kafka API* +================================= ![Lifecycle Active](https://badgen.net/badge/Lifecycle/Active/green) -## Introduction -The [Modern C++ Kafka API](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/annotated.html) is a layer of C++ wrapper based on [librdkafka](https://github.com/edenhill/librdkafka) (the C part), with high quality, but more friendly to users. +The [modern-cpp-kafka API](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/annotated.html) is a layer of ***C++*** wrapper based on [librdkafka](https://github.com/confluentinc/librdkafka) (the ***C*** part only), with high quality, but more friendly to users. + +- By now, [modern-cpp-kafka](https://github.com/morganstanley/modern-cpp-kafka) is compatible with [librdkafka v2.0.2](https://github.com/confluentinc/librdkafka/releases/tag/v2.0.2). -- By now, [modern-cpp-kafka](https://github.com/morganstanley/modern-cpp-kafka) is compatible with [librdkafka v1.9.2](https://github.com/edenhill/librdkafka/releases/tag/v1.9.2). ``` KAFKA is a registered trademark of The Apache Software Foundation and @@ -14,23 +15,27 @@ has been licensed for use by modern-cpp-kafka. modern-cpp-kafka has no affiliation with and is not endorsed by The Apache Software Foundation. ``` -## Why it's here + + +# Why it's here The ***librdkafka*** is a robust high performance C/C++ library, widely used and well maintained. -Unfortunately, to maintain C++98 compatibility, the C++ interface of ***librdkafka*** is not quite object-oriented or user-friendly. +Unfortunately, to maintain ***C++98*** compatibility, the ***C++*** interface of ***librdkafka*** is not quite object-oriented or user-friendly. + +Since C++ is evolving quickly, we want to take advantage of new C++ features, thus making life easier for developers. And this led us to create a new C++ API for Kafka clients. + +Eventually, we worked out the ***modern-cpp-kafka***, -- a ***header-only*** library that uses idiomatic ***C++*** features to provide a safe, efficient and easy to use way of producing and consuming Kafka messages. -Since C++ is evolving quickly, we want to take advantage of new C++ features, thus make the life easier for developers. And this led us to create a new C++ API for Kafka clients. -Eventually, we worked out the ***modern-cpp-kafka***, -- a header-only library that uses idiomatic C++ features to provide a safe, efficient and easy to use way of producing and consuming Kafka messages. 
-## Features +# Features -* Header-only +* __Header-only__ * Easy to deploy, and no extra library required to link -* Ease of Use +* __Ease of Use__ * Interface/Naming matches the Java API @@ -40,7 +45,7 @@ Eventually, we worked out the ***modern-cpp-kafka***, -- a header-only library t * ***librdkafka***'s polling and queue management is now hidden -* Robust +* __Robust__ * Verified with kinds of test cases, which cover many abnormal scenarios (edge cases) @@ -50,181 +55,708 @@ Eventually, we worked out the ***modern-cpp-kafka***, -- a header-only library t * Client failure and taking over, etc. -* Efficient +* __Efficient__ * No extra performance cost (No deep copy introduced internally) * Much better (2~4 times throughput) performance result than those native language (Java/Scala) implementation, in most commonly used cases (message size: 256 B ~ 2 KB) -## Build -* No need to build for installation +# Installation / Requirements + +* Just include the [`include/kafka`](https://github.com/morganstanley/modern-cpp-kafka/tree/main/include/kafka) directory for your project + +* The compiler should support ***C++17*** + + * Or, ***C++14***, but with pre-requirements + + - Need ***boost*** headers (for `boost::optional`) + + - For ***GCC*** compiler, it needs optimization options (e.g. `-O2`) + +* Dependencies + + * [**librdkafka**](https://github.com/confluentinc/librdkafka) headers and library (only the C part) + + - Also see the [requirements from **librdkafka**](https://github.com/confluentinc/librdkafka#requirements) + + * [**rapidjson**](https://github.com/Tencent/rapidjson) headers: only required by `addons/KafkaMetrics.h` + + + +# User Manual + +* [Release Notes](https://github.com/morganstanley/modern-cpp-kafka/releases) + +* [Class List](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/annotated.html) + + +## Properties + +[kafka::Properties Class Reference](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1Properties.html) + +* It is a map which contains all configuration info needed to initialize a Kafka client, and it's **the only** parameter needed for a constructor. 
+ +* The configuration items are ***key-value*** pairs, -- the type of ***key*** is always `std::string`, while the type for a ***value*** could be one of the followings + + * `std::string` + + * Most items are identical with [**librdkafka** configuration](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md) + + * But with exceptions + + * Default value changes + + | Key String | Default | Description | + | ----------- | ------------- | --------------------------------------------------------- | + | `log_level` | `5` | Default was `6` from **librdkafka** | + | `client.id` | random string | No default from **librdkafka** | + | `group.id` | random string | (for `KafkaConsumer` only) No default from **librdkafka** |- + + * Additional options + + | Key String | Default | Description | + | --------------------------- | ------------- | --------------------------------------------------------------------------------------------------- | + | `enable.manual.events.poll` | `false` | To poll the (offset-commit/message-delivery callback) events manually | + | `max.poll.records` | `500` | (for `KafkaConsumer` only) The maximum number of records that a single call to `poll()` would return | + + * Ignored options + + | Key String | Explanation | + | --------------------------- | ---------------------------------------------------------------------------------- | + | `enable.auto.offset.store` | ***modern-cpp-kafka*** will save the offsets in its own way | + | `auto.commit.interval.ms` | ***modern-cpp-kafka*** will only commit the offsets within each `poll()` operation | + + * `std::function<...>` + + * For kinds of callbacks + + | Key String | Value Type | + | ------------------------------ | --------------------------------------------------------------------------------------------- | + | `log_cb` | `LogCallback` (`std::function`) | + | `error_cb` | `ErrorCallback` (`std::function`) | + | `stats_cb` | `StatsCallback` (`std::function`) | + | `oauthbearer_token_refresh_cb` | `OauthbearerTokenRefreshCallback` (`std::function`) | + + * `Interceptors` + + * To intercept thread start/exit events, etc. + + | Key String | Value Type | + | -------------- | -------------- | + | `interceptors` | `Interceptors` | + +### Examples + +1. + ``` + std::string brokers = "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092"; + + kafka::Properties props ({ + {"bootstrap.servers", {brokers}}, + {"enable.idempotence", {"true"}}, + }); + ``` + +2. + ``` + kafka::Properties props; + props.put("bootstrap.servers", brokers); + props.put("enable.idempotence", "true"); + ``` +* Note: `bootstrap.servers` is the only mandatory property for a Kafka client + + +## KafkaProducer + +[kafka::clients::producer::KafkaProducer Class Reference](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1clients_1_1producer_1_1KafkaProducer.html) + +### A Simple Example + +Here's a very simple example to see how to send a message with a `KafkaProducer`. + +``` +#include + +#include +#include +#include + + +int main() +{ + using namespace kafka; + using namespace kafka::clients::producer; + + // E.g. 
KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + const Properties props({{"bootstrap.servers", brokers}}); + + // Create a producer + KafkaProducer producer(props); + + // Prepare a message + std::cout << "Type message value and hit enter to produce message..." << std::endl; + std::string line; + std::getline(std::cin, line); + + ProducerRecord record(topic, NullKey, Value(line.c_str(), line.size())); + + // Prepare delivery callback + auto deliveryCb = [](const RecordMetadata& metadata, const Error& error) { + if (!error) { + std::cout << "Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "Message failed to be delivered: " << error.message() << std::endl; + } + }; + + // Send a message + producer.send(record, deliveryCb); + + // Close the producer explicitly(or not, since RAII will take care of it) + producer.close(); +} +``` + +#### Notes + +* The `send()` is an unblocked operation unless the message buffering queue is full. + +* Make sure the memory block for `ProducerRecord`'s `key` is valid until the `send` is called. -* To build its `tools`/`tests`/`examples`, you should +* Make sure the memory block for `ProducerRecord`'s `value` is valid until the message delivery callback is called (unless the `send` is with option `KafkaProducer::SendOption::ToCopyRecordValue`). - * Specify library locations with environment variables +* It's guaranteed that the message delivery callback would be triggered anyway after `send`, -- a producer would even be waiting for it before close. - * `LIBRDKAFKA_INCLUDE_DIR` -- ***librdkafka*** headers +* At the end, we could close Kafka client (i.e. `KafkaProducer` or `KafkaConsumer`) explicitly, or just leave it to the destructor. - * `LIBRDKAFKA_LIBRARY_DIR` -- ***librdkafka*** libraries +### The Lifecycle of the Message - * `GTEST_ROOT` -- ***googletest*** headers and libraries +The message for the KafkaProducer is called `ProducerRecord`, it contains `Topic`, `Partition` (optional), `Key` and `Value`. Both `Key` & `Value` are `const_buffer`, and since there's no deep-copy for the `Value`, the user should make sure the memory block for the `Value` be valid, until the delivery callback has been executed. - * `BOOST_ROOT` -- ***boost*** headers and libraries +In the previous example, we don't need to worry about the lifecycle of `Value`, since the content of the `line` keeps to be available before closing the producer, and all message delivery callbacks would be triggered before finishing closing the producer. - * `SASL_LIBRARYDIR`/`SASL_LIBRARY` -- if SASL connection support is wanted +#### Example for shared_ptr - * `RAPIDJSON_INCLUDE_DIRS` -- `addons/KafkaMetrics` requires **rapidjson** headers +A trick is capturing the shared pointer (for the memory block of `Value`) in the message delivery callback. - * Create an empty directory for the build, and `cd` to it +``` + std::cout << "Type message value and hit enter to produce message... 
(empty line to quit)" << std::endl;
+
+    // Get input lines and forward them to Kafka
+    for (auto line = std::make_shared<std::string>();
+         std::getline(std::cin, *line);
+         line = std::make_shared<std::string>()) {
+
+        // Empty line to quit
+        if (line->empty()) break;
+
+        // Prepare a message
+        ProducerRecord record(topic, NullKey, Value(line->c_str(), line->size()));
+
+        // Prepare delivery callback
+        // Note: Here we capture the shared pointer of `line`, which holds the content for `record.value()`
+        auto deliveryCb = [line](const RecordMetadata& metadata, const Error& error) {
+            if (!error) {
+                std::cout << "Message delivered: " << metadata.toString() << std::endl;
+            } else {
+                std::cerr << "Message failed to be delivered: " << error.message() << std::endl;
+            }
+        };
+
+        // Send the message
+        producer.send(record, deliveryCb);
+    }
+```
-    * Build commands
+#### Example for deep-copy
-        * Type `cmake path-to-project-root`
+The option `KafkaProducer::SendOption::ToCopyRecordValue` could be used with `producer.send(...)`, so that the memory block of `record.value()` is copied into the internal sending buffer.
-        * Type `make` (could follow build options with `-D`)
+```
+    std::cout << "Type message value and hit enter to produce message... (empty line to quit)" << std::endl;
-            * `BUILD_OPTION_USE_ASAN=ON`   -- Use Address Sanitizer
+    // Get input lines and forward them to Kafka
+    for (std::string line; std::getline(std::cin, line); ) {
-            * `BUILD_OPTION_USE_TSAN=ON`   -- Use Thread Sanitizer
+        // Empty line to quit
+        if (line.empty()) break;
-            * `BUILD_OPTION_USE_UBSAN=ON`  -- Use Undefined Behavior Sanitizer
+        // Prepare a message
+        ProducerRecord record(topic, NullKey, Value(line.c_str(), line.size()));
-            * `BUILD_OPTION_CLANG_TIDY=ON` -- Enable clang-tidy checking
+        // Prepare delivery callback
+        auto deliveryCb = [](const RecordMetadata& metadata, const Error& error) {
+            if (!error) {
+                std::cout << "Message delivered: " << metadata.toString() << std::endl;
+            } else {
+                std::cerr << "Message failed to be delivered: " << error.message() << std::endl;
+            }
+        };
-            * `BUILD_OPTION_GEN_DOC=ON`    -- Generate documentation as well
+        // Send the message (deep-copy the payload)
+        producer.send(record, deliveryCb, KafkaProducer::SendOption::ToCopyRecordValue);
+    }
+```
-            * `BUILD_OPTION_DOC_ONLY=ON`   -- Only generate documentation
+### Embed More Info in a `ProducerRecord`
-            * `BUILD_OPTION_GEN_COVERAGE=ON`  -- Generate test coverage, only support by clang currently
+Besides the payload (i.e. `value()`), a `ProducerRecord` can also carry extra info in its `key()` & `headers()`.
-        * Type `make install`
+`Headers` is a vector of `Header`, each of which contains a `kafka::Header::Key` (i.e. `std::string`) and a `kafka::Header::Value` (i.e. `const_buffer`).
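A side note before the example below: storing an integer header value as raw native bytes (as done with `sessionId` there) ties the header format to the producing machine's endianness and integer width. Here is a minimal sketch of a more portable alternative -- serializing the value as text first. The variable names are illustrative only, and the header construction syntax follows the example below; the lifetime remark in the comment is an assumption, not a statement from this README.

```
    // Sketch only: put the textual form of the value into the header, instead of raw bytes.
    // (Assumption: like the record's key, the header buffer should stay valid until `send()` is called.)
    const std::size_t sessionId = 1;
    const std::string sessionIdStr = std::to_string(sessionId);

    const kafka::Header sessionIdHeader{kafka::Header::Key{"SessionId"},
                                        kafka::Header::Value{sessionIdStr.c_str(), sessionIdStr.size()}};
```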
-## Install +#### Example -* Include the `include/kafka` directory in your project +``` + const kafka::Topic topic = "someTopic"; + const kafka::Partition partition = 0; -* To work together with ***modern-cpp-kafka*** API, the compiler should support + const std::string key = "some key"; + const std::string value = "some payload"; - * Option 1: C++17 + const std::string category = "categoryA"; + const std::size_t sessionId = 1; + + { + kafka::clients::producer::ProducerRecord record(topic, + partition, + kafka::Key{key.c_str(), key.size()}, + kafka::Value{value.c_str(), value.size()}); + + record.headers() = {{ + kafka::Header{kafka::Header::Key{"Category"}, kafka::Header::Value{category.c_str(), category.size()}}, + kafka::Header{kafka::Header::Key{"SessionId"}, kafka::Header::Value{&sessionId, sizeof(sessionId)}} + }}; + + std::cout << "ProducerRecord: " << record.toString() << std::endl; + } +``` + +### About `enable.manual.events.poll` + +By default, `KafkaProducer` would be constructed with `enable.manual.events.poll=false` configuration. +That means, a background thread would be created, which keeps polling the events (thus calls the message delivery callbacks) + +Here we have another choice, -- using `enable.manual.events.poll=true`, thus the MessageDelivery callbacks would be called within member function `pollEvents()`. + +* Note: in this case, the send() will be an unblocked operation even if the message buffering queue is full, -- it would throw an exception (or return an error code with the input reference parameter), instead of blocking there. + +#### Example + +``` + // Prepare the configuration (with "enable.manual.events.poll=true") + const Properties props({{"bootstrap.servers", {brokers}}, + {"enable.manual.events.poll", {"true" }}}); + + // Create a producer + KafkaProducer producer(props); + + std::cout << "Type message value and hit enter to produce message... (empty line to finish)" << std::endl; + + // Get all input lines + std::list> messages; + for (auto line = std::make_shared(); std::getline(std::cin, *line) && !line->empty();) { + messages.emplace_back(line); + } + + while (!messages.empty()) { + // Pop out a message to be sent + auto payload = messages.front(); + messages.pop_front(); + + // Prepare the message + ProducerRecord record(topic, NullKey, Value(payload->c_str(), payload->size())); + + // Prepare the delivery callback + // Note: if fails, the message will be pushed back to the sending queue, and then retries later + auto deliveryCb = [payload, &messages](const RecordMetadata& metadata, const Error& error) { + if (!error) { + std::cout << "Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "Message failed to be delivered: " << error.message() << ", will be retried later" << std::endl; + messages.emplace_back(payload); + } + }; + + // Send the message + producer.send(record, deliveryCb); + + // Poll events (e.g. message delivery callback) + producer.pollEvents(std::chrono::milliseconds(0)); + } +``` - * Option 2: C++14 (with pre-requirements) +### Error Handling - * Need ***boost*** headers (for `boost::optional`) +[`kafka::Error`](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1Error.html) might occur at different places while sending a message, - * GCC only (with optimization, e.g. 
-O2) +* A [`kafka::KafkaException`](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1KafkaException.html) would be triggered if `KafkaProducer` fails to call the `send` operation. -## How to Run Tests +* Delivery [`kafka::Error`](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1Error.html) could be fetched via the delivery-callback. -* Unit test (`tests/unit`) +* The `kafka::Error::value()` for failures - * The test could be run with no Kafka cluster depolyed + * Local errors -* Integration test (`tests/integration`) + - `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC` -- The topic doesn't exist - * The test should be run with Kafka cluster depolyed + - `RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION` -- The partition doesn't exist - * The environment variable `KAFKA_BROKER_LIST` should be set + - `RD_KAFKA_RESP_ERR__INVALID_ARG` -- Invalid topic (topic is null or the length is too long (>512)) - * E.g. `export KAFKA_BROKER_LIST=127.0.0.1:29091,127.0.0.1:29092,127.0.0.1:29093` + - `RD_KAFKA_RESP_ERR__MSG_TIMED_OUT` -- No ack received within the time limit -* Robustness test (`tests/robustness`) + - `RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE` -- The message size conflicts with local configuration `message.max.bytes` - * The test should be run with Kafka cluster depolyed locally + * Broker errors - * The environment variable `KAFKA_BROKER_LIST` should be set + - [Error Codes](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) - * The environment variable `KAFKA_BROKER_PIDS` should be set + - Typical errors are - * Make sure the test runner gets the privilege to stop/resume the pids + * Invalid message: `RD_KAFKA_RESP_ERR_CORRUPT_MESSAGE`, `RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE`, `RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS`, `RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT`, `RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE`. - * E.g. `export KAFKA_BROKER_PIDS=61567,61569,61571` + * Topic/Partition not exist: `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART`, -- automatic topic creation is disabled on the broker or the application is specifying a partition that does not exist. + + * Authorization failure: `RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED`, `RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED` + +### Idempotent Producer + +The `enable.idempotence=true` configuration is highly RECOMMENDED. + +#### Example + +``` + kafka::Properties props; + props.put("bootstrap.servers", brokers); + props.put("enable.idempotence", "true"); + + // Create an idempotent producer + kafka::clients::producer::KafkaProducer producer(props); +``` + +* Note: please refer to the [document from **librdkafka**](https://github.com/confluentinc/librdkafka/blob/master/INTRODUCTION.md#idempotent-producer) for more details. + + +## Kafka Consumer + +[kafka::clients::consumer::KafkaConsumer Class Reference](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1clients_1_1consumer_1_1KafkaConsumer.html) + +### A Simple Example + +``` +#include + +#include +#include +#include +#include + +std::atomic_bool running = {true}; + +void stopRunning(int sig) { + if (sig != SIGINT) return; + + if (running) { + running = false; + } else { + // Restore the signal handler, -- to avoid stuck with this handler + signal(SIGINT, SIG_IGN); // NOLINT + } +} + +int main() +{ + using namespace kafka; + using namespace kafka::clients::consumer; + + // Use Ctrl-C to terminate the program + signal(SIGINT, stopRunning); // NOLINT + + // E.g. 
KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + const Properties props({{"bootstrap.servers", {brokers}}}); + + // Create a consumer instance + KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + while (running) { + // Poll messages from Kafka brokers + auto records = consumer.poll(std::chrono::milliseconds(100)); + + for (const auto& record: records) { + if (!record.error()) { + std::cout << "Got a new message..." << std::endl; + std::cout << " Topic : " << record.topic() << std::endl; + std::cout << " Partition: " << record.partition() << std::endl; + std::cout << " Offset : " << record.offset() << std::endl; + std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; + std::cout << " Headers : " << toString(record.headers()) << std::endl; + std::cout << " Key [" << record.key().toString() << "]" << std::endl; + std::cout << " Value [" << record.value().toString() << "]" << std::endl; + } else { + std::cerr << record.toString() << std::endl; + } + } + } + + // No explicit close is needed, RAII will take care of it + consumer.close(); +} +``` -* Additional settings for clients +* By default, the `KafkaConsumer` is constructed with property `enable.auto.commit=true` - * The environment variable `KAFKA_CLIENT_ADDITIONAL_SETTINGS` could be used for customized test environment + * It means it will automatically commit previously polled offsets on each poll (and the final close) operations. - * Especially for Kafka cluster with SASL(or SSL) connections + * Note: the internal offset commit is asynchronous, which is not guaranteed to succeed. Since the operation is supposed to be triggered (again) at a later time (within each `poll`), thus the occasional failure doesn't matter. - * E.g. `export KAFKA_CLIENT_ADDITIONAL_SETTINGS="security.protocol=SASL_PLAINTEXT;sasl.kerberos.service.name=...;sasl.kerberos.keytab=...;sasl.kerberos.principal=..."` +* `subscribe` could take a topic list. It's a block operation, and would wait for the consumer to get partitions assigned. -## To Start +* `poll` must be called periodically, thus to trigger kinds of callback handling internally. In practice, it could be put in a `while loop`. -* Tutorial +### Rebalance events - * Confluent Blog [Debuting a Modern C++ API for Apache Kafka](https://www.confluent.io/blog/modern-cpp-kafka-api-for-safe-easy-messaging) +The `KafkaConsumer` could specify the `RebalanceCallback` while it subscribes the topics, and the callback will be triggered while partitions are assigned or revoked. - * [KafkaProducer Quick Start](doc/KafkaProducerQuickStart.md) +#### Example + +``` + // The consumer would read all messages from the topic and then quit. + + // Prepare the configuration + const Properties props({{"bootstrap.servers", {brokers}}, + // Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event + // whenever the consumer reaches the end of a partition. 
+                           {"enable.partition.eof", {"true"}},
+                           // Action to take when there is no initial offset in the offset store;
+                           // "earliest" means the consumer would read from the very beginning
+                           {"auto.offset.reset", {"earliest"}}});
+
+    // Create a consumer instance
+    KafkaConsumer consumer(props);
+
+    // Prepare the rebalance callbacks
+    std::atomic<std::size_t> assignedPartitions{};
+    auto rebalanceCb = [&assignedPartitions](kafka::clients::consumer::RebalanceEventType et, const kafka::TopicPartitions& tps) {
+                           if (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned) {
+                               assignedPartitions += tps.size();
+                               std::cout << "Assigned partitions: " << kafka::toString(tps) << std::endl;
+                           } else {
+                               assignedPartitions -= tps.size();
+                               std::cout << "Revoked partitions: " << kafka::toString(tps) << std::endl;
+                           }
+                       };
+
+    // Subscribe to topics with rebalance callback
+    consumer.subscribe({topic}, rebalanceCb);
+
+    TopicPartitions finishedPartitions;
+    while (finishedPartitions.size() != assignedPartitions.load()) {
+        // Poll messages from Kafka brokers
+        auto records = consumer.poll(std::chrono::milliseconds(100));
+
+        for (const auto& record: records) {
+            if (!record.error()) {
+                std::cerr << record.toString() << std::endl;
+            } else {
+                if (record.error().value() == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+                    // Record the partition which has reached the end
+                    finishedPartitions.emplace(record.topic(), record.partition());
+                } else {
+                    std::cerr << record.toString() << std::endl;
+                }
+            }
+        }
+    }
+```
+
+### To Commit Offset Manually
+
+Once the KafkaConsumer is configured with `enable.auto.commit=false`, the user has to choose the right places to call `commitSync(...)`/`commitAsync(...)`.
+
+#### Example
+
+```
+    // Prepare the configuration
+    Properties props({{"bootstrap.servers", {brokers}}});
+    props.put("enable.auto.commit", "false");
+
+    // Create a consumer instance
+    KafkaConsumer consumer(props);
+
+    // Subscribe to topics
+    consumer.subscribe({topic});
+
+    while (running) {
+        auto records = consumer.poll(std::chrono::milliseconds(100));
+
+        for (const auto& record: records) {
+            std::cout << record.toString() << std::endl;
+        }
+
+        if (!records.empty()) {
+            consumer.commitAsync();
+        }
+    }
+
+    consumer.commitSync();
+
+    // No explicit close is needed, RAII will take care of it
+    // consumer.close();
+```
+
+### Error Handling
+
+* Normally, [`kafka::KafkaException`](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1KafkaException.html) will be thrown if an operation fails.
+
+* But if the `poll` operation fails, the [`kafka::Error`](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1Error.html) would be embedded in the [`kafka::clients::consumer::ConsumerRecord`](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/classKAFKA__API_1_1clients_1_1consumer_1_1ConsumerRecord.html).
+
+* There are 2 cases for `kafka::Error::value()`
+
+    * Success
+
+        * `RD_KAFKA_RESP_ERR__NO_ERROR` (`0`), -- got a message successfully
+
+        * `RD_KAFKA_RESP_ERR__PARTITION_EOF` (`-191`), -- reached the end of a partition (no message was received)
+
+    * Failure
+
+        * [Error Codes](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes)
+
+
+## Callbacks for KafkaClient
+
+We're free to set callbacks in `Properties` with a `kafka::clients::ErrorCallback`, `kafka::clients::LogCallback`, or `kafka::clients::StatsCallback`.
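The parameter lists these callables take can be read directly off the lambdas in the example below. As a quick reference, here is a sketch of the expected shapes; the alias names are illustrative only, and the authoritative typedefs are `kafka::clients::ErrorCallback`, `kafka::clients::LogCallback` and `kafka::clients::StatsCallback` in the library headers.

```
    // Sketch of the callable shapes accepted for "error_cb" / "log_cb" / "stats_cb",
    // inferred from the example below (requires <functional> and <string>):
    using ErrorCbShape = std::function<void(const kafka::Error& error)>;
    using LogCbShape   = std::function<void(int level, const char* filename, int lineno, const char* msg)>;
    using StatsCbShape = std::function<void(const std::string& jsonString)>;
```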
+ +#### Example + +``` + // Prepare the configuration + Properties props({{"bootstrap.servers", {brokers}}}); + + // To print out the error + props.put("error_cb", [](const kafka::Error& error) { + // https://en.wikipedia.org/wiki/ANSI_escape_code + std::cerr << "\033[1;31m" << "[" << kafka::utility::getCurrentTime() << "] ==> Met Error: " << "\033[0m"; + std::cerr << "\033[4;35m" << error.toString() << "\033[0m" << std::endl; + }); + + // To enable the debug-level log + props.put("log_level", "7"); + props.put("debug", "all"); + props.put("log_cb", [](int /*level*/, const char* /*filename*/, int /*lineno*/, const char* msg) { + std::cout << "[" << kafka::utility::getCurrentTime() << "]" << msg << std::endl; + }); + + // To enable the statistics dumping + props.put("statistics.interval.ms", "1000"); + props.put("stats_cb", [](const std::string& jsonString) { + std::cout << "Statistics: " << jsonString << std::endl; + }); +``` - * [KafkaConsumer Quick Start](doc/KafkaConsumerQuickStart.md) -* User's Manual +## Thread Model - * [Kafka Client API](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/annotated.html) +* Number of Background Threads within a Kafka Client + * __N__ threads for the message transmission (towards __N__ brokers). - * Kafka Client Properties + * __2__ (for `KafkaProducer`) / __3__ (for `KafkaConsumer`) threads to handle internal operations, timers, consumer group operations, etc. - * In most cases, the `Properties` settings for ***modern-cpp-kafka*** are identical with [librdkafka configuration](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) + * __1__ thread for (message-delivery/offset-commit) callback events polling, -- the thread only exists while the client is configured with `enable.manual.events.poll=false` (the default config) - * With following exceptions +* Which Thread Handles the Callbacks - * KafkaConsumer + * `consumer::RebalanceCallback`: the thread which calls `consumer.poll(...)` - * Properties with random string as default + * `consumer::OffsetCommitCallback` - * `client.id` + * While `enable.manual.events.poll=false`: the thread which calls `consumer.pollEvents(...)` - * `group.id` + * While `enable.manual.events.poll=true`: the background (events polling) thread - * More properties than ***librdkafka*** + * `producer::Callback` - * `max.poll.records` (default: `500`): The maxmum number of records that a single call to `poll()` would return + * While `enable.manual.events.poll=false`: the thread which calls `producer.pollEvents(...)` - * Property which overrides the one from ***librdkafka*** + * While `enable.manual.events.poll=true`: the background (events polling) thread - * `enable.auto.commit` (default: `false`): To automatically commit the previously polled offsets on each `poll` operation - * Properties not supposed to be used (internally shadowed by ***modern-cpp-kafka***) - * `enable.auto.offset.store` +# For Developers - * `auto.commit.interval.ms` +## Build (for [tests](https://github.com/morganstanley/modern-cpp-kafka/tree/main/tests)/[tools](https://github.com/morganstanley/modern-cpp-kafka/tree/main/tools)/[examples](https://github.com/morganstanley/modern-cpp-kafka/tree/main/examples)) - * KafkaProducer +* Specify library locations with environment variables - * Properties with random string as default + | Environment Variable | Description | + | -------------------------------- | -------------------------------------------------------- | + | `LIBRDKAFKA_INCLUDE_DIR` | ***librdkafka*** headers | + | 
`LIBRDKAFKA_LIBRARY_DIR`         | ***librdkafka*** libraries                                |
+  | `GTEST_ROOT`                     | ***googletest*** headers and libraries                    |
+  | `BOOST_ROOT`                     | ***boost*** headers and libraries                         |
+  | `SASL_LIBRARYDIR`/`SASL_LIBRARY` | [optional] for SASL connection support                    |
+  | `RAPIDJSON_INCLUDE_DIRS`         | `addons/KafkaMetrics.h` requires ***rapidjson*** headers  |
-            * `client.id`
+* Build commands
-        * Log level
+    * `cd empty-folder-for-build`
-            * The default `log_level` is `NOTICE` (`5`) for all these clients
+    * `cmake path-to-project-root` (the following options could be used with `-D`)
-* Test Environment (ZooKeeper/Kafka cluster) Setup
+        | Build Option                     | Description                                                |
+        | -------------------------------- | ---------------------------------------------------------- |
+        | `BUILD_OPTION_USE_TSAN=ON`       | Use Thread Sanitizer                                       |
+        | `BUILD_OPTION_USE_ASAN=ON`       | Use Address Sanitizer                                      |
+        | `BUILD_OPTION_USE_UBSAN=ON`      | Use Undefined Behavior Sanitizer                           |
+        | `BUILD_OPTION_CLANG_TIDY=ON`     | Enable clang-tidy checking                                 |
+        | `BUILD_OPTION_GEN_DOC=ON`        | Generate documentation as well                             |
+        | `BUILD_OPTION_DOC_ONLY=ON`       | Only generate documentation                                |
+        | `BUILD_OPTION_GEN_COVERAGE=ON`   | Generate test coverage, only supported by clang currently  |
-    * [Start the servers](https://kafka.apache.org/documentation/#quickstart_startserver)
+    * `make`
+    * `make install` (to install `tools`)
-## How to Achieve High Availability & Performance
-* [Kafka Broker Configuration](doc/KafkaBrokerConfiguration.md)
+## Run Tests
-* [Good Practices to Use KafkaProducer](doc/GoodPracticesToUseKafkaProducer.md)
+* Kafka cluster setup
-* [Good Practices to Use KafkaConsumer](doc/GoodPracticesToUseKafkaConsumer.md)
+    * [Quick Start For Cluster Setup](https://kafka.apache.org/documentation/#quickstart)
-* [How to Make KafkaProducer Reliable](doc/HowToMakeKafkaProducerReliable.md)
+    * [Cluster Setup Scripts For Test](https://github.com/morganstanley/modern-cpp-kafka/blob/main/scripts/start-local-kafka-cluster.py)
+    * [Kafka Broker Configuration](doc/KafkaBrokerConfiguration.md)
-## Other References
+* To run the binary, the test runner requires the following environment variables
-* Java API for Kafka clients
+  | Environment Variable               | Description                                                   | Example                                                                     |
+  | ---------------------------------- | ------------------------------------------------------------- | --------------------------------------------------------------------------- |
+  | `KAFKA_BROKER_LIST`                | The broker list for the Kafka cluster                         | `export KAFKA_BROKER_LIST=127.0.0.1:29091,127.0.0.1:29092,127.0.0.1:29093`  |
+  | `KAFKA_BROKER_PIDS`                | The broker PIDs for the test runner to manipulate             | `export KAFKA_BROKER_PIDS=61567,61569,61571`                                |
+  | `KAFKA_CLIENT_ADDITIONAL_SETTINGS` | Could be used for additional configuration for Kafka clients  | `export KAFKA_CLIENT_ADDITIONAL_SETTINGS="security.protocol=SASL_PLAINTEXT;sasl.kerberos.service.name=...;sasl.kerberos.keytab=...;sasl.kerberos.principal=..."` |
-    * [org.apache.kafka.clients.producer](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/producer/package-summary.html)
+    * The environment variable `KAFKA_BROKER_LIST` is mandatory for integration/robustness tests, which require the Kafka cluster.
-    * [org.apache.kafka.clients.consumer](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/consumer/package-summary.html)
+    * The environment variable `KAFKA_BROKER_PIDS` is mandatory for robustness tests, which require the Kafka cluster and the privilege to stop/resume the brokers.
- * [org.apache.kafka.clients.admin](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/admin/package-summary.html) + | Test Type | `KAFKA_BROKER_LIST` | `KAFKA_BROKER_PIDS` | + | -------------------------------------------------------------------------------------------------- | -------------------- | ------------------- | + | [tests/unit](https://github.com/morganstanley/modern-cpp-kafka/tree/main/tests/unit) | - | - | + | [tests/integration](https://github.com/morganstanley/modern-cpp-kafka/tree/main/tests/integration) | Required | - | + | [tests/robustness](https://github.com/morganstanley/modern-cpp-kafka/tree/main/tests/robustness) | Required | Required | diff --git a/demo_projects_for_build/conan_build/CMakeLists.txt b/demo_projects_for_build/conan_build/CMakeLists.txt index 3ad673450..ee9af3cd5 100644 --- a/demo_projects_for_build/conan_build/CMakeLists.txt +++ b/demo_projects_for_build/conan_build/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION "3.8") +cmake_minimum_required(VERSION "3.20") project("kafka-examples") set(CMAKE_CXX_STANDARD 17) @@ -7,22 +7,11 @@ set(CMAKE_CXX_STANDARD_REQUIRED True) include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) conan_basic_setup() -# Target: kafka_sync_producer -add_executable("kafka_sync_producer" "../../examples/kafka_sync_producer.cc") -target_link_libraries("kafka_sync_producer" ${CONAN_LIBS}) +# Target: kafka_producer +add_executable("kafka_producer" "../../examples/kafka_async_producer_not_copy_payload.cc") +target_link_libraries("kafka_producer" ${CONAN_LIBS}) -# Target: kafka_async_producer_copy_payload -add_executable("kafka_async_producer_copy_payload" "../../examples/kafka_async_producer_copy_payload.cc") -target_link_libraries("kafka_async_producer_copy_payload" ${CONAN_LIBS}) +# Target: kafka_consumer +add_executable("kafka_consumer" "../../examples/kafka_auto_commit_consumer.cc") +target_link_libraries("kafka_consumer" ${CONAN_LIBS}) -# Target: kafka_async_producer_not_copy_payload -add_executable("kafka_async_producer_not_copy_payload" "../../examples/kafka_async_producer_not_copy_payload.cc") -target_link_libraries("kafka_async_producer_not_copy_payload" ${CONAN_LIBS}) - -# Target: kafka_auto_commit_consumer -add_executable("kafka_auto_commit_consumer" "../../examples/kafka_auto_commit_consumer.cc") -target_link_libraries("kafka_auto_commit_consumer" ${CONAN_LIBS}) - -# Target: kafka_manual_commit_consumer -add_executable("kafka_manual_commit_consumer" "../../examples/kafka_manual_commit_consumer.cc") -target_link_libraries("kafka_manual_commit_consumer" ${CONAN_LIBS}) diff --git a/demo_projects_for_build/conan_build/conanfile.txt b/demo_projects_for_build/conan_build/conanfile.txt index 857cf1ea5..9f2f83037 100644 --- a/demo_projects_for_build/conan_build/conanfile.txt +++ b/demo_projects_for_build/conan_build/conanfile.txt @@ -1,5 +1,5 @@ [requires] -modern-cpp-kafka/2022.06.15 +modern-cpp-kafka/2023.01.05 [generators] cmake diff --git a/doc/GoodPracticesToUseKafkaConsumer.md b/doc/GoodPracticesToUseKafkaConsumer.md deleted file mode 100644 index 7575f0778..000000000 --- a/doc/GoodPracticesToUseKafkaConsumer.md +++ /dev/null @@ -1,22 +0,0 @@ -# Good Practices to Use a KafkaConsumer - -If we want to achieve high performance/availability, here're some rules of thumb. - -## How to distribute the messages (for the same topics) to different KafkaConsumers - -* Use a consumer group for these KafkaConsumers, thus they will work together -- each one deals with different partitions. 
- -* Besides `subscribe` (topics), users could also choose to explicitly `assign` certain partitions to a `KafkaConsumer`. - -## How to enhance the throughput - -* Try with a larger `QUEUED_MIN_MESSAGES`, especially for small messages. - -* Use multiple KafkaConsumers to distribute the payload. - -## How to avoid polling duplicated messages - -* To commit the offsets more frequently (e.g, always do commit after finishing processing a message). - -* Don't use quite a large `MAX_POLL_RECORDS` for a `KafkaConsumer` (with `enable.auto.commit=true`) -- you might fail to commit all these messages before crash, thus more duplications with the next `poll`. - diff --git a/doc/GoodPracticesToUseKafkaProducer.md b/doc/GoodPracticesToUseKafkaProducer.md deleted file mode 100644 index b6af5c744..000000000 --- a/doc/GoodPracticesToUseKafkaProducer.md +++ /dev/null @@ -1,48 +0,0 @@ -# Good Practices to Use a KafkaProducer - -If we want to achieve high performance/availability, here're some rules of thumb. - -## Avoid using `syncSend` for better throughput - -You should never call `syncSend` if you want to get a high throughput. The `syncSend` is a synchronous operation, and would not go on until the `acks` are received. - -## The `message.max.bytes` must be consistent with Kafka servers' setting - -* Default value: 1000,000 - -* The default setting for brokers is `message.max.bytes = 1000012`, and do MAKE SURE the client side setting no larger than it. Otherwise, it might construct a MessageSet which would be rejected (error: INVALID_MESSAGE_SIZE) by brokers. - -## Calculate `batch.num.messages` with the average message size - -* Default value: 10,000 - -* It defines the maximum number of messages batched in one MessageSet. - - Normally, larger value, better performance. However, since the size of MessageSet is limited by `message.max.bytes`, a too large value would not help any more. - - E.g, with the default `message.max.bytes=1000000` and `batch.num.messages=10000` settings, you could get the best performance while the average message size is larger than 100 bytes. - - However, if the average message size is small, you have to enlarge it (to `message.max.bytes/average_message_size` at least). - -## Choose `acks` wisely - -* The acks parameter controls how many partition replicas must receive the record before the producer can consider the write successful. - - * `acks=0`, the producer will not wait for a reply from the broker before assuming the message was sent successfully. - - * `acks=1`, the producer will receive a success response from the broker the moment the leader replica received the message. - - * `acks=all`, the producer will receive a success response from the broker once all in-sync replicas received the message. - - * Note: if "ack=all", please make sure the topic's replication factor is larger than 1. - -* The `acks=all` setting will highly impact the throughput & latency, and it would be obvious if the traffic latency between kafka brokers is high. But it's mandatory if we want to achieve high availability. - -## How could a message miss after send? - -* The message might even not have been received by the partition leader! (with `acks=0`) - -* Once the message received by the partition leader, the leader crashed just after responding to the producer, but has no chance to synchronize the message to other replicas. 
(with `acks=1`) - -* Once the message received by the partition leader, the leader crashed just after responding to the producer, but with no in-sync replica to synchronize for the message. (with `acks=all`, while brokers are with `min.insync.replicas=1`) - diff --git a/doc/HowToMakeKafkaProducerReliable.md b/doc/HowToMakeKafkaProducerReliable.md deleted file mode 100644 index 970fb3db7..000000000 --- a/doc/HowToMakeKafkaProducerReliable.md +++ /dev/null @@ -1,185 +0,0 @@ -# How to Make KafkaProducer Reliable - -While using message dispatching systems, we always suffer from message lost, duplication and disordering. - -Since the application (using the `KafkaProducer`) might crash/restart, we might consider using certain mechanism to achieve `At most once`/`At least once`, and `Ordering`, -- such as locally persisting the messages until successful delivery, using embedded sequence number to de-duplicate, or responding data-source to acknowledgement the delivery result, etc. These are common topics, which are not quite specific to Kafka. - -Here we'd focus on `KafkaProducer`, together with the `idempotence` feature. Let's see, in which cases problems might happen, how to avoid them, and what's the best practise,-- to achieve `No Message Lost`, `Exactly Once` and `Ordering`. - - -## About `No Message Lost` - -### When might a message actually be lost - -* The producer gets a successful delivery response after sending the message, but the `partition leader` failed to sync it to other `replicas`. - -### How could a message be lost even with successful delivery - -* First, the `partition leader` doesn't sync-up the latest message to enough `in-sync replicas` before responding with the `ack` - - * The `partition leader` just don't need to wait for other `replica`s response - - - E.g, the producer is configured with `acks=1` - - * No available `in-sync replica` to wait for the response - - - E.g, all other replicas are not in-sync - -* Then, the `partition leader` crashes, and one `in-sync replica` becomes new `partition leader` - - * The new `partition leader` has no acknowledgement with the latest messages. Later, while new messages arrive, it would use conflicting record offsets (same with those records which the `partition leader` knows only). Then, even if the previous `partition leader` comes up again, these records have no chance to be recovered (just internally overwritten to be consistent with other replicas). - -### How to make sure `No Message Lost` - -* Make sure the leader would wait for responses from all in-sync replicas before the response - - * Configuration `acks=all` is a MUST for producer - -* Ensure enough `In-Sync partition replicas` - - * Configuration `min.insync.replicas >= 2` is a MUST for brokers - - - Take `min.insync.replicas = 2` for example, it means, - - 1. At most `replication.factor - min.insync.replicas` replicas are out-of-sync, -- the producer would still be able to send messages, otherwise, it could fail with 'no enough replica' error, and keeps retrying. - - 2. Occasionally no more than `min.insync.replicas` in-sync-replica failures. -- otherwise, messages might be missed. In this case, if just one in-sync replica crashes after sending back the ack to the producer, the message would not be lost; if two failed, it would! Since the new leader might be a replica which was not in-sync previously, and has no acknowledgement with these latest messages. - - * Please refer to [Kafka Broker Configuration](KafkaBrokerConfiguration.md) for more details. 
- - * Then, what would happen if replicas fail - - 1. Fails to send (`not enough in-sync replica failure`), -- while number of `in-sync replicas` could not meet `min.insync.replication` - - 2. Lost messages (after sending messages), -- with no `in-sync replica` survived from multi-failures - - 3. No message lost (while with all `in-sync replicas` acknowledged, and at least one `in-sync replica` available) - - -## About `Exactly Once` - -### How duplications happen - -* After brokers successfully persisted a message, it sent the `ack` to the producer. But for some abnormal reasons (such as network failure, etc), the producer might fail to receive the `ack`. The `librdkafka`'s internal queue would retry, thus another (duplicated) message would be persisted by brokers. - -### How to guarantee `Exactly Once` - -* The `enable.idempotence` configuration is RECOMMENDED. - - -## About `Ordering` - -### No ordering between partitions - -* Make sure these `ProducerRecord`s be with the same partition - - - Explicitly assigned with the same `topic-partition` - - - Use the same `key` for these records - -### How disordering happens within one partition - -* The `librdkafka` uses internal partition queues, and once a message fails to be sent successfully(e.g, brokers are down), it would be put back on the queue and retries again while `retry.backoff.ms` expires. However, before that (retry with the failed message), the brokers might recover and the messages behind (if with configuration `max.in.flight > 1`) happened to be sent successfully. In this case (with configuration `max.in.flight > 1` and `retries > 0`), disordering could happen, and the user would not even be aware of it. - -* Furthermore, while the last retry still failed, delivery callback would eventually be triggered. The user has to determine what to do for that (might want to re-send the message, etc). But there might be a case, -- some later messages had already been saved successfully by the server, thus no way to revert the disordering. - - -## More About `Idempotent producer` - -Please refer to the document from librdkafka, [Idempotent Producer](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md#idempotent-producer) for more details. - -### Extra fields to maintain the message sequence - -The `librdkafka` maintains the original produce() ordering per-partition for all messages produced, using an internal per-partition 64-bit counter called the `msgid` which starts at 1. This `msgid` allows messages to be re-inserted in the partition message queue in the original order in the case of retries. - -The Idempotent Producer functionality in the Kafka protocol also has a per-message `sequence number`, which is a signed 32-bit wrapping counter that is reset each time the `Producer's ID (PID)` or `Epoch` changes. - -The `msgid` is used, (along with a base `msgid` value stored at the time the `PID/Epoch` was bumped), to calculate the Kafka protocol's message `sequence number`. - -### Configuration conflicts - -* Since the following configuration properties are adjusted automatically (if not modified by the user). Producer instantiation will fail if user-supplied configuration is incompatible. - - - `acks = all` - - - `max.in.flight (i.e, `max.in.flight.requests.per.connection`) = 5` - - - `retries = INT32_MAX` - -### Error handling - -* Exception thrown during `send` - - * For these errors which could be detected locally (and could not be recovered with retrying), an exception would be thrown. 
E.g, invalid message, as RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE (conflicting with local configuration `message.max.bytes`). - -* Permanent errors (respond from brokers) - - * Typical errors are: - - * Invalid message: RD_KAFKA_RESP_ERR_CORRUPT_MESSAGE, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS, RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT, RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE. - - * Topic/Partition not exist: ERR_UNKNOWN_TOPIC_OR_PART, -- automatic topic creation is disabled on the broker or the application is specifying a partition that does not exist. - - * Authorization failure: ERR_TOPIC_AUTHORIZATION_FAILED, ERR_CLUSTER_AUTHORIZATION_FAILED - - * Normally, `Permanent error` means careless design, or wrong configuration, which should be avoided from the very beginning. - - * Unless with `enable.gapless.guarantee`(EXPERIMENTAL) configured, producer would keep going with the following messages; otherwise, it would purge all messages in-flight/in-queue (with RD_KAFKA_RESP_ERR__PURGE_INFLIGHT/RD_KAFKA_RESP_ERR__PURGE_QUEUE). - -* Temporary errors - - * Apart from those `permanent errors`, most of the left are temporary errors, which will be retried (if retry count permits); and while `message.timeout` expired, message delivery callback would be triggered with `RD_KAFKA_RESP_ERR__TIEMD_OUT`. - -* Be careful with the `RD_KAFKA_RESP_ERR__TIEMD_OUT` failure - - * There's some corner cases, such as a message that has been persisted by brokers but `KafkaProducer` failed to get the response. If `message.timeout.ms` has not expired, the producer could retry and eventually get the response. Otherwise, (i.e, `message.timeout.ms` expired before the producer receives the successful `ack`), it would be considered as a delivery failure by the producer (while the brokers wouldn't). Users might re-transmit the message thus causing duplications. - - * To avoid this tricky situation, a longer `message.timeout.ms` is RECOMMENDED, to make sure there's enough time for transmission retries / on-flight responses. - -### Performance impact - -* The main impact comes from `max.in.flight=5` limitation. Currently, `max.in.flight` means `max.in.flight.per.connection`, -- that's 5 message batches (with size of ~1MB at the most) in flight (not get the `ack` response yet) at the most, towards per broker. Within low-latency networks, it would not be a problem; while in other cases, it might be! Good news is, there might be a plan (in `librdkafka`) to improve that `per.connection` limit to `per.partition`, thus boost the performance a lot. - - -## The best practice for `KafkaProducer` - -* Enable `enable.idempotence` configuration - -* Use a long `message.timeout.ms`, which would let `librdkafka` keep retrying, before triggering the delivery failure callback. 
- - -## Some examples - -### `KafkaProducer` demo - -```cpp - std::atomic running = true; - - KafkaProducer producer( - Properties({ - { ProducerConfig::BOOTSTRAP_SERVERS, "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" }, - { ProducerConfig::ENABLE_IDEMPOTENCE, "true" }, - { ProducerConfig::MESSAGE_TIMEOUT_MS, "86400000"} // as long as 1 day - }) - ); - - while (running) { - auto msg = fetchMsgFromUpstream(); - auto record = ProducerRecord(topic, msg.key, msg.value, msg.id); - producer.send(record, - // Ack callback - [&msg](const Producer::RecordMetadata& metadata, std::error_code ec) { - // the message could be identified by `metadata.recordId()` - auto recordId = metadata.recordId(); - if (ec) { - std::cerr << "Cannot send out message with recordId: " << recordId << ", error:" << ec.message() << std::endl; - } else { - commitMsgToUpstream(recordId); - } - }); - } - - producer.close(); -``` - -* With a long `message.timeout.ms`, we're not likely to catch an error with delivery callback, --it would retry for temporary errors anyway. But be aware with permanent errors, it might be caused by careless design. diff --git a/doc/KafkaConsumerQuickStart.md b/doc/KafkaConsumerQuickStart.md deleted file mode 100644 index 02b0f37f9..000000000 --- a/doc/KafkaConsumerQuickStart.md +++ /dev/null @@ -1,206 +0,0 @@ -# KafkaConsumer Quick Start - -Generally speaking, The `Modern C++ Kafka API` is quite similar with [Kafka Java's API](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html) - -We'd recommend users to cross-reference them, --especially the examples. - -Unlike Java's KafkaConsumer, here we introduced two derived classes, --KafkaAutoCommitConsumer and KafkaManualCommitConsumer, --depending on whether users should call `commit` manually. - -## KafkaConsumer (`enable.auto.commit=true`) - -* Automatically commits previously polled offsets on each `poll` (and the final `close`) operations. - - * Note, the internal `offset commit` is asynchronous, and is not guaranteed to succeed. It's supposed to be triggered (within each `poll` operation) periodically, thus the occasional failure doesn't quite matter. - -### Example -```cpp - // Create configuration object - kafka::Properties props ({ - {"bootstrap.servers", brokers}, - {"enable.auto.commit", "true"} - }); - - // Create a consumer instance - kafka::clients::KafkaConsumer consumer(props); - - // Subscribe to topics - consumer.subscribe({topic}); - - // Read messages from the topic - std::cout << "% Reading messages from topic: " << topic << std::endl; - while (true) { - auto records = consumer.poll(std::chrono::milliseconds(100)); - for (const auto& record: records) { - // In this example, quit on empty message - if (record.value().size() == 0) return 0; - - if (!record.error()) { - std::cout << "% Got a new message..." 
<< std::endl; - std::cout << " Topic : " << record.topic() << std::endl; - std::cout << " Partition: " << record.partition() << std::endl; - std::cout << " Offset : " << record.offset() << std::endl; - std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; - std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; - std::cout << " Key [" << record.key().toString() << "]" << std::endl; - std::cout << " Value [" << record.value().toString() << "]" << std::endl; - } else { - std::cerr << record.toString() << std::endl; - } - } - } - - // consumer.close(); // No explicit close is needed, RAII will take care of it -``` - -* The `bootstrap.servers` property is mandatory for a Kafka client. - -* `subscribe` could take a topic list. It's a blocking operation, which would wait for the consumer to get partitions assigned. - -* `poll` must be called periodically, thus triggering all kinds of callback handling internally. In practice, it could be put in a "while loop". - -* At the end, we could `close` the consumer explicitly, or just leave it to the destructor. - -## KafkaConsumer (`enable.auto.commit=false`) - -* Users must commit the offsets for received records manually. - -### Example -```cpp - // Create configuration object - kafka::Properties props ({ - {"bootstrap.servers", brokers}, - }); - - // Create a consumer instance - kafka::clients::KafkaConsumer consumer(props); - - // Subscribe to topics - consumer.subscribe({topic}); - - auto lastTimeCommitted = std::chrono::steady_clock::now(); - - // Read messages from the topic - std::cout << "% Reading messages from topic: " << topic << std::endl; - bool allCommitted = true; - bool running = true; - while (running) { - auto records = consumer.poll(std::chrono::milliseconds(100)); - for (const auto& record: records) { - // In this example, quit on empty message - if (record.value().size() == 0) { - running = false; - break; - } - - if (!record.error()) { - std::cout << "% Got a new message..." << std::endl; - std::cout << " Topic : " << record.topic() << std::endl; - std::cout << " Partition: " << record.partition() << std::endl; - std::cout << " Offset : " << record.offset() << std::endl; - std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; - std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; - std::cout << " Key [" << record.key().toString() << "]" << std::endl; - std::cout << " Value [" << record.value().toString() << "]" << std::endl; - - allCommitted = false; - } else { - std::cerr << record.toString() << std::endl; - } - } - - if (!allCommitted) { - auto now = std::chrono::steady_clock::now(); - if (now - lastTimeCommitted > std::chrono::seconds(1)) { - // Commit offsets for messages polled - std::cout << "% syncCommit offsets: " << kafka::utility::getCurrentTime() << std::endl; - consumer.commitSync(); // or commitAsync() - - lastTimeCommitted = now; - allCommitted = true; - } - } - } - - // consumer.close(); // No explicit close is needed, RAII will take care of it -``` - -* The example is quite similar to the previous (auto-commit) one, with only a few more lines added for the manual commit. - -* `commitSync` and `commitAsync` are both available for a KafkaManualCommitConsumer. Normally, use `commitSync` to guarantee the commitment, or use `commitAsync` (with an `OffsetCommitCallback`) to get better performance, as sketched below. 
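For reference, here is a minimal sketch of the `commitAsync` + callback combination mentioned above. It is illustrative only: the exact callback type and its parameters (the committed offsets plus an error object) are assumptions based on this documentation, not copied from the headers.

```cpp
    // Illustrative sketch: asynchronous commit with a completion callback.
    // Assumption: the callback receives the committed offsets and an error object.
    consumer.commitAsync(
        [](const kafka::TopicPartitionOffsets& committedOffsets, const kafka::Error& error) {
            if (error) {
                std::cerr << "% Offset commit failed: " << error.message() << std::endl;
            } else {
                std::cout << "% Committed offsets for " << committedOffsets.size() << " partition(s)" << std::endl;
            }
        });
```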
- -## `KafkaConsumer` with `kafka::clients::KafkaClient::EventsPollingOption` - -While we construct a `KafkaConsumer` with `kafka::clients::KafkaClient::EventsPollingOption::Auto` (i.e. the default option), an internal thread would be created to handle the `OffsetCommit` callbacks. - -This might not be what you want, since then you have to use 2 different threads to process the messages and handle the `OffsetCommit` responses. - -Here we have another choice, -- using `kafka::clients::KafkaClient::EventsPollingOption::Manual`, thus the `OffsetCommit` callbacks would be called within the member function `pollEvents()`. - -### Example -```cpp - KafkaConsumer consumer(props, kafka::clients::KafkaClient::EventsPollingOption::Manual); - - consumer.subscribe({"topic1", "topic2"}); - - while (true) { - auto records = consumer.poll(std::chrono::milliseconds(100)); - for (auto& record: records) { - // Process the message... - process(record); - - // Here we commit the offset manually - consumer.commitSync(*record); - } - - // Here we call the `OffsetCommit` callbacks - // Note, we can only do this if the consumer was constructed with `EventsPollingOption::Manual`. - consumer.pollEvents(); - } -``` - -## Error handling - -No exception would be thrown from a consumer's `poll` operation. - -Instead, once an error occurs, the `Error` would be embedded in the `Consumer::ConsumerRecord`. - -About `Error`'s `value()`s, there are 2 cases: - -1. Success - - - `RD_KAFKA_RESP_ERR__NO_ERROR` (`0`), -- got a message successfully - - - `RD_KAFKA_RESP_ERR__PARTITION_EOF`, -- reached the end of a partition (no message got) - -2. Failure - - - [Error Codes](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) - -## Frequently Asked Questions - -* What're the available configurations? - - - [KafkaConsumerConfiguration](KafkaClientConfiguration.md#kafkaconsumer-configuration) - - - [Inline doxygen page](../doxygen/classKAFKA__CPP__APIS__NAMESPACE_1_1ConsumerConfig.html) - -* How to enhance the polling performance? - - `ConsumerConfig::QUEUED_MIN_MESSAGES` determines how frequently the consumer would send the FetchRequest towards brokers. - The default value (i.e., 100000) might not be good enough for small (less than 1KB) messages; a larger value (e.g., 1000000) is suggested in that case (see the sketch at the end of this section). - -* How many threads would be created by a KafkaConsumer? - - 1. Each broker (in the list of BOOTSTRAP_SERVERS) would take a separate thread to transmit messages towards the Kafka cluster. - - 2. Another 3 threads will handle internal operations, consumer group operations, and all kinds of timers, etc. - - 3. To enable the auto events-polling, one more background thread would be created, which keeps polling/processing the offset-commit callback event. - -* Which one of these threads will handle the callbacks? - - There are 2 kinds of callbacks for a KafkaConsumer, - - 1. `RebalanceCallback` will be triggered internally by the user's thread, -- within the `poll` function. - - 2. If `enable.auto.commit=true`, the `OffsetCommitCallback` will be triggered by the user's `poll` thread; otherwise, it would be triggered by a background thread. 
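As a follow-up to the polling-performance question above, a minimal sketch of the suggested tuning. The values are simply the ones discussed above, and `brokers` is a placeholder variable as in the other examples.

```cpp
    // Illustrative sketch: raise the fetch-ahead queue threshold for small messages.
    // "queued.min.messages" is the underlying property behind ConsumerConfig::QUEUED_MIN_MESSAGES.
    kafka::Properties props ({
        {"bootstrap.servers",   brokers},
        {"queued.min.messages", "1000000"}   // default is 100000
    });

    kafka::clients::KafkaConsumer consumer(props);
```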
diff --git a/doc/KafkaProducerQuickStart.md b/doc/KafkaProducerQuickStart.md deleted file mode 100644 index 3997941dc..000000000 --- a/doc/KafkaProducerQuickStart.md +++ /dev/null @@ -1,217 +0,0 @@ -# KafkaProducer Quick Start - -Generally speaking, the `Modern C++ Kafka API` is quite similar to the [Kafka Java's API](https://kafka.apache.org/10/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html). - -We'd recommend users to cross-reference them, --especially the examples. - -## KafkaProducer - -* The `send` is a non-blocking operation, and the result (including errors) can only be obtained from the delivery callback. - -### Example -```cpp - using namespace kafka::clients; - - // Create configuration object - kafka::Properties props ({ - {"bootstrap.servers", brokers}, - {"enable.idempotence", "true"}, - }); - - // Create a producer instance - KafkaProducer producer(props); - - // Read messages from stdin and produce to the broker - std::cout << "% Type message value and hit enter to produce message. (empty line to quit)" << std::endl; - - for (auto line = std::make_shared<std::string>(); - std::getline(std::cin, *line); - line = std::make_shared<std::string>()) { - // The ProducerRecord doesn't own `line`, it is just a thin wrapper - auto record = producer::ProducerRecord(topic, - kafka::NullKey, - kafka::Value(line->c_str(), line->size())); - - // Send the message - producer.send(record, - // The delivery report handler - // Note: Here we capture the shared_pointer of `line`, - // which holds the content for `record.value()`. - // It makes sure the memory block is valid until the lambda finishes. - [line](const producer::RecordMetadata& metadata, const kafka::Error& error) { - if (!error) { - std::cout << "% Message delivered: " << metadata.toString() << std::endl; - } else { - std::cerr << "% Message delivery failed: " << error.message() << std::endl; - } - }); - - if (line->empty()) break; - } - - // producer.close(); // No explicit close is needed, RAII will take care of it -``` - -* The user must guarantee that the memory block for the `ProducerRecord`'s `key` is valid until the record has been sent. - -* By default, the memory block for the `ProducerRecord`'s `value` must be valid until the delivery callback is called; otherwise, the `send` should be called with the option `KafkaProducer::SendOption::ToCopyRecordValue`. - -* It's guaranteed that the delivery callback would be triggered anyway after `send`, -- a producer would even wait for it before `close`. So, the `Producer::Callback` function is a good place to release these memory resources. - -## `KafkaProducer` with `kafka::clients::KafkaClient::EventsPollingOption` - -While we construct a `KafkaProducer` with `kafka::clients::KafkaClient::EventsPollingOption::Auto` (the default option), an internal thread would be created to handle the `MessageDelivery` callbacks. - -This might not be what you want, since then you have to use 2 different threads to send the messages and handle the `MessageDelivery` responses. - -Here we have another choice, -- using `kafka::clients::KafkaClient::EventsPollingOption::Manual`, thus the `MessageDelivery` callbacks would be called within the member function `pollEvents()`. - -* Note, if you constructed the `KafkaProducer` with `EventsPollingOption::Manual`, the `send()` would be a non-blocking operation. -I.e., once the `message buffering queue` becomes full, the `send()` operation would throw an exception (or return an `error code` via the input reference parameter), -- instead of blocking there. 
-This makes sense, since you might want to call `pollEvents()` later, thus the delivery callbacks could be called for some messages (which could then be removed from the `message buffering queue`). - -### Example -```cpp - using namespace kafka::clients; - - KafkaProducer producer(props, KafkaClient::EventsPollingOption::Manual); - - // Prepare "msgsToBeSent", -- e.g. a map of record-id to (key, value) pairs - std::map<RecordId, std::pair<Key, Value>> msgsToBeSent = ...; - - for (const auto& msg : msgsToBeSent) { - auto record = producer::ProducerRecord(topic, partition, msg.second.first, msg.second.second, msg.first); - kafka::Error sendError; - producer.send(sendError, - record, - // Ack callback - [&msg](const producer::RecordMetadata& metadata, const kafka::Error& deliveryError) { - // the message could be identified by `metadata.recordId()` - if (deliveryError) { - std::cerr << "% Message delivery failed: " << deliveryError.message() << std::endl; - } else { - msgsToBeSent.erase(metadata.recordId()); // Quite safe here - } - }); - if (sendError) break; - } - - // Here we call the `MessageDelivery` callbacks - // Note, we can only do this if the producer was constructed with `EventsPollingOption::Manual`. - producer.pollEvents(); -``` - -## Headers in ProducerRecord - -* A `ProducerRecord` could take extra information with `headers`. - - * Note, the `header` within `headers` contains a pointer to the memory block of its `value`. The memory block MUST be valid until the `ProducerRecord` is read by `producer.send()`. - -### Example -```cpp - using namespace kafka::clients; - - KafkaProducer producer(props); - - auto record = producer::ProducerRecord(topic, partition, Key(), Value()); - - for (const auto& msg : msgsToBeSent) { - // Prepare record headers - std::string session = msg.session; - std::uint32_t seqno = msg.seqno; - record.headers() = { - { "session", { session.c_str(), session.size()} }, - { "seqno", { &seqno, sizeof(seqno)} } - }; - - record.setKey(msg.key); - record.setValue(msg.value); - - producer.send(record, - // Ack callback - [&msg](const producer::RecordMetadata& metadata, const kafka::Error& error) { - if (error) { - std::cerr << "% Message delivery failed: " << error.message() << std::endl; - } - }); - } -``` - -## Error handling - -An `Error` might occur at different stages while sending a message, - -1. A `KafkaException` would be triggered if the `KafkaProducer` failed to trigger the send operation. - -2. A delivery `Error` would be passed through the delivery callback. - -About `Error`'s `value()`s, there are 2 cases: - -1. Local errors, - - - `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC` -- The topic doesn't exist - - - `RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION` -- The partition doesn't exist - - - `RD_KAFKA_RESP_ERR__INVALID_ARG` -- Invalid topic (topic is null or the length is too long (>512)) - - - `RD_KAFKA_RESP_ERR__MSG_TIMED_OUT` -- No ack received within the time limit - -2. Broker errors, - - - [Error Codes](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) - -## Frequently Asked Questions - -### What are the available configurations? 
- -- [KafkaProducerConfiguration](KafkaClientConfiguration.md#kafkaproducer-configuration) - -- [Inline doxygen page](../doxygen/classKAFKA__CPP__APIS__NAMESPACE_1_1ProducerConfig.html) - -### About the automatic `topic creation` - -If the cluster is configured with `auto.create.topics.enable=true`, the producer/consumer could trigger the brokers to create a new topic (with `send`, `subscribe`, etc.) - -Note, the default created topic may not be what you want (e.g., with the `default.replication.factor=1` configuration as default, etc.), thus causing other unexpected problems. - -### How to enhance the sending performance? - -Enlarging the default `BATCH_NUM_MESSAGES` and `LINGER_MS` might improve message batching, thus enhancing the throughput. - -On the other hand, `LINGER_MS` would highly impact the latency. - -The `QUEUE_BUFFERING_MAX_MESSAGES` and `QUEUE_BUFFERING_MAX_KBYTES` would determine the `max in flight requests` (as some Kafka materials call it). If the queue buffer is full, the `send` operation would be blocked. - -Larger `QUEUE_BUFFERING_MAX_MESSAGES`/`QUEUE_BUFFERING_MAX_KBYTES` might help to improve throughput as well, while it also means more messages buffered locally. - -### How to achieve reliable delivery - -* Quick Answer, - - 1. The Kafka cluster should be configured with `min.insync.replicas = 2` at least - - 2. Configure the `KafkaProducer` with the property `{ProducerConfig::ENABLE_IDEMPOTENCE, "true"}`, together with proper error handling (within the delivery callback). - -* Complete Answer, - - * [How to Make KafkaProducer Reliable](HowToMakeKafkaProducerReliable.md) - -### How many threads would be created by a KafkaProducer? - -Most of these background threads are started internally by librdkafka. - -Here is a brief introduction to what they're used for, - -1. Each broker (in the list of BOOTSTRAP_SERVERS) would take a separate thread to transmit messages towards the Kafka cluster. - -2. Another 2 threads would handle internal operations and all kinds of timers, etc. - -3. To enable the auto events-polling, one more background thread would be created, which keeps polling the delivery callback event. - -### Which one of these threads will handle the callbacks? - -It will be handled by a background thread, not by the user's thread. - -Note, one should be careful if both the `KafkaProducer::send()` caller and the `producer::Callback` might access the same container at the same time (see the sketch below). 
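To make the caveat above concrete, here is a minimal, illustrative sketch: both the sending thread and the background callback thread take the same lock before touching the shared container. The container, the `Message` type, and `msgId` are hypothetical application-side names, not part of the API.

```cpp
    // Illustrative sketch: guard a container shared between the user's send loop
    // and the background thread that runs the delivery callbacks.
    std::mutex                       inFlightMutex;     // hypothetical application-side lock
    std::map<std::uint64_t, Message> inFlightMessages;  // `Message` is a hypothetical type

    {
        const std::lock_guard<std::mutex> lock(inFlightMutex);
        inFlightMessages.emplace(msgId, msg);           // user's thread, before `send()`
    }

    producer.send(record,
                  [&inFlightMutex, &inFlightMessages, msgId](const producer::RecordMetadata& /*metadata*/,
                                                             const kafka::Error&             error) {
                      // Runs on the background callback thread, so take the same lock
                      const std::lock_guard<std::mutex> lock(inFlightMutex);
                      if (!error) inFlightMessages.erase(msgId);
                  });
```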
- diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 0b26d5ec0..4fa514ed8 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -24,3 +24,44 @@ target_link_libraries("kafka_auto_commit_consumer" modern-cpp-kafka-api) # Target: kafka_manual_commit_consumer add_executable("kafka_manual_commit_consumer" "kafka_manual_commit_consumer.cc") target_link_libraries("kafka_manual_commit_consumer" modern-cpp-kafka-api) + + +# Target: example_Properties +add_executable("example_Properties" "example_Properties.cc") +target_link_libraries("example_Properties" modern-cpp-kafka-api) + +# Target: example_KafkaProducer_Simple +add_executable("example_KafkaProducer_Simple" "example_KafkaProducer_Simple.cc") +target_link_libraries("example_KafkaProducer_Simple" modern-cpp-kafka-api) + +# Target: example_KafkaProducer_Lifecycle +add_executable("example_KafkaProducer_Lifecycle" "example_KafkaProducer_Lifecycle.cc") +target_link_libraries("example_KafkaProducer_Lifecycle" modern-cpp-kafka-api) + +# Target: example_KafkaProducer_DeepCopy +add_executable("example_KafkaProducer_DeepCopy" "example_KafkaProducer_DeepCopy.cc") +target_link_libraries("example_KafkaProducer_DeepCopy" modern-cpp-kafka-api) + +# Target: example_KafkaProducer_EnableManualEventsPoll +add_executable("example_KafkaProducer_EnableManualEventsPoll" "example_KafkaProducer_EnableManualEventsPoll.cc") +target_link_libraries("example_KafkaProducer_EnableManualEventsPoll" modern-cpp-kafka-api) + +# Target: example_ProducerRecordHeaders +add_executable("example_ProducerRecordHeaders" "example_ProducerRecordHeaders.cc") +target_link_libraries("example_ProducerRecordHeaders" modern-cpp-kafka-api) + +# Target: example_KafkaConsumer_Simple +add_executable("example_KafkaConsumer_Simple" "example_KafkaConsumer_Simple.cc") +target_link_libraries("example_KafkaConsumer_Simple" modern-cpp-kafka-api) + +# Target: example_KafkaConsumer_RebalanceEvents +add_executable("example_KafkaConsumer_RebalanceEvents" "example_KafkaConsumer_RebalanceEvents.cc") +target_link_libraries("example_KafkaConsumer_RebalanceEvents" modern-cpp-kafka-api) + +# Target: example_KafkaConsumer_ManualOffsetCommit +add_executable("example_KafkaConsumer_ManualOffsetCommit" "example_KafkaConsumer_ManualOffsetCommit.cc") +target_link_libraries("example_KafkaConsumer_ManualOffsetCommit" modern-cpp-kafka-api) + +# Target: example_KafkaClient_Callbacks +add_executable("example_KafkaClient_Callbacks" "example_KafkaClient_Callbacks.cc") +target_link_libraries("example_KafkaClient_Callbacks" modern-cpp-kafka-api) diff --git a/examples/example_KafkaClient_Callbacks.cc b/examples/example_KafkaClient_Callbacks.cc new file mode 100644 index 000000000..a76cb9a4c --- /dev/null +++ b/examples/example_KafkaClient_Callbacks.cc @@ -0,0 +1,70 @@ +#include + +#include +#include +#include +#include + +std::atomic_bool running = {true}; + +void stopRunning(int sig) { + if (sig != SIGINT) return; + + if (running) { + running = false; + } else { + // Restore the signal handler, -- to avoid stuck with this handler + signal(SIGINT, SIG_IGN); // NOLINT + } +} + +int main() +{ + using namespace kafka; + using namespace kafka::clients::consumer; + + // Use Ctrl-C to terminate the program + signal(SIGINT, stopRunning); // NOLINT + + // E.g. 
KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + Properties props({{"bootstrap.servers", {brokers}}}); + + // To print out the error + props.put("error_cb", [](const kafka::Error& error) { + // https://en.wikipedia.org/wiki/ANSI_escape_code + std::cerr << "\033[1;31m" << "[" << kafka::utility::getCurrentTime() << "] ==> Met Error: " << "\033[0m"; + std::cerr << "\033[4;35m" << error.toString() << "\033[0m" << std::endl; + }); + + // To enable the debug-level log + props.put("log_level", "7"); + props.put("debug", "all"); + props.put("log_cb", [](int /*level*/, const char* /*filename*/, int /*lineno*/, const char* msg) { + std::cout << "[" << kafka::utility::getCurrentTime() << "]" << msg << std::endl; + }); + + // To enable the statistics dumping + props.put("statistics.interval.ms", "1000"); + props.put("stats_cb", [](const std::string& jsonString) { + std::cout << "Statistics: " << jsonString << std::endl; + }); + + // Create a consumer instance + KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + while (running) { + auto records = consumer.poll(std::chrono::milliseconds(100)); + + for (const auto& record: records) { + std::cerr << record.toString() << std::endl; + } + } +} + diff --git a/examples/example_KafkaConsumer_ManualOffsetCommit.cc b/examples/example_KafkaConsumer_ManualOffsetCommit.cc new file mode 100644 index 000000000..9dd51bb04 --- /dev/null +++ b/examples/example_KafkaConsumer_ManualOffsetCommit.cc @@ -0,0 +1,60 @@ +#include + +#include +#include +#include +#include + +std::atomic_bool running = {true}; + +void stopRunning(int sig) { + if (sig != SIGINT) return; + + if (running) { + running = false; + } else { + // Restore the signal handler, -- to avoid stuck with this handler + signal(SIGINT, SIG_IGN); // NOLINT + } +} + +int main() +{ + using namespace kafka; + using namespace kafka::clients::consumer; + + // Use Ctrl-C to terminate the program + signal(SIGINT, stopRunning); // NOLINT + + // E.g. KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + Properties props({{"bootstrap.servers", {brokers}}}); + props.put("enable.auto.commit", "false"); + + // Create a consumer instance + KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + while (running) { + auto records = consumer.poll(std::chrono::milliseconds(100)); + + for (const auto& record: records) { + std::cout << record.toString() << std::endl; + } + + if (!records.empty()) { + consumer.commitAsync(); + } + } + + consumer.commitSync(); + + // No explicit close is needed, RAII will take care of it + // consumer.close(); +} + diff --git a/examples/example_KafkaConsumer_RebalanceEvents.cc b/examples/example_KafkaConsumer_RebalanceEvents.cc new file mode 100644 index 000000000..90f402f6b --- /dev/null +++ b/examples/example_KafkaConsumer_RebalanceEvents.cc @@ -0,0 +1,68 @@ +#include + +#include +#include +#include +#include + +int main() +{ + using namespace kafka; + using namespace kafka::clients::consumer; + + // E.g. 
KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // The consumer would read all messages from the topic and then quit. + + // Prepare the configuration + const Properties props({{"bootstrap.servers", {brokers}}, + // Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event + // whenever the consumer reaches the end of a partition. + {"enable.partition.eof", {"true"}}, + // Action to take when there is no initial offset in offset store + // it means the consumer would read from the very beginning + {"auto.offset.reset", {"earliest"}}}); + + // Create a consumer instance + KafkaConsumer consumer(props); + + // Prepare the rebalance callbacks + std::atomic assignedPartitions{}; + auto rebalanceCb = [&assignedPartitions](kafka::clients::consumer::RebalanceEventType et, const kafka::TopicPartitions& tps) { + if (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned) { + assignedPartitions += tps.size(); + std::cout << "Assigned partitions: " << kafka::toString(tps) << std::endl; + } else { + assignedPartitions -= tps.size(); + std::cout << "Revoked partitions: " << kafka::toString(tps) << std::endl; + } + }; + + // Subscribe to topics with rebalance callback + consumer.subscribe({topic}, rebalanceCb); + + TopicPartitions finishedPartitions; + while (finishedPartitions.size() != assignedPartitions.load()) { + // Poll messages from Kafka brokers + auto records = consumer.poll(std::chrono::milliseconds(100)); + + for (const auto& record: records) { + if (!record.error()) { + std::cerr << record.toString() << std::endl; + } else { + if (record.error().value() == RD_KAFKA_RESP_ERR__PARTITION_EOF) { + // Record the partition which has been reached the end + finishedPartitions.emplace(record.topic(), record.partition()); + } else { + std::cerr << record.toString() << std::endl; + } + } + } + } + + // No explicit close is needed, RAII will take care of it + // consumer.close(); +} + diff --git a/examples/example_KafkaConsumer_Simple.cc b/examples/example_KafkaConsumer_Simple.cc new file mode 100644 index 000000000..5fb0924d1 --- /dev/null +++ b/examples/example_KafkaConsumer_Simple.cc @@ -0,0 +1,65 @@ +#include + +#include +#include +#include +#include + +std::atomic_bool running = {true}; + +void stopRunning(int sig) { + if (sig != SIGINT) return; + + if (running) { + running = false; + } else { + // Restore the signal handler, -- to avoid stuck with this handler + signal(SIGINT, SIG_IGN); // NOLINT + } +} + +int main() +{ + using namespace kafka; + using namespace kafka::clients::consumer; + + // Use Ctrl-C to terminate the program + signal(SIGINT, stopRunning); // NOLINT + + // E.g. KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + const Properties props({{"bootstrap.servers", {brokers}}}); + + // Create a consumer instance + KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + while (running) { + // Poll messages from Kafka brokers + auto records = consumer.poll(std::chrono::milliseconds(100)); + + for (const auto& record: records) { + if (!record.error()) { + std::cout << "Got a new message..." 
<< std::endl; + std::cout << " Topic : " << record.topic() << std::endl; + std::cout << " Partition: " << record.partition() << std::endl; + std::cout << " Offset : " << record.offset() << std::endl; + std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; + std::cout << " Headers : " << toString(record.headers()) << std::endl; + std::cout << " Key [" << record.key().toString() << "]" << std::endl; + std::cout << " Value [" << record.value().toString() << "]" << std::endl; + } else { + std::cerr << record.toString() << std::endl; + } + } + } + + // No explicit close is needed, RAII will take care of it + consumer.close(); +} + diff --git a/examples/example_KafkaProducer_DeepCopy.cc b/examples/example_KafkaProducer_DeepCopy.cc new file mode 100644 index 000000000..d284f39e0 --- /dev/null +++ b/examples/example_KafkaProducer_DeepCopy.cc @@ -0,0 +1,50 @@ +#include + +#include +#include +#include + + +int main() +{ + using namespace kafka; + using namespace kafka::clients::producer; + + // E.g. KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + const Properties props({{"bootstrap.servers", brokers}}); + + // Create a producer + KafkaProducer producer(props); + + std::cout << "Type message value and hit enter to produce message... (empty line to quit)" << std::endl; + + // Get input lines and forward them to Kafka + for (std::string line; std::getline(std::cin, line); ) { + + // Empty line to quit + if (line.empty()) break; + + // Prepare a message + const ProducerRecord record(topic, NullKey, Value(line.c_str(), line.size())); + + // Prepare delivery callback + auto deliveryCb = [](const RecordMetadata& metadata, const Error& error) { + if (!error) { + std::cout << "Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "Message failed to be delivered: " << error.message() << std::endl; + } + }; + + // Send the message (deep-copy the payload) + producer.send(record, deliveryCb, KafkaProducer::SendOption::ToCopyRecordValue); + } + + // Close the producer explicitly(or not, since RAII will take care of it) + producer.close(); +} + diff --git a/examples/example_KafkaProducer_EnableManualEventsPoll.cc b/examples/example_KafkaProducer_EnableManualEventsPoll.cc new file mode 100644 index 000000000..be1f54d58 --- /dev/null +++ b/examples/example_KafkaProducer_EnableManualEventsPoll.cc @@ -0,0 +1,62 @@ +#include + +#include +#include +#include +#include + + +int main() +{ + using namespace kafka; + using namespace kafka::clients::producer; + + // E.g. KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration (with "enable.manual.events.poll=true") + const Properties props({{"bootstrap.servers", {brokers}}, + {"enable.manual.events.poll", {"true" }}}); + + // Create a producer + KafkaProducer producer(props); + + std::cout << "Type message value and hit enter to produce message... 
(empty line to finish)" << std::endl; + + // Get all input lines + std::list> messages; + for (auto line = std::make_shared(); std::getline(std::cin, *line) && !line->empty();) { + messages.emplace_back(line); + } + + while (!messages.empty()) { + // Pop out a message to be sent + auto payload = messages.front(); + messages.pop_front(); + + // Prepare the message + const ProducerRecord record(topic, NullKey, Value(payload->c_str(), payload->size())); + + // Prepare the delivery callback + // Note: if fails, the message will be pushed back to the sending queue, and then retries later + auto deliveryCb = [payload, &messages](const RecordMetadata& metadata, const Error& error) { + if (!error) { + std::cout << "Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "Message failed to be delivered: " << error.message() << ", will be retried later" << std::endl; + messages.emplace_back(payload); + } + }; + + // Send the message + producer.send(record, deliveryCb); + + // Poll events (e.g. message delivery callback) + producer.pollEvents(std::chrono::milliseconds(0)); + } + + // Close the producer explicitly(or not, since RAII will take care of it) + producer.close(); +} + diff --git a/examples/example_KafkaProducer_Lifecycle.cc b/examples/example_KafkaProducer_Lifecycle.cc new file mode 100644 index 000000000..34a74fcbf --- /dev/null +++ b/examples/example_KafkaProducer_Lifecycle.cc @@ -0,0 +1,54 @@ +#include + +#include +#include +#include +#include + + +int main() +{ + using namespace kafka; + using namespace kafka::clients::producer; + + // E.g. KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + const Properties props({{"bootstrap.servers", brokers}}); + + // Create a producer + KafkaProducer producer(props); + + std::cout << "Type message value and hit enter to produce message... (empty line to quit)" << std::endl; + + // Get input lines and forward them to Kafka + for (auto line = std::make_shared(); + std::getline(std::cin, *line); + line = std::make_shared()) { + + // Empty line to quit + if (line->empty()) break; + + // Prepare a message + const ProducerRecord record(topic, NullKey, Value(line->c_str(), line->size())); + + // Prepare delivery callback + // Note: Here we capture the shared pointer of `line`, which holds the content for `record.value()` + auto deliveryCb = [line](const RecordMetadata& metadata, const Error& error) { + if (!error) { + std::cout << "Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "Message failed to be delivered: " << error.message() << std::endl; + } + }; + + // Send the message + producer.send(record, deliveryCb); + } + + // Close the producer explicitly(or not, since RAII will take care of it) + producer.close(); +} + diff --git a/examples/example_KafkaProducer_Simple.cc b/examples/example_KafkaProducer_Simple.cc new file mode 100644 index 000000000..719b64d15 --- /dev/null +++ b/examples/example_KafkaProducer_Simple.cc @@ -0,0 +1,45 @@ +#include + +#include +#include +#include + + +int main() +{ + using namespace kafka; + using namespace kafka::clients::producer; + + // E.g. 
KAFKA_BROKER_LIST: "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" + const std::string brokers = getenv("KAFKA_BROKER_LIST"); // NOLINT + const Topic topic = getenv("TOPIC_FOR_TEST"); // NOLINT + + // Prepare the configuration + const Properties props({{"bootstrap.servers", brokers}}); + + // Create a producer + KafkaProducer producer(props); + + // Prepare a message + std::cout << "Type message value and hit enter to produce message..." << std::endl; + std::string line; + std::getline(std::cin, line); + + const ProducerRecord record(topic, NullKey, Value(line.c_str(), line.size())); + + // Prepare delivery callback + auto deliveryCb = [](const RecordMetadata& metadata, const Error& error) { + if (!error) { + std::cout << "Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "Message failed to be delivered: " << error.message() << std::endl; + } + }; + + // Send a message + producer.send(record, deliveryCb); + + // Close the producer explicitly(or not, since RAII will take care of it) + producer.close(); +} + diff --git a/examples/example_ProducerRecordHeaders.cc b/examples/example_ProducerRecordHeaders.cc new file mode 100644 index 000000000..64ee59d2e --- /dev/null +++ b/examples/example_ProducerRecordHeaders.cc @@ -0,0 +1,31 @@ +#include + +#include +#include + + +int main() +{ + const kafka::Topic topic = "someTopic"; + const kafka::Partition partition = 0; + + const std::string category = "categoryA"; + const std::size_t sessionId = 1; + const std::string key = "some key"; + const std::string value = "some payload"; + + + { + kafka::clients::producer::ProducerRecord record(topic, + partition, + kafka::Key{key.c_str(), key.size()}, + kafka::Value{value.c_str(), value.size()}); + record.headers() = {{ + kafka::Header{kafka::Header::Key{"Category"}, kafka::Header::Value{category.c_str(), category.size()}}, + kafka::Header{kafka::Header::Key{"SessionId"}, kafka::Header::Value{&sessionId, sizeof(sessionId)}} + }}; + + std::cout << "ProducerRecord: " << record.toString() << std::endl; + } +} + diff --git a/examples/example_Properties.cc b/examples/example_Properties.cc new file mode 100644 index 000000000..963ff1f50 --- /dev/null +++ b/examples/example_Properties.cc @@ -0,0 +1,27 @@ +#include + +#include +#include + +std::string brokers = "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092"; + +int main() +{ + { + const kafka::Properties props ({ + {"bootstrap.servers", {brokers}}, + {"enable.idempotence", {"true" }}, + }); + + std::cout << "Properties: " << props.toString() << std::endl; + } + + { + kafka::Properties props; + props.put("bootstrap.servers", brokers); + props.put("enable.idempotence", "true"); + + std::cout << "Properties: " << props.toString() << std::endl; + } +} + diff --git a/examples/kafka_async_producer_copy_payload.cc b/examples/kafka_async_producer_copy_payload.cc index a2b9e8d8d..aa860c27c 100644 --- a/examples/kafka_async_producer_copy_payload.cc +++ b/examples/kafka_async_producer_copy_payload.cc @@ -6,7 +6,9 @@ int main(int argc, char **argv) { + using namespace kafka; using namespace kafka::clients; + using namespace kafka::clients::producer; if (argc != 3) { std::cerr << "Usage: " << argv[0] << " \n"; @@ -14,14 +16,14 @@ int main(int argc, char **argv) } const std::string brokers = argv[1]; - const kafka::Topic topic = argv[2]; + const Topic topic = argv[2]; try { // Create configuration object - const kafka::Properties props ({ - {"bootstrap.servers", brokers}, - {"enable.idempotence", "true"}, + const Properties props ({ + 
{"bootstrap.servers", {brokers}}, + {"enable.idempotence", {"true" }}, }); // Create a producer instance @@ -32,13 +34,11 @@ int main(int argc, char **argv) for (std::string line; std::getline(std::cin, line);) { // The ProducerRecord doesn't own `line`, it is just a thin wrapper - auto record = producer::ProducerRecord(topic, - kafka::NullKey, - kafka::Value(line.c_str(), line.size())); + auto record = ProducerRecord(topic, NullKey, Value(line.c_str(), line.size())); // Send the message producer.send(record, // The delivery report handler - [](const producer::RecordMetadata& metadata, const kafka::Error& error) { + [](const RecordMetadata& metadata, const Error& error) { if (!error) { std::cout << "% Message delivered: " << metadata.toString() << std::endl; } else { @@ -53,7 +53,7 @@ int main(int argc, char **argv) // producer.close(); // No explicit close is needed, RAII will take care of it - } catch (const kafka::KafkaException& e) { + } catch (const KafkaException& e) { std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; } } diff --git a/examples/kafka_async_producer_not_copy_payload.cc b/examples/kafka_async_producer_not_copy_payload.cc index e4f941bcd..44eaa0daa 100644 --- a/examples/kafka_async_producer_not_copy_payload.cc +++ b/examples/kafka_async_producer_not_copy_payload.cc @@ -6,7 +6,9 @@ int main(int argc, char **argv) { + using namespace kafka; using namespace kafka::clients; + using namespace kafka::clients::producer; if (argc != 3) { std::cerr << "Usage: " << argv[0] << " \n"; @@ -14,14 +16,14 @@ int main(int argc, char **argv) } const std::string brokers = argv[1]; - const kafka::Topic topic = argv[2]; + const Topic topic = argv[2]; try { // Create configuration object - const kafka::Properties props ({ - {"bootstrap.servers", brokers}, - {"enable.idempotence", "true"}, + const Properties props ({ + {"bootstrap.servers", {brokers}}, + {"enable.idempotence", {"true" }}, }); // Create a producer instance @@ -34,9 +36,9 @@ int main(int argc, char **argv) std::getline(std::cin, *line); line = std::make_shared()) { // The ProducerRecord doesn't own `line`, it is just a thin wrapper - auto record = producer::ProducerRecord(topic, - kafka::NullKey, - kafka::Value(line->c_str(), line->size())); + auto record = ProducerRecord(topic, + NullKey, + Value(line->c_str(), line->size())); // Send the message producer.send(record, @@ -44,7 +46,7 @@ int main(int argc, char **argv) // Note: Here we capture the shared_pointer of `line`, // which holds the content for `record.value()`. // It makes sure the memory block is valid until the lambda finishes. 
- [line](const producer::RecordMetadata& metadata, const kafka::Error& error) { + [line](const RecordMetadata& metadata, const Error& error) { if (!error) { std::cout << "% Message delivered: " << metadata.toString() << std::endl; } else { @@ -57,7 +59,7 @@ int main(int argc, char **argv) // producer.close(); // No explicit close is needed, RAII will take care of it - } catch (const kafka::KafkaException& e) { + } catch (const KafkaException& e) { std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; } } diff --git a/examples/kafka_auto_commit_consumer.cc b/examples/kafka_auto_commit_consumer.cc index b4061401c..79d5bdc85 100644 --- a/examples/kafka_auto_commit_consumer.cc +++ b/examples/kafka_auto_commit_consumer.cc @@ -5,24 +5,27 @@ int main(int argc, char **argv) { + using namespace kafka; + using namespace kafka::clients; + using namespace kafka::clients::consumer; + if (argc != 3) { std::cerr << "Usage: " << argv[0] << " \n"; exit(argc == 1 ? 0 : 1); // NOLINT } const std::string brokers = argv[1]; - const kafka::Topic topic = argv[2]; + const Topic topic = argv[2]; try { // Create configuration object - const kafka::Properties props ({ - {"bootstrap.servers", brokers}, - {"enable.auto.commit", "true"} + const Properties props ({ + {"bootstrap.servers", {brokers}} }); // Create a consumer instance - kafka::clients::KafkaConsumer consumer(props); + KafkaConsumer consumer(props); // Subscribe to topics consumer.subscribe({topic}); @@ -41,7 +44,7 @@ int main(int argc, char **argv) std::cout << " Partition: " << record.partition() << std::endl; std::cout << " Offset : " << record.offset() << std::endl; std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; - std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; + std::cout << " Headers : " << toString(record.headers()) << std::endl; std::cout << " Key [" << record.key().toString() << "]" << std::endl; std::cout << " Value [" << record.value().toString() << "]" << std::endl; } else { @@ -52,7 +55,7 @@ int main(int argc, char **argv) // consumer.close(); // No explicit close is needed, RAII will take care of it - } catch (const kafka::KafkaException& e) { + } catch (const KafkaException& e) { std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; } } diff --git a/examples/kafka_manual_commit_consumer.cc b/examples/kafka_manual_commit_consumer.cc index 503f4b545..ee34d361c 100644 --- a/examples/kafka_manual_commit_consumer.cc +++ b/examples/kafka_manual_commit_consumer.cc @@ -5,23 +5,28 @@ int main(int argc, char **argv) { + using namespace kafka; + using namespace kafka::clients; + using namespace kafka::clients::consumer; + if (argc != 3) { std::cerr << "Usage: " << argv[0] << " \n"; exit(argc == 1 ? 
0 : 1); // NOLINT } const std::string brokers = argv[1]; - const kafka::Topic topic = argv[2]; + const Topic topic = argv[2]; try { // Create configuration object - const kafka::Properties props ({ - {"bootstrap.servers", brokers}, + const Properties props ({ + {"bootstrap.servers", {brokers}}, + {"enable.auto.commit", {"false"}} }); // Create a consumer instance - kafka::clients::KafkaConsumer consumer(props); + KafkaConsumer consumer(props); // Subscribe to topics consumer.subscribe({topic}); @@ -47,7 +52,7 @@ int main(int argc, char **argv) std::cout << " Partition: " << record.partition() << std::endl; std::cout << " Offset : " << record.offset() << std::endl; std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; - std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; + std::cout << " Headers : " << toString(record.headers()) << std::endl; std::cout << " Key [" << record.key().toString() << "]" << std::endl; std::cout << " Value [" << record.value().toString() << "]" << std::endl; @@ -61,7 +66,7 @@ int main(int argc, char **argv) auto now = std::chrono::steady_clock::now(); if (now - lastTimeCommitted > std::chrono::seconds(1)) { // Commit offsets for messages polled - std::cout << "% syncCommit offsets: " << kafka::utility::getCurrentTime() << std::endl; + std::cout << "% syncCommit offsets: " << utility::getCurrentTime() << std::endl; consumer.commitSync(); // or commitAsync() lastTimeCommitted = now; @@ -72,7 +77,7 @@ int main(int argc, char **argv) // consumer.close(); // No explicit close is needed, RAII will take care of it - } catch (const kafka::KafkaException& e) { + } catch (const KafkaException& e) { std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; } } diff --git a/examples/kafka_sync_producer.cc b/examples/kafka_sync_producer.cc index fd4d44674..89d3078a1 100644 --- a/examples/kafka_sync_producer.cc +++ b/examples/kafka_sync_producer.cc @@ -5,7 +5,9 @@ int main(int argc, char **argv) { + using namespace kafka; using namespace kafka::clients; + using namespace kafka::clients::producer; if (argc != 3) { std::cerr << "Usage: " << argv[0] << " \n"; @@ -13,14 +15,14 @@ int main(int argc, char **argv) } const std::string brokers = argv[1]; - const kafka::Topic topic = argv[2]; + const Topic topic = argv[2]; try { // Create configuration object - const kafka::Properties props({ - {"bootstrap.servers", brokers}, - {"enable.idempotence", "true"}, + const Properties props({ + {"bootstrap.servers", {brokers}}, + {"enable.idempotence", {"true" }}, }); // Create a producer instance. @@ -31,15 +33,15 @@ int main(int argc, char **argv) for (std::string line; std::getline(std::cin, line);) { // The ProducerRecord doesn't own `line`, it is just a thin wrapper - auto record = producer::ProducerRecord(topic, - kafka::NullKey, - kafka::Value(line.c_str(), line.size())); + auto record = ProducerRecord(topic, + NullKey, + Value(line.c_str(), line.size())); // Send the message. 
try { - const producer::RecordMetadata metadata = producer.syncSend(record); + const RecordMetadata metadata = producer.syncSend(record); std::cout << "% Message delivered: " << metadata.toString() << std::endl; - } catch (const kafka::KafkaException& e) { + } catch (const KafkaException& e) { std::cerr << "% Message delivery failed: " << e.error().message() << std::endl; } @@ -48,7 +50,7 @@ int main(int argc, char **argv) // producer.close(); // No explicit close is needed, RAII will take care of it - } catch (const kafka::KafkaException& e) { + } catch (const KafkaException& e) { std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; } } diff --git a/include/kafka/AdminClient.h b/include/kafka/AdminClient.h index 1a136f0e0..ea4cde21b 100644 --- a/include/kafka/AdminClient.h +++ b/include/kafka/AdminClient.h @@ -18,7 +18,7 @@ #include -namespace KAFKA_API { namespace clients { +namespace KAFKA_API { namespace clients { namespace admin { /** * The administrative client for Kafka, which supports managing and inspecting topics, etc. @@ -27,11 +27,7 @@ class AdminClient: public KafkaClient { public: explicit AdminClient(const Properties& properties) - : KafkaClient(ClientType::AdminClient, - KafkaClient::validateAndReformProperties(properties), - ConfigCallbacksRegister{}, - EventsPollingOption::Auto, - Interceptors{}) + : KafkaClient(ClientType::AdminClient, KafkaClient::validateAndReformProperties(properties)) { } @@ -148,10 +144,14 @@ AdminClient::createTopics(const Topics& topics, for (const auto& conf: topicConfig.map()) { - const rd_kafka_resp_err_t err = rd_kafka_NewTopic_set_config(rkNewTopics.back().get(), conf.first.c_str(), conf.second.c_str()); + const auto& k = conf.first; + const auto& v = topicConfig.getProperty(k); + if (!v) continue; + + const rd_kafka_resp_err_t err = rd_kafka_NewTopic_set_config(rkNewTopics.back().get(), k.c_str(), v->c_str()); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { - const std::string errMsg = "Invalid config[" + conf.first + "=" + conf.second + "]"; + const std::string errMsg = "Invalid config[" + k + "=" + *v + "]"; KAFKA_API_DO_LOG(Log::Level::Err, errMsg.c_str()); return admin::CreateTopicsResult(Error{RD_KAFKA_RESP_ERR__INVALID_ARG, errMsg}); } @@ -344,5 +344,5 @@ AdminClient::deleteRecords(const TopicPartitionOffsets& topicPartitionOffsets, return admin::DeleteRecordsResult(combineErrors(errors)); } -} } // end of KAFKA_API::clients +} } } // end of KAFKA_API::clients::admin diff --git a/include/kafka/AdminClientConfig.h b/include/kafka/AdminClientConfig.h index 1c21175ef..79da48ecf 100644 --- a/include/kafka/AdminClientConfig.h +++ b/include/kafka/AdminClientConfig.h @@ -2,7 +2,7 @@ #include -#include +#include namespace KAFKA_API { namespace clients { namespace admin { @@ -10,103 +10,12 @@ namespace KAFKA_API { namespace clients { namespace admin { /** * Configuration for the Kafka Consumer. */ -class Config: public Properties +class AdminClientConfig: public Config { public: - Config() = default; - Config(const Config&) = default; - explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} - - /** - * The string contains host:port pairs of brokers (splitted by ",") that the administrative client will use to establish initial connection to the Kafka cluster. - * Note: It's mandatory. - */ - static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; - - /** - * Protocol used to communicate with brokers. 
- * Default value: plaintext - */ - static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; - - /** - * SASL mechanism to use for authentication. - * Default value: GSSAPI - */ - static const constexpr char* SASL_MECHANISM = "sasl.mechanisms"; - - /** - * SASL username for use with the PLAIN and SASL-SCRAM-.. mechanism. - */ - static const constexpr char* SASL_USERNAME = "sasl.username"; - - /** - * SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism. - */ - static const constexpr char* SASL_PASSWORD = "sasl.password"; - - /** - * Shell command to refresh or acquire the client's Kerberos ticket. - */ - static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; - - /** - * The client's Kerberos principal name. - */ - static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; - - /** - * Set to "default" or "oidc" to control with login method to be used. - * If set to "oidc", the following properties must also be specified: - * sasl.oauthbearer.client.id - * sasl.oauthbearer.client.secret - * sasl.oauthbearer.token.endpoint.url - * Default value: default - */ - static const constexpr char* SASL_OAUTHBEARER_METHOD = "sasl.oauthbearer.method"; - - /** - * Public identifier for the applicaition. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_CLIENT_ID = "sasl.oauthbearer.client.id"; - - /** - * Client secret only known to the application and the authorization server. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_CLIENT_SECRET = "sasl.oauthbearer.client.secret"; - - /** - * Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_EXTENSIONS = "sasl.oauthbearer.extensions"; - - /** - * Client use this to specify the scope of the access request to the broker. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope"; - - /** - * OAuth/OIDC issuer token endpoint HTTP(S) URI used to retreve token. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; - - /** - * SASL/OAUTHBEARER configuration. - * The format is implementation-dependent and must be parsed accordingly. - */ - static const constexpr char* SASL_OAUTHBEARER_CONFIG = "sasl.oauthbearer.config"; - - /** - * Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. - * Should only be used for development or testing, and not in production. - * Default value: false - */ - static const constexpr char* ENABLE_SASL_OAUTHBEARER_UNSECURE_JWT = "enable.sasl.oauthbearer.unsecure.jwt"; + AdminClientConfig() = default; + AdminClientConfig(const AdminClientConfig&) = default; + explicit AdminClientConfig(const PropertiesMap& kvMap): Config(kvMap) {} }; } } } // end of KAFKA_API::clients::admin diff --git a/include/kafka/ClientCommon.h b/include/kafka/ClientCommon.h new file mode 100644 index 000000000..c399d4d06 --- /dev/null +++ b/include/kafka/ClientCommon.h @@ -0,0 +1,46 @@ +#pragma once + +#include + +#include + +#include + + +namespace KAFKA_API { namespace clients { + + /** + * Callback type for logging. 
+ */ + using LogCallback = std::function; + + /** + * Callback type for error notification. + */ + using ErrorCallback = std::function; + + /** + * Callback type for statistics info dumping. + */ + using StatsCallback = std::function; + + /** + * SASL OAUTHBEARER token info. + */ + struct SaslOauthbearerToken + { + using KeyValuePairs = std::map; + + std::string value; + std::chrono::microseconds mdLifetime{}; + std::string mdPrincipalName; + KeyValuePairs extensions; + }; + + /** + * Callback type for OAUTHBEARER token refresh. + */ + using OauthbearerTokenRefreshCallback = std::function; + +} } // end of KAFKA_API::clients + diff --git a/include/kafka/ClientConfig.h b/include/kafka/ClientConfig.h new file mode 100644 index 000000000..1f02d1389 --- /dev/null +++ b/include/kafka/ClientConfig.h @@ -0,0 +1,167 @@ +#pragma once + +#include + +#include + + +namespace KAFKA_API { namespace clients { + +/** + * Configuration for Kafka clients.. + */ +class Config: public Properties +{ +public: + Config() = default; + Config(const Config&) = default; + explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} + + /** + * To poll the events manually (otherwise, it would be done with a background polling thread). + * Note: Once it's enabled, the interface `pollEvents()` should be manually called, in order to trigger + * 1) The offset-commit callbacks, for consumers. + * 2) The message-delivery callbacks, for producers. + */ + static const constexpr char* ENABLE_MANUAL_EVENTS_POLL = "enable.manual.events.poll"; + + /** + * Log callback. + * Type: `std::function` + */ + static const constexpr char* LOG_CB = "log_cb"; + + /** + * Log callback. + * Type: `std::function` + */ + static const constexpr char* ERROR_CB = "error_cb"; + + /** + * Statistics callback. + * Type: `std::function` + */ + static const constexpr char* STATS_CB = "stats_cb"; + + /** + * OAUTHBEARER token refresh callback. + * Type: `std::function` + */ + static const constexpr char* OAUTHBEARER_TOKEN_REFRESH_CB = "oauthbearer_token_refresh_cb"; + + /** + * Interceptors for thread start/exit, brokers' state change, etc. + * Type: `Interceptors` + */ + static const constexpr char* INTERCEPTORS = "interceptors"; + + /** + * The string contains host:port pairs of brokers (splitted by ",") that the consumer will use to establish initial connection to the Kafka cluster. + * Note: It's mandatory. + */ + static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; + + /** + * Client identifier. + */ + static const constexpr char* CLIENT_ID = "client.id"; + + /** + * Log level (syslog(3) levels). + */ + static const constexpr char* LOG_LEVEL = "log_level"; + + /** + * Timeout for network requests. + * Default value: 60000 + */ + static const constexpr char* SOCKET_TIMEOUT_MS = "socket.timeout.ms"; + + /** + * Protocol used to communicate with brokers. + * Default value: plaintext + */ + static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; + + /** + * SASL mechanism to use for authentication. + * Default value: GSSAPI + */ + static const constexpr char* SASL_MECHANISM = "sasl.mechanisms"; + + /** + * SASL username for use with the PLAIN and SASL-SCRAM-.. mechanism. + */ + static const constexpr char* SASL_USERNAME = "sasl.username"; + + /** + * SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism. + */ + static const constexpr char* SASL_PASSWORD = "sasl.password"; + + /** + * Shell command to refresh or acquire the client's Kerberos ticket. 
+ */ + static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; + + /** + * The client's Kerberos principal name. + */ + static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; + + /** + * Set to "default" or "oidc" to control with login method to be used. + * If set to "oidc", the following properties must also be specified: + * sasl.oauthbearer.client.id + * sasl.oauthbearer.client.secret + * sasl.oauthbearer.token.endpoint.url + * Default value: default + */ + static const constexpr char* SASL_OAUTHBEARER_METHOD = "sasl.oauthbearer.method"; + + /** + * Public identifier for the applicaition. + * Only used with "sasl.oauthbearer.method=oidc". + */ + static const constexpr char* SASL_OAUTHBEARER_CLIENT_ID = "sasl.oauthbearer.client.id"; + + /** + * Client secret only known to the application and the authorization server. + * Only used with "sasl.oauthbearer.method=oidc". + */ + static const constexpr char* SASL_OAUTHBEARER_CLIENT_SECRET = "sasl.oauthbearer.client.secret"; + + /** + * Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. + * Only used with "sasl.oauthbearer.method=oidc". + */ + static const constexpr char* SASL_OAUTHBEARER_EXTENSIONS = "sasl.oauthbearer.extensions"; + + /** + * Client use this to specify the scope of the access request to the broker. + * Only used with "sasl.oauthbearer.method=oidc". + */ + static const constexpr char* SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope"; + + /** + * OAuth/OIDC issuer token endpoint HTTP(S) URI used to retreve token. + * Only used with "sasl.oauthbearer.method=oidc". + */ + static const constexpr char* SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; + + /** + * SASL/OAUTHBEARER configuration. + * The format is implementation-dependent and must be parsed accordingly. + */ + static const constexpr char* SASL_OAUTHBEARER_CONFIG = "sasl.oauthbearer.config"; + + /** + * Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. + * Should only be used for development or testing, and not in production. + * Default value: false + */ + static const constexpr char* ENABLE_SASL_OAUTHBEARER_UNSECURE_JWT = "enable.sasl.oauthbearer.unsecure.jwt"; +}; + +} } // end of KAFKA_API::clients + diff --git a/include/kafka/ConsumerConfig.h b/include/kafka/ConsumerConfig.h index b361e7e6a..0685fbc32 100644 --- a/include/kafka/ConsumerConfig.h +++ b/include/kafka/ConsumerConfig.h @@ -2,7 +2,7 @@ #include -#include +#include namespace KAFKA_API { namespace clients { namespace consumer { @@ -10,18 +10,13 @@ namespace KAFKA_API { namespace clients { namespace consumer { /** * Configuration for the Kafka Consumer. */ -class Config: public Properties +class ConsumerConfig: public Config { public: - Config() = default; - Config(const Config&) = default; - explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} + ConsumerConfig() = default; + ConsumerConfig(const ConsumerConfig&) = default; + explicit ConsumerConfig(const PropertiesMap& kvMap): Config(kvMap) {} - /** - * The string contains host:port pairs of brokers (splitted by ",") that the consumer will use to establish initial connection to the Kafka cluster. - * Note: It's mandatory. - */ - static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; /** * Group identifier. @@ -30,11 +25,6 @@ class Config: public Properties */ static const constexpr char* GROUP_ID = "group.id"; - /** - * Client identifier. 
- */ - static const constexpr char* CLIENT_ID = "client.id"; - /** * Automatically commits previously polled offsets on each `poll` operation. */ @@ -74,12 +64,6 @@ class Config: public Properties */ static const constexpr char* SESSION_TIMEOUT_MS = "session.timeout.ms"; - /** - * Timeout for network requests. - * Default value: 60000 - */ - static const constexpr char* SOCKET_TIMEOUT_MS = "socket.timeout.ms"; - /** * Control how to read messages written transactionally. * Available options: read_uncommitted, read_committed @@ -94,92 +78,6 @@ class Config: public Properties * Default value: range,roundrobin */ static const constexpr char* PARTITION_ASSIGNMENT_STRATEGY = "partition.assignment.strategy"; - - /** - * Protocol used to communicate with brokers. - * Default value: plaintext - */ - static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; - - /** - * SASL mechanism to use for authentication. - * Default value: GSSAPI - */ - static const constexpr char* SASL_MECHANISM = "sasl.mechanisms"; - - /** - * SASL username for use with the PLAIN and SASL-SCRAM-.. mechanism. - */ - static const constexpr char* SASL_USERNAME = "sasl.username"; - - /** - * SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism. - */ - static const constexpr char* SASL_PASSWORD = "sasl.password"; - - /** - * Shell command to refresh or acquire the client's Kerberos ticket. - */ - static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; - - /** - * The client's Kerberos principal name. - */ - static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; - - /** - * Set to "default" or "oidc" to control with login method to be used. - * If set to "oidc", the following properties must also be specified: - * sasl.oauthbearer.client.id - * sasl.oauthbearer.client.secret - * sasl.oauthbearer.token.endpoint.url - * Default value: default - */ - static const constexpr char* SASL_OAUTHBEARER_METHOD = "sasl.oauthbearer.method"; - - /** - * Public identifier for the applicaition. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_CLIENT_ID = "sasl.oauthbearer.client.id"; - - /** - * Client secret only known to the application and the authorization server. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_CLIENT_SECRET = "sasl.oauthbearer.client.secret"; - - /** - * Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_EXTENSIONS = "sasl.oauthbearer.extensions"; - - /** - * Client use this to specify the scope of the access request to the broker. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope"; - - /** - * OAuth/OIDC issuer token endpoint HTTP(S) URI used to retreve token. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; - - /** - * SASL/OAUTHBEARER configuration. - * The format is implementation-dependent and must be parsed accordingly. - */ - static const constexpr char* SASL_OAUTHBEARER_CONFIG = "sasl.oauthbearer.config"; - - /** - * Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. 
- * Should only be used for development or testing, and not in production. - * Default value: false - */ - static const constexpr char* ENABLE_SASL_OAUTHBEARER_UNSECURE_JWT = "enable.sasl.oauthbearer.unsecure.jwt"; - }; } } } // end of KAFKA_API::clients::consumer diff --git a/include/kafka/Interceptors.h b/include/kafka/Interceptors.h index b9994631f..fbf4b60bd 100644 --- a/include/kafka/Interceptors.h +++ b/include/kafka/Interceptors.h @@ -7,24 +7,68 @@ namespace KAFKA_API { namespace clients { +/** + * Interceptors for Kafka clients. + */ class Interceptors { public: - using ThreadStartCallback = std::function; - using ThreadExitCallback = std::function; + /** + * Callback type for thread-start interceptor. + */ + using ThreadStartCb = std::function; - Interceptors& onThreadStart(ThreadStartCallback cb) { _valid = true; _threadStartCb = std::move(cb); return *this; } - Interceptors& onThreadExit(ThreadExitCallback cb) { _valid = true; _threadExitCb = std::move(cb); return *this; } + /** + * Callback type for thread-exit interceptor. + */ + using ThreadExitCb = std::function; - ThreadStartCallback onThreadStart() const { return _threadStartCb; } - ThreadExitCallback onThreadExit() const { return _threadExitCb; } + /** + * Callback type for broker-state-change interceptor. + */ + using BrokerStateChangeCb = std::function; + /** + * Set interceptor for thread start. + */ + Interceptors& onThreadStart(ThreadStartCb cb) { _valid = true; _threadStartCb = std::move(cb); return *this; } + + /** + * Set interceptor for thread exit. + */ + Interceptors& onThreadExit(ThreadExitCb cb) { _valid = true; _threadExitCb = std::move(cb); return *this; } + + /** + * Set interceptor for broker state change. + */ + Interceptors& onBrokerStateChange(BrokerStateChangeCb cb) { _valid = true; _brokerStateChangeCb = std::move(cb); return *this; } + + /** + * Get interceptor for thread start. + */ + ThreadStartCb onThreadStart() const { return _threadStartCb; } + + /** + * Get interceptor for thread exit. + */ + ThreadExitCb onThreadExit() const { return _threadExitCb; } + + /** + * Get interceptor for broker state change. + */ + BrokerStateChangeCb onBrokerStateChange() const { return _brokerStateChangeCb; } + + /** + * Check if there's no interceptor. + */ bool empty() const { return !_valid; } private: - ThreadStartCallback _threadStartCb; - ThreadExitCallback _threadExitCb; - bool _valid = false; + ThreadStartCb _threadStartCb; + ThreadExitCb _threadExitCb; + BrokerStateChangeCb _brokerStateChangeCb; + + bool _valid = false; }; } } // end of KAFKA_API::clients diff --git a/include/kafka/KafkaClient.h b/include/kafka/KafkaClient.h index 38017756a..36d9a368c 100644 --- a/include/kafka/KafkaClient.h +++ b/include/kafka/KafkaClient.h @@ -3,6 +3,8 @@ #include #include +#include +#include #include #include #include @@ -18,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -32,11 +33,6 @@ namespace KAFKA_API { namespace clients { class KafkaClient { public: - /** - * The option shows whether user wants to call `pollEvents()` manually to trigger internal callbacks. - */ - enum class EventsPollingOption { Manual, Auto }; - virtual ~KafkaClient() = default; /** @@ -49,57 +45,11 @@ class KafkaClient */ const std::string& name() const { return _clientName; } - /** - * Set a log callback for kafka clients, which do not have a client specific logging callback configured (see `setLogger`). 
- */ - static void setGlobalLogger(Logger logger = NullLogger) - { - std::call_once(Global<>::initOnce, [](){}); // Then no need to init within KafkaClient constructor - Global<>::logger = std::move(logger); - } - - /** - * Set the log callback for the kafka client (it's a per-client setting). - */ - void setLogger(Logger logger) { _logger = std::move(logger); } - /** * Set log level for the kafka client (the default value: 5). */ void setLogLevel(int level); - /** - * Callback type for statistics info dumping. - */ - using StatsCallback = std::function; - - /** - * Set callback to receive the periodic statistics info. - * Note: 1) It only works while the "statistics.interval.ms" property is configured with a non-0 value. - * 2) The callback would be triggered periodically, receiving the internal statistics info (with JSON format) emited from librdkafka. - */ - void setStatsCallback(StatsCallback cb) { _statsCb = std::move(cb); } - - /** - * Callback type for error notification. - */ - using ErrorCallback = std::function; - - /** - * Set callback for error notification. - */ - void setErrorCallback(ErrorCallback cb) { _errorCb = std::move(cb); } - - /** - * Callback type for OAUTHBEARER token refresh. - */ - using OauthbearerTokenRefreshCallback = std::function; - - /** - * Set callback for OAUTHBEARER token refresh. - */ - void setOauthbearerTokernRefreshCallback(OauthbearerTokenRefreshCallback cb) { _oauthbearerTokenRefreshCb = std::move(cb); } - /** * Return the properties which took effect. */ @@ -112,7 +62,7 @@ class KafkaClient /** * Call the OffsetCommit callbacks (if any) - * Note: The Kafka client should be constructed with option `EventsPollingOption::Manual`. + * Note: The Kafka client should be constructed with option `enable.manual.events.poll=true`! */ void pollEvents(std::chrono::milliseconds timeout) { @@ -130,12 +80,11 @@ class KafkaClient template void doLog(int level, const char* filename, int lineno, const char* format, Args... args) const { - const auto& logger = _logger ? _logger : Global<>::logger; - if (level >= 0 && level <= _logLevel && logger) + if (level >= 0 && level <= _logLevel && _logCb) { LogBuffer logBuffer; logBuffer.print("%s ", name().c_str()).print(format, args...); - logger(level, filename, lineno, logBuffer.c_str()); + _logCb(level, filename, lineno, logBuffer.c_str()); } } @@ -146,28 +95,6 @@ class KafkaClient #define KAFKA_API_DO_LOG(lvl, ...) doLog(lvl, __FILE__, __LINE__, ##__VA_ARGS__) - template - static void doGlobalLog(int level, const char* filename, int lineno, const char* format, Args... args) - { - if (!Global<>::logger) return; - - LogBuffer logBuffer; - logBuffer.print(format, args...); - Global<>::logger(level, filename, lineno, logBuffer.c_str()); - } - static void doGlobalLog(int level, const char* filename, int lineno, const char* msg) - { - doGlobalLog(level, filename, lineno, "%s", msg); - } - -/** - * Log for kafka clients, with the callback which `setGlobalLogger` assigned. - * - * E.g, - * KAFKA_API_LOG(Log::Level::Err, "something wrong happened! %s", detailedInfo.c_str()); - */ -#define KAFKA_API_LOG(lvl, ...) 
KafkaClient::doGlobalLog(lvl, __FILE__, __LINE__, ##__VA_ARGS__) - #if COMPILER_SUPPORTS_CPP_17 static constexpr int DEFAULT_METADATA_TIMEOUT_MS = 10000; #else @@ -182,9 +109,7 @@ class KafkaClient KafkaClient(ClientType clientType, const Properties& properties, - const ConfigCallbacksRegister& extraConfigRegister, - EventsPollingOption eventsPollingOption, - Interceptors interceptors); + const ConfigCallbacksRegister& extraConfigRegister = ConfigCallbacksRegister{}); rd_kafka_t* getClientHandle() const { return _rk.get(); } @@ -198,20 +123,19 @@ class KafkaClient return ms > std::chrono::milliseconds(INT_MAX) ? TIMEOUT_INFINITE : static_cast(ms.count()); } + void setLogCallback(LogCallback cb) { _logCb = std::move(cb); } + void setStatsCallback(StatsCallback cb) { _statsCb = std::move(cb); } + void setErrorCallback(ErrorCallback cb) { _errorCb = std::move(cb); } + void setOauthbearerTokenRefreshCallback(OauthbearerTokenRefreshCallback cb) { _oauthbearerTokenRefreshCb = std::move(cb); } + + void setInterceptors(Interceptors interceptors) { _interceptors = std::move(interceptors); } + // Show whether it's using automatical events polling - bool isWithAutoEventsPolling() const { return _eventsPollingOption == EventsPollingOption::Auto; } + bool isWithAutoEventsPolling() const { return !_enableManualEventsPoll; } // Buffer size for single line logging static const constexpr int LOG_BUFFER_SIZE = 1024; - // Global logger - template - struct Global - { - static Logger logger; - static std::once_flag initOnce; - }; - // Validate properties (and fix it if necesary) static Properties validateAndReformProperties(const Properties& properties); @@ -232,13 +156,13 @@ class KafkaClient std::string _clientName; std::atomic _logLevel = {Log::Level::Notice}; - Logger _logger; + LogCallback _logCb = DefaultLogger; StatsCallback _statsCb; ErrorCallback _errorCb; OauthbearerTokenRefreshCallback _oauthbearerTokenRefreshCb; - EventsPollingOption _eventsPollingOption; + bool _enableManualEventsPoll = false; Interceptors _interceptors; rd_kafka_unique_ptr _rk; @@ -265,6 +189,7 @@ class KafkaClient static rd_kafka_resp_err_t configInterceptorOnNew(rd_kafka_t* rk, const rd_kafka_conf_t* conf, void* opaque, char* errStr, std::size_t maxErrStrSize); static rd_kafka_resp_err_t interceptorOnThreadStart(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* opaque); static rd_kafka_resp_err_t interceptorOnThreadExit(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* opaque); + static rd_kafka_resp_err_t interceptorOnBrokerStateChange(rd_kafka_t* rk, int id, const char* secproto, const char* host, int port, const char* state, void* opaque); // Log callback (for class instance) void onLog(int level, const char* fac, const char* buf) const; @@ -281,10 +206,7 @@ class KafkaClient // Interceptor callback (for class instance) void interceptThreadStart(const std::string& threadName, const std::string& threadType); void interceptThreadExit(const std::string& threadName, const std::string& threadType); - - static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; - static const constexpr char* CLIENT_ID = "client.id"; - static const constexpr char* LOG_LEVEL = "log_level"; + void interceptBrokerStateChange(int id, const std::string& secproto, const std::string& host, int port, const std::string& state); protected: struct Pollable @@ -366,35 +288,29 @@ class KafkaClient std::unique_ptr _pollThread; }; -template -Logger KafkaClient::Global::logger; - -template 
-std::once_flag KafkaClient::Global::initOnce; inline KafkaClient::KafkaClient(ClientType clientType, const Properties& properties, - const ConfigCallbacksRegister& extraConfigRegister, - EventsPollingOption eventsPollingOption, - Interceptors interceptors) - : _eventsPollingOption(eventsPollingOption), - _interceptors(std::move(interceptors)) + const ConfigCallbacksRegister& extraConfigRegister) { - static const std::set PRIVATE_PROPERTY_KEYS = { "max.poll.records" }; + static const std::set PRIVATE_PROPERTY_KEYS = { "max.poll.records", "enable.manual.events.poll" }; // Save clientID - if (auto clientId = properties.getProperty(CLIENT_ID)) + if (auto clientId = properties.getProperty(Config::CLIENT_ID)) { _clientId = *clientId; _clientName = getClientTypeString(clientType) + "[" + _clientId + "]"; } - // Init global logger - std::call_once(Global<>::initOnce, [](){ Global<>::logger = DefaultLogger; }); + // Log Callback + if (properties.contains("log_cb")) + { + setLogCallback(properties.get("log_cb")); + } // Save LogLevel - if (auto logLevel = properties.getProperty(LOG_LEVEL)) + if (auto logLevel = properties.getProperty(Config::LOG_LEVEL)) { try { @@ -411,12 +327,33 @@ KafkaClient::KafkaClient(ClientType clientType, } } + // Save "enable.manual.events.poll" option + if (auto enableManualEventsPoll = properties.getProperty(Config::ENABLE_MANUAL_EVENTS_POLL)) + { + if (*enableManualEventsPoll == "true" || *enableManualEventsPoll == "t" || *enableManualEventsPoll == "1") + { + _enableManualEventsPoll = true; + } + else if (*enableManualEventsPoll == "false" || *enableManualEventsPoll == "f" || *enableManualEventsPoll == "0") + { + _enableManualEventsPoll = false; + } + else + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG, std::string("Invalid option[" + *enableManualEventsPoll + "] for \"enable.manual.events.poll\", which must be a bool value (true or false)!"))); + } + } + LogBuffer errInfo; auto rk_conf = rd_kafka_conf_unique_ptr(rd_kafka_conf_new()); for (const auto& prop: properties.map()) { + const auto& k = prop.first; + const auto& v = properties.getProperty(k); + if (!v) continue; + // Those private properties are only available for `C++ wrapper`, not for librdkafka if (PRIVATE_PROPERTY_KEYS.count(prop.first)) { @@ -425,8 +362,8 @@ KafkaClient::KafkaClient(ClientType clientType, } const rd_kafka_conf_res_t result = rd_kafka_conf_set(rk_conf.get(), - prop.first.c_str(), - prop.second.c_str(), + k.c_str(), + v->c_str(), errInfo.str(), errInfo.capacity()); if (result == RD_KAFKA_CONF_OK) @@ -435,7 +372,7 @@ KafkaClient::KafkaClient(ClientType clientType, } else { - KAFKA_API_DO_LOG(Log::Level::Err, "failed to be initialized with property[%s:%s], result[%d]", prop.first.c_str(), prop.second.c_str(), result); + KAFKA_API_DO_LOG(Log::Level::Err, "failed to be initialized with property[%s:%s], result[%d]", k.c_str(), v->c_str(), result); } } @@ -443,27 +380,46 @@ KafkaClient::KafkaClient(ClientType clientType, rd_kafka_conf_set_opaque(rk_conf.get(), this); // Log Callback - rd_kafka_conf_set_log_cb(rk_conf.get(), KafkaClient::logCallback); + if (properties.contains("log_cb")) + { + rd_kafka_conf_set_log_cb(rk_conf.get(), KafkaClient::logCallback); + } // Statistics Callback - rd_kafka_conf_set_stats_cb(rk_conf.get(), KafkaClient::statsCallback); + if (properties.contains("stats_cb")) + { + setStatsCallback(properties.get("stats_cb")); + + rd_kafka_conf_set_stats_cb(rk_conf.get(), KafkaClient::statsCallback); + } // Error Callback - rd_kafka_conf_set_error_cb(rk_conf.get(), 
KafkaClient::errorCallback); + if (properties.contains("error_cb")) + { + setErrorCallback(properties.get("error_cb")); + + rd_kafka_conf_set_error_cb(rk_conf.get(), KafkaClient::errorCallback); + } // OAUTHBEARER Toker Refresh Callback - rd_kafka_conf_set_oauthbearer_token_refresh_cb(rk_conf.get(), KafkaClient::oauthbearerTokenRefreshCallback); + if (properties.contains("oauthbearer_token_refresh_cb")) + { + setOauthbearerTokenRefreshCallback(properties.get("oauthbearer_token_refresh_cb")); - // Other Callbacks - if (extraConfigRegister) extraConfigRegister(rk_conf.get()); + rd_kafka_conf_set_oauthbearer_token_refresh_cb(rk_conf.get(), KafkaClient::oauthbearerTokenRefreshCallback); + } // Interceptor - if (!_interceptors.empty()) + if (properties.contains("interceptors")) { + setInterceptors(properties.get("interceptors")); + const Error result{ rd_kafka_conf_interceptor_add_on_new(rk_conf.get(), "on_new", KafkaClient::configInterceptorOnNew, nullptr) }; KAFKA_THROW_IF_WITH_ERROR(result); } + // Other Callbacks + if (extraConfigRegister) extraConfigRegister(rk_conf.get()); // Set client handler _rk.reset(rd_kafka_new((clientType == ClientType::KafkaConsumer ? RD_KAFKA_CONSUMER : RD_KAFKA_PRODUCER), @@ -473,7 +429,7 @@ KafkaClient::KafkaClient(ClientType clientType, KAFKA_THROW_IF_WITH_ERROR(Error(rd_kafka_last_error())); // Add brokers - auto brokers = properties.getProperty(BOOTSTRAP_SERVERS); + auto brokers = properties.getProperty(Config::BOOTSTRAP_SERVERS); if (!brokers || rd_kafka_brokers_add(getClientHandle(), brokers->c_str()) == 0) { KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG,\ @@ -489,22 +445,22 @@ KafkaClient::validateAndReformProperties(const Properties& properties) auto newProperties = properties; // BOOTSTRAP_SERVERS property is mandatory - if (!newProperties.getProperty(BOOTSTRAP_SERVERS)) + if (!newProperties.getProperty(Config::BOOTSTRAP_SERVERS)) { KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG,\ - "Validation failed! With no property [" + std::string(BOOTSTRAP_SERVERS) + "]")); + "Validation failed! 
With no property [" + std::string(Config::BOOTSTRAP_SERVERS) + "]")); } // If no "client.id" configured, generate a random one for user - if (!newProperties.getProperty(CLIENT_ID)) + if (!newProperties.getProperty(Config::CLIENT_ID)) { - newProperties.put(CLIENT_ID, utility::getRandomString()); + newProperties.put(Config::CLIENT_ID, utility::getRandomString()); } // If no "log_level" configured, use Log::Level::Notice as default - if (!newProperties.getProperty(LOG_LEVEL)) + if (!newProperties.getProperty(Config::LOG_LEVEL)) { - newProperties.put(LOG_LEVEL, std::to_string(static_cast(Log::Level::Notice))); + newProperties.put(Config::LOG_LEVEL, std::to_string(static_cast(Log::Level::Notice))); } return newProperties; @@ -516,10 +472,10 @@ KafkaClient::getProperty(const std::string& name) const // Find it in pre-saved properties if (auto property = _properties.getProperty(name)) return *property; - constexpr int DEFAULT_BUF_SIZE = 512; - const rd_kafka_conf_t* conf = rd_kafka_conf(getClientHandle()); + constexpr int DEFAULT_BUF_SIZE = 512; + std::vector valueBuf(DEFAULT_BUF_SIZE); std::size_t valueSize = valueBuf.size(); @@ -654,6 +610,12 @@ KafkaClient::interceptThreadExit(const std::string& threadName, const std::strin if (const auto& cb = _interceptors.onThreadExit()) cb(threadName, threadType); } +inline void +KafkaClient::interceptBrokerStateChange(int id, const std::string& secproto, const std::string& host, int port, const std::string& state) +{ + if (const auto& cb = _interceptors.onBrokerStateChange()) cb(id, secproto, host, port, state); +} + inline rd_kafka_resp_err_t KafkaClient::configInterceptorOnNew(rd_kafka_t* rk, const rd_kafka_conf_t* /*conf*/, void* opaque, char* /*errStr*/, std::size_t /*maxErrStrSize*/) { @@ -667,11 +629,16 @@ KafkaClient::configInterceptorOnNew(rd_kafka_t* rk, const rd_kafka_conf_t* /*con return result; } + if (auto result = rd_kafka_interceptor_add_on_broker_state_change(rk, "on_broker_state_change", KafkaClient::interceptorOnBrokerStateChange, opaque)) + { + return result; + } + return RD_KAFKA_RESP_ERR_NO_ERROR; } inline rd_kafka_resp_err_t -KafkaClient::interceptorOnThreadStart(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /*opaque*/) +KafkaClient::interceptorOnThreadStart(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /* opaque */) { kafkaClient(rk).interceptThreadStart(threadName, toString(threadType)); @@ -679,13 +646,21 @@ KafkaClient::interceptorOnThreadStart(rd_kafka_t* rk, rd_kafka_thread_type_t thr } inline rd_kafka_resp_err_t -KafkaClient::interceptorOnThreadExit(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /*opaque*/) +KafkaClient::interceptorOnThreadExit(rd_kafka_t* rk, rd_kafka_thread_type_t threadType, const char* threadName, void* /* opaque */) { kafkaClient(rk).interceptThreadExit(threadName, toString(threadType)); return RD_KAFKA_RESP_ERR_NO_ERROR; } +inline rd_kafka_resp_err_t +KafkaClient::interceptorOnBrokerStateChange(rd_kafka_t* rk, int id, const char* secproto, const char* host, int port, const char* state, void* /* opaque */) +{ + kafkaClient(rk).interceptBrokerStateChange(id, secproto, host, port, state); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + inline Optional KafkaClient::fetchBrokerMetadata(const std::string& topic, std::chrono::milliseconds timeout, bool disableErrorLogging) { diff --git a/include/kafka/KafkaConsumer.h b/include/kafka/KafkaConsumer.h index b9ed40fbc..32dccd597 100644 --- 
a/include/kafka/KafkaConsumer.h +++ b/include/kafka/KafkaConsumer.h @@ -17,7 +17,7 @@ #include -namespace KAFKA_API { namespace clients { +namespace KAFKA_API { namespace clients { namespace consumer { /** * KafkaConsumer class. @@ -31,17 +31,11 @@ class KafkaConsumer: public KafkaClient /** * The constructor for KafkaConsumer. * - * Options: - * - EventsPollingOption::Auto (default) : An internal thread would be started for OffsetCommit callbacks handling. - * - EventsPollingOption::Maunal : User have to call the member function `pollEvents()` to trigger OffsetCommit callbacks. - * * Throws KafkaException with errors: * - RD_KAFKA_RESP_ERR__INVALID_ARG : Invalid BOOTSTRAP_SERVERS property * - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE: Fail to create internal threads */ - explicit KafkaConsumer(const Properties& properties, - EventsPollingOption eventsPollingOption = EventsPollingOption::Auto, - const Interceptors& interceptors = Interceptors{}); + explicit KafkaConsumer(const Properties& properties); /** * The destructor for KafkaConsumer. @@ -219,16 +213,6 @@ class KafkaConsumer: public KafkaClient */ std::vector poll(std::chrono::milliseconds timeout); - /** - * Fetch data for the topics or partitions specified using one of the subscribe/assign APIs. - * Returns the number of polled records (which have been saved into parameter `output`). - * Note: 1) The result could be fetched through ConsumerRecord (with member function `error`). - * 2) Make sure the `ConsumerRecord` be destructed before the `KafkaConsumer.close()`. - * Throws KafkaException with errors: - * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: Unknow partition - */ - std::size_t poll(std::chrono::milliseconds timeout, std::vector& output); - /** * Suspend fetching from the requested partitions. Future calls to poll() will not return any records from these partitions until they have been resumed using resume(). * Note: 1) After pausing, the application still need to call `poll()` at regular intervals. 
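For reference, once the output-parameter overload above is removed, the value-returning `poll()` is the only way to fetch records. A minimal sketch of the resulting call pattern, assuming a placeholder broker at `localhost:9092` and a placeholder topic `test-topic` (neither is part of this patch):

```cpp
#include <kafka/KafkaConsumer.h>

#include <chrono>
#include <iostream>

int main()
{
    using namespace kafka;
    using namespace kafka::clients::consumer;

    // Placeholder broker list for this sketch
    const Properties props({{"bootstrap.servers", {"localhost:9092"}}});

    KafkaConsumer consumer(props);
    consumer.subscribe({"test-topic"});

    // The remaining overload returns the polled records by value
    const auto records = consumer.poll(std::chrono::milliseconds(100));
    for (const auto& record: records)
    {
        if (!record.error()) std::cout << record.toString() << std::endl;
        else                 std::cerr << record.toString() << std::endl;
    }

    consumer.close();
    return 0;
}
```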
@@ -301,8 +285,8 @@ class KafkaConsumer: public KafkaClient std::string _groupId; - std::size_t _maxPollRecords = 500; // From "max.poll.records" property, and here is the default for batch-poll - bool _enableAutoCommit = false; // From "enable.auto.commit" property + std::size_t _maxPollRecords = 500; // From "max.poll.records" property, and here is the default for batch-poll + bool _enableAutoCommit = true; // From "enable.auto.commit" property rd_kafka_queue_unique_ptr _rk_queue; @@ -326,8 +310,6 @@ class KafkaConsumer: public KafkaClient // Register Callbacks for rd_kafka_conf_t static void registerConfigCallbacks(rd_kafka_conf_t* conf); - void pollMessages(int timeoutMs, std::vector& output); - enum class PauseOrResumeOperation { Pause, Resume }; void pauseOrResumePartitions(const TopicPartitions& topicPartitions, PauseOrResumeOperation op); @@ -354,20 +336,22 @@ class KafkaConsumer: public KafkaClient inline Properties KafkaConsumer::validateAndReformProperties(Properties properties) { + using namespace consumer; + // Don't pass the "max.poll.records" property to librdkafka - properties.remove(consumer::Config::MAX_POLL_RECORDS); + properties.remove(ConsumerConfig::MAX_POLL_RECORDS); // Let the base class validate first auto newProperties = KafkaClient::validateAndReformProperties(properties); // If no "group.id" configured, generate a random one for user - if (!newProperties.getProperty(consumer::Config::GROUP_ID)) + if (!newProperties.getProperty(ConsumerConfig::GROUP_ID)) { - newProperties.put(consumer::Config::GROUP_ID, utility::getRandomString()); + newProperties.put(ConsumerConfig::GROUP_ID, utility::getRandomString()); } // Disable the internal auto-commit from librdkafka, since we want to customize the behavior - newProperties.put(consumer::Config::ENABLE_AUTO_COMMIT, "false"); + newProperties.put(ConsumerConfig::ENABLE_AUTO_COMMIT, "false"); newProperties.put(AUTO_COMMIT_INTERVAL_MS, "0"); newProperties.put(ENABLE_AUTO_OFFSET_STORE, "true"); @@ -384,25 +368,21 @@ KafkaConsumer::registerConfigCallbacks(rd_kafka_conf_t* conf) } inline -KafkaConsumer::KafkaConsumer(const Properties& properties, - EventsPollingOption eventsPollingOption, - const Interceptors& interceptors) - : KafkaClient(ClientType::KafkaConsumer, - validateAndReformProperties(properties), - registerConfigCallbacks, - eventsPollingOption, - interceptors) +KafkaConsumer::KafkaConsumer(const Properties& properties) + : KafkaClient(ClientType::KafkaConsumer, validateAndReformProperties(properties), registerConfigCallbacks) { + using namespace consumer; + // Pick up the "max.poll.records" property - if (auto maxPollRecordsProperty = properties.getProperty(consumer::Config::MAX_POLL_RECORDS)) + if (auto maxPollRecordsProperty = properties.getProperty(ConsumerConfig::MAX_POLL_RECORDS)) { const std::string maxPollRecords = *maxPollRecordsProperty; _maxPollRecords = static_cast(std::stoi(maxPollRecords)); } - _properties.put(consumer::Config::MAX_POLL_RECORDS, std::to_string(_maxPollRecords)); + _properties.put(ConsumerConfig::MAX_POLL_RECORDS, std::to_string(_maxPollRecords)); // Pick up the "enable.auto.commit" property - if (auto enableAutoCommitProperty = properties.getProperty(consumer::Config::ENABLE_AUTO_COMMIT)) + if (auto enableAutoCommitProperty = properties.getProperty(ConsumerConfig::ENABLE_AUTO_COMMIT)) { const std::string enableAutoCommit = *enableAutoCommitProperty; @@ -416,10 +396,10 @@ KafkaConsumer::KafkaConsumer(const Properties& properties, _enableAutoCommit = isTrue(enableAutoCommit); } - 
_properties.put(consumer::Config::ENABLE_AUTO_COMMIT, (_enableAutoCommit ? "true" : "false")); + _properties.put(ConsumerConfig::ENABLE_AUTO_COMMIT, (_enableAutoCommit ? "true" : "false")); // Fetch groupId from reformed configuration - auto groupId = _properties.getProperty(consumer::Config::GROUP_ID); + auto groupId = _properties.getProperty(ConsumerConfig::GROUP_ID); assert(groupId); setGroupId(*groupId); @@ -828,45 +808,28 @@ KafkaConsumer::storeOffsetsIfNecessary(const std::vector& output) +// Fetch messages +inline std::vector +KafkaConsumer::poll(std::chrono::milliseconds timeout) { // Commit the offsets for these messages which had been polled last time (for "enable.auto.commit=true" case) commitStoredOffsetsIfNecessary(CommitType::Async); // Poll messages with librdkafka's API std::vector msgPtrArray(_maxPollRecords); - auto msgReceived = rd_kafka_consume_batch_queue(_rk_queue.get(), timeoutMs, msgPtrArray.data(), _maxPollRecords); + auto msgReceived = rd_kafka_consume_batch_queue(_rk_queue.get(), convertMsDurationToInt(timeout), msgPtrArray.data(), _maxPollRecords); if (msgReceived < 0) { KAFKA_THROW_ERROR(Error(rd_kafka_last_error())); } // Wrap messages with ConsumerRecord - output.clear(); - output.reserve(static_cast(msgReceived)); - std::for_each(msgPtrArray.begin(), msgPtrArray.begin() + msgReceived, [&output](rd_kafka_message_t* rkMsg) { output.emplace_back(rkMsg); }); + std::vector records(msgPtrArray.begin(), msgPtrArray.begin() + msgReceived); // Store the offsets for all these polled messages (for "enable.auto.commit=true" case) - storeOffsetsIfNecessary(output); -} + storeOffsetsIfNecessary(records); -// Fetch messages (return via return value) -inline std::vector -KafkaConsumer::poll(std::chrono::milliseconds timeout) -{ - std::vector result; - poll(timeout, result); - return result; -} - -// Fetch messages (return via input parameter) -inline std::size_t -KafkaConsumer::poll(std::chrono::milliseconds timeout, std::vector& output) -{ - pollMessages(convertMsDurationToInt(timeout), output); - return output.size(); + return records; } inline void @@ -1067,5 +1030,5 @@ KafkaConsumer::commitAsync(const consumer::OffsetCommitCallback& offsetCommitCal commitAsync(TopicPartitionOffsets(), offsetCommitCallback); } -} } // end of KAFKA_API::clients +} } } // end of KAFKA_API::clients::consumer diff --git a/include/kafka/KafkaProducer.h b/include/kafka/KafkaProducer.h index fc0459bc7..6012fd52a 100644 --- a/include/kafka/KafkaProducer.h +++ b/include/kafka/KafkaProducer.h @@ -20,7 +20,7 @@ #include -namespace KAFKA_API { namespace clients { +namespace KAFKA_API { namespace clients { namespace producer { /** * KafkaProducer class. @@ -31,17 +31,11 @@ class KafkaProducer: public KafkaClient /** * The constructor for KafkaProducer. * - * Options: - * - EventsPollingOption::Auto (default) : An internal thread would be started for MessageDelivery callbacks handling. - * - EventsPollingOption::Manual : User have to call the member function `pollEvents()` to trigger MessageDelivery callbacks. - * * Throws KafkaException with errors: * - RD_KAFKA_RESP_ERR__INVALID_ARG : Invalid BOOTSTRAP_SERVERS property * - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE: Fail to create internal threads */ - explicit KafkaProducer(const Properties& properties, - EventsPollingOption eventsPollingOption = EventsPollingOption::Auto, - const Interceptors& interceptors = Interceptors{}); + explicit KafkaProducer(const Properties& properties); /** * The destructor for KafkaProducer. 
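For reference, the constructor no longer takes `EventsPollingOption` or `Interceptors`; both are now passed through the `Properties` (e.g. `enable.manual.events.poll`, `interceptors`). A minimal sketch of the resulting usage, assuming a placeholder broker at `localhost:9092` and a placeholder topic `test-topic` (neither is part of this patch):

```cpp
#include <kafka/KafkaProducer.h>

#include <chrono>
#include <iostream>
#include <string>

int main()
{
    using namespace kafka;
    using namespace kafka::clients;
    using namespace kafka::clients::producer;

    // Placeholder broker list for this sketch
    Properties props({{"bootstrap.servers", {"localhost:9092"}}});
    // Replaces the former `EventsPollingOption::Manual` constructor argument
    props.put(Config::ENABLE_MANUAL_EVENTS_POLL, "true");

    KafkaProducer producer(props);

    const std::string payload = "hello";
    const ProducerRecord record("test-topic", NullKey, Value(payload.c_str(), payload.size()));

    producer.send(record,
                  [](const RecordMetadata& metadata, const Error& error) {
                      if (!error) std::cout << "Delivered: " << metadata.toString() << std::endl;
                      else        std::cerr << "Failed: "    << error.message()     << std::endl;
                  });

    // With manual events polling, delivery callbacks are only triggered by `pollEvents()`
    producer.pollEvents(std::chrono::milliseconds(100));

    producer.close();
    return 0;
}
```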
@@ -222,14 +216,8 @@ class KafkaProducer: public KafkaClient }; inline -KafkaProducer::KafkaProducer(const Properties& properties, - EventsPollingOption eventsPollingOption, - const Interceptors& interceptors) - : KafkaClient(ClientType::KafkaProducer, - validateAndReformProperties(properties), - registerConfigCallbacks, - eventsPollingOption, - interceptors) +KafkaProducer::KafkaProducer(const Properties& properties) + : KafkaClient(ClientType::KafkaProducer, validateAndReformProperties(properties), registerConfigCallbacks) { // Start background polling (if needed) startBackgroundPollingIfNecessary([this](int timeoutMs){ pollCallbacks(timeoutMs); }); @@ -267,12 +255,14 @@ KafkaProducer::registerConfigCallbacks(rd_kafka_conf_t* conf) inline Properties KafkaProducer::validateAndReformProperties(const Properties& properties) { + using namespace producer; + // Let the base class validate first auto newProperties = KafkaClient::validateAndReformProperties(properties); // Check whether it's an available partitioner const std::set availPartitioners = {"murmur2_random", "murmur2", "random", "consistent", "consistent_random", "fnv1a", "fnv1a_random"}; - auto partitioner = newProperties.getProperty(producer::Config::PARTITIONER); + auto partitioner = newProperties.getProperty(ProducerConfig::PARTITIONER); if (partitioner && !availPartitioners.count(*partitioner)) { std::string errMsg = "Invalid partitioner [" + *partitioner + "]! Valid options: "; @@ -288,10 +278,10 @@ KafkaProducer::validateAndReformProperties(const Properties& properties) // For "idempotence" feature constexpr int KAFKA_IDEMP_MAX_INFLIGHT = 5; - const auto enableIdempotence = newProperties.getProperty(producer::Config::ENABLE_IDEMPOTENCE); + const auto enableIdempotence = newProperties.getProperty(ProducerConfig::ENABLE_IDEMPOTENCE); if (enableIdempotence && *enableIdempotence == "true") { - if (const auto maxInFlight = newProperties.getProperty(producer::Config::MAX_IN_FLIGHT)) + if (const auto maxInFlight = newProperties.getProperty(ProducerConfig::MAX_IN_FLIGHT)) { if (std::stoi(*maxInFlight) > KAFKA_IDEMP_MAX_INFLIGHT) { @@ -300,7 +290,7 @@ KafkaProducer::validateAndReformProperties(const Properties& properties) } } - if (const auto acks = newProperties.getProperty(producer::Config::ACKS)) + if (const auto acks = newProperties.getProperty(ProducerConfig::ACKS)) { if (*acks != "all" && *acks != "-1") { @@ -508,5 +498,5 @@ KafkaProducer::sendOffsetsToTransaction(const TopicPartitionOffsets& t KAFKA_THROW_IF_WITH_ERROR(result); } -} } // end of KAFKA_API::clients +} } } // end of KAFKA_API::clients::producer diff --git a/include/kafka/Log.h b/include/kafka/Log.h index f6f65a5fb..bbc0985dd 100644 --- a/include/kafka/Log.h +++ b/include/kafka/Log.h @@ -9,6 +9,7 @@ #include #include #include +#include namespace KAFKA_API { @@ -36,6 +37,8 @@ struct Log } }; + +// Log Buffer template class LogBuffer { @@ -72,17 +75,66 @@ class LogBuffer char* _wptr; }; -using Logger = std::function; +// Default Logger inline void DefaultLogger(int level, const char* /*filename*/, int /*lineno*/, const char* msg) { std::cout << "[" << utility::getCurrentTime() << "]" << Log::levelString(static_cast(level)) << " " << msg; std::cout << std::endl; } +// Null Logger inline void NullLogger(int /*level*/, const char* /*filename*/, int /*lineno*/, const char* /*msg*/) { } + +// Global Logger +template +struct GlobalLogger +{ + static clients::LogCallback logCb; + static std::once_flag initOnce; + + static const constexpr int LOG_BUFFER_SIZE = 1024; + + 
template + static void doLog(int level, const char* filename, int lineno, const char* format, Args... args) + { + if (!GlobalLogger<>::logCb) return; + + LogBuffer logBuffer; + logBuffer.print(format, args...); + GlobalLogger<>::logCb(level, filename, lineno, logBuffer.c_str()); + } +}; + +template +clients::LogCallback GlobalLogger::logCb; + +template +std::once_flag GlobalLogger::initOnce; + +/** + * Set a global log interface for kafka API (Note: it takes no effect on Kafka clients). + */ +inline void setGlobalLogger(clients::LogCallback cb) +{ + std::call_once(GlobalLogger<>::initOnce, [](){}); // Then no need to init within the first KAFKA_API_LOG call. + GlobalLogger<>::logCb = std::move(cb); +} + +/** + * Log for kafka API (Note: not for Kafka client instances). + * + * E.g, + * KAFKA_API_LOG(Log::Level::Err, "something wrong happened! %s", detailedInfo.c_str()); + */ +#define KAFKA_API_LOG(level, ...) do { \ + std::call_once(GlobalLogger<>::initOnce, [](){ GlobalLogger<>::logCb = DefaultLogger; }); \ + GlobalLogger<>::doLog(level, __FILE__, __LINE__, ##__VA_ARGS__); \ +} while (0) + + } // end of KAFKA_API diff --git a/include/kafka/ProducerConfig.h b/include/kafka/ProducerConfig.h index 42b52ed1a..e9ecb88fa 100644 --- a/include/kafka/ProducerConfig.h +++ b/include/kafka/ProducerConfig.h @@ -2,7 +2,7 @@ #include -#include +#include namespace KAFKA_API { namespace clients { namespace producer { @@ -10,23 +10,13 @@ namespace KAFKA_API { namespace clients { namespace producer { /** * Configuration for the Kafka Producer. */ -class Config: public Properties +class ProducerConfig: public Config { public: - Config() = default; - Config(const Config&) = default; - explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} + ProducerConfig() = default; + ProducerConfig(const ProducerConfig&) = default; + explicit ProducerConfig(const PropertiesMap& kvMap): Config(kvMap) {} - /** - * The string contains host:port pairs of brokers (splitted by ",") that the producer will use to establish initial connection to the Kafka cluster. - * Note: It's mandatory. - */ - static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; - - /** - * This can be any string, and will be used by the brokers to identify messages sent from the client. - */ - static const constexpr char* CLIENT_ID = "client.id"; /** * The acks parameter controls how many partition replicas must receive the record before the producer can consider the write successful. @@ -128,91 +118,6 @@ class Config: public Properties * Default value: 60000 */ static const constexpr char* TRANSACTION_TIMEOUT_MS = "transaction.timeout.ms"; - - /** - * Protocol used to communicate with brokers. - * Default value: plaintext - */ - static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; - - /** - * SASL mechanism to use for authentication. - * Default value: GSSAPI - */ - static const constexpr char* SASL_MECHANISM = "sasl.mechanisms"; - - /** - * SASL username for use with the PLAIN and SASL-SCRAM-.. mechanism. - */ - static const constexpr char* SASL_USERNAME = "sasl.username"; - - /** - * SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism. - */ - static const constexpr char* SASL_PASSWORD = "sasl.password"; - - /** - * Shell command to refresh or acquire the client's Kerberos ticket. - */ - static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; - - /** - * The client's Kerberos principal name. 
- */ - static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; - - /** - * Set to "default" or "oidc" to control with login method to be used. - * If set to "oidc", the following properties must also be specified: - * sasl.oauthbearer.client.id - * sasl.oauthbearer.client.secret - * sasl.oauthbearer.token.endpoint.url - * Default value: default - */ - static const constexpr char* SASL_OAUTHBEARER_METHOD = "sasl.oauthbearer.method"; - - /** - * Public identifier for the applicaition. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_CLIENT_ID = "sasl.oauthbearer.client.id"; - - /** - * Client secret only known to the application and the authorization server. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_CLIENT_SECRET = "sasl.oauthbearer.client.secret"; - - /** - * Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_EXTENSIONS = "sasl.oauthbearer.extensions"; - - /** - * Client use this to specify the scope of the access request to the broker. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope"; - - /** - * OAuth/OIDC issuer token endpoint HTTP(S) URI used to retreve token. - * Only used with "sasl.oauthbearer.method=oidc". - */ - static const constexpr char* SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; - - /** - * SASL/OAUTHBEARER configuration. - * The format is implementation-dependent and must be parsed accordingly. - */ - static const constexpr char* SASL_OAUTHBEARER_CONFIG = "sasl.oauthbearer.config"; - - /** - * Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. - * Should only be used for development or testing, and not in production. 
- * Default value: false - */ - static const constexpr char* ENABLE_SASL_OAUTHBEARER_UNSECURE_JWT = "enable.sasl.oauthbearer.unsecure.jwt"; }; } } } // end of KAFKA_API::clients::producer diff --git a/include/kafka/Properties.h b/include/kafka/Properties.h index 68beffe42..e29201506 100644 --- a/include/kafka/Properties.h +++ b/include/kafka/Properties.h @@ -2,12 +2,17 @@ #include +#include +#include +#include +#include #include #include #include #include #include +#include namespace KAFKA_API { @@ -17,14 +22,99 @@ namespace KAFKA_API { */ class Properties { +private: + using LogCallback = clients::LogCallback; + using ErrorCallback = clients::ErrorCallback; + using StatsCallback = clients::StatsCallback; + using OauthbearerTokenRefreshCallback = clients::OauthbearerTokenRefreshCallback; + using Interceptors = clients::Interceptors; + + struct ValueType + { + struct Object + { + virtual ~Object() = default; + virtual std::string toString() const = 0; + }; + + template + static std::string getString(const T& /*value*/) { return typeid(T).name(); } + template + static std::string getString(const std::string& value) { return value; } + + const ValueType& validate(const std::string& key) const + { + static const std::vector nonStringValueKeys = { + "log_cb", "error_cb", "stats_cb", "oauthbearer_token_refresh_cb", "interceptors" + }; + + if ((expectedKey.empty() && std::any_of(nonStringValueKeys.cbegin(), nonStringValueKeys.cend(), [key](const auto& k) { return k == key; })) + || (!expectedKey.empty() && key != expectedKey)) + { + throw std::runtime_error("Invalid key/value for configuration: " + key); + } + + return *this; + } + + template + struct ObjWrap: public Object + { + explicit ObjWrap(T v): value(std::move(v)) {} + std::string toString() const override { return getString(value); } + T value; + }; + + template + T& getValue() const { return (dynamic_cast&>(*object)).value; } + + ValueType() = default; + + ValueType(const std::string& value) // NOLINT + { object = std::make_shared>(value); } + + ValueType(const LogCallback& cb) // NOLINT + : expectedKey("log_cb") + { object = std::make_shared>(cb); } + + ValueType(const ErrorCallback& cb) // NOLINT + : expectedKey("error_cb") + { object = std::make_shared>(cb); } + + ValueType(const StatsCallback& cb) // NOLINT + : expectedKey("stats_cb") + { object = std::make_shared>(cb); } + + ValueType(const OauthbearerTokenRefreshCallback& cb) // NOLINT + : expectedKey("oauthbearer_token_refresh_cb") + { object = std::make_shared>(cb); } + + ValueType(const Interceptors& interceptors) // NOLINT + : expectedKey("interceptors") + { object = std::make_shared>(interceptors); } + + bool operator==(const ValueType& rhs) const { return toString() == rhs.toString(); } + + std::string toString() const { return object->toString(); } + + private: + std::string expectedKey; + std::shared_ptr object; + }; + public: // Just make sure key will printed in order - using PropertiesMap = std::map; + using PropertiesMap = std::map; Properties() = default; Properties(const Properties&) = default; - explicit Properties(PropertiesMap kvMap): _kvMap(std::move(kvMap)) {} - + Properties(PropertiesMap kvMap): _kvMap(std::move(kvMap)) // NOLINT + { + for (const auto& kv: _kvMap) + { + kv.second.validate(kv.first); + } + } virtual ~Properties() = default; bool operator==(const Properties& rhs) const { return map() == rhs.map(); } @@ -33,9 +123,10 @@ class Properties * Set a property. 
* If the map previously contained a mapping for the key, the old value is replaced by the specified value. */ - Properties& put(const std::string& key, const std::string& value) + template + Properties& put(const std::string& key, const T& value) { - _kvMap[key] = value; + _kvMap[key] = ValueType(value).validate(key); return *this; } @@ -47,19 +138,47 @@ class Properties _kvMap.erase(key); } + /** + * Check whether the map contains a property. + */ + bool contains(const std::string& key) const + { + auto search = _kvMap.find(key); + return search != _kvMap.end(); + } + + /** + * Get a property reference. + * If the property doesn't exist, an execption would be thrown. + */ + template + T& get(const std::string& key) const + { + auto search = _kvMap.find(key); + if (search == _kvMap.end()) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG, "Failed to get \"" + key + "\" from Properties!")); + } + + const ValueType& v = search->second; + return v.getValue(); + } + /** * Get a property. - * If the map previously contained a mapping for the key, the old value is replaced by the specified value. */ Optional getProperty(const std::string& key) const { - Optional ret; - auto search = _kvMap.find(key); - if (search != _kvMap.end()) + if (!contains(key)) return Optional{}; + + try { - ret = search->second; + return get(key); + } + catch (const std::bad_cast&) + { + return Optional{}; } - return ret; } /** @@ -77,9 +196,9 @@ class Properties std::for_each(_kvMap.cbegin(), _kvMap.cend(), [&ret](const auto& kv) { const std::string& key = kv.first; - const std::string& value = kv.second; + const std::string value = kv.second.toString(); - static const std::regex reSensitiveKey(R"(.+\.password|.+\.username|.+secret)"); + static const std::regex reSensitiveKey(R"(.+\.password|.+\.username|.+secret|.+key|.+pem)"); const bool isSensitive = std::regex_match(key, reSensitiveKey); ret.append(ret.empty() ? "" : "|").append(key).append("=").append(isSensitive ? "*" : value); diff --git a/include/kafka/Types.h b/include/kafka/Types.h index e4c6ef90e..3d04b7e07 100644 --- a/include/kafka/Types.h +++ b/include/kafka/Types.h @@ -199,20 +199,5 @@ inline std::string toString(const TopicPartitionOffsets& tpos) return ret; } - -/** - * SASL OAUTHBEARER token info. 
- */ -struct SaslOauthbearerToken -{ - using KeyValuePairs = std::map; - - std::string value; - std::chrono::microseconds mdLifetime{}; - std::string mdPrincipalName; - KeyValuePairs extensions; -}; - - } // end of KAFKA_API diff --git a/include/kafka/addons/KafkaRecoverableProducer.h b/include/kafka/addons/KafkaRecoverableProducer.h index 8ef5f296a..e0d3b4ccb 100644 --- a/include/kafka/addons/KafkaRecoverableProducer.h +++ b/include/kafka/addons/KafkaRecoverableProducer.h @@ -2,15 +2,15 @@ #include -#include #include -#include -#include +#include +#include #include -#include +#include -namespace KAFKA_API { namespace clients { + +namespace KAFKA_API { namespace clients { namespace producer { class KafkaRecoverableProducer { @@ -18,9 +18,8 @@ class KafkaRecoverableProducer explicit KafkaRecoverableProducer(const Properties& properties) : _properties(properties), _running(true) { - _errorCb = [this](const Error& error) { - if (error.isFatal()) _fatalError = std::make_unique(error); - }; + _properties.put(Config::ENABLE_MANUAL_EVENTS_POLL, "true"); + _properties.put(Config::ERROR_CB, [this](const Error& error) { if (error.isFatal()) _fatalError = std::make_unique(error); }); _producer = createProducer(); @@ -52,17 +51,6 @@ class KafkaRecoverableProducer return _producer->name(); } - /** - * Set the log callback for the kafka client (it's a per-client setting). - */ - void setLogger(const Logger& logger) - { - const std::lock_guard lock(_producerMutex); - - _logger = logger; - _producer->setLogger(*_logger); - } - /** * Set log level for the kafka client (the default value: 5). */ @@ -70,33 +58,8 @@ class KafkaRecoverableProducer { const std::lock_guard lock(_producerMutex); - _logLevel = level; - _producer->setLogLevel(*_logLevel); - } - - /** - * Set callback to receive the periodic statistics info. - * Note: 1) It only works while the "statistics.interval.ms" property is configured with a non-0 value. - * 2) The callback would be triggered periodically, receiving the internal statistics info (with JSON format) emited from librdkafka. 
- */ - void setStatsCallback(const KafkaClient::StatsCallback& cb) - { - const std::lock_guard lock(_producerMutex); - - _statsCb = cb; - _producer->setStatsCallback(*_statsCb); - } - - void setErrorCallback(const KafkaClient::ErrorCallback& cb) - { - const std::lock_guard lock(_producerMutex); - - _errorCb = [cb, this](const Error& error) { - cb(error); - - if (error.isFatal()) _fatalError = std::make_unique(error); - }; - _producer->setErrorCallback(*_errorCb); + _properties.put(Config::LOG_LEVEL, std::to_string(level)); + _producer->setLogLevel(level); } /** @@ -330,22 +293,11 @@ class KafkaRecoverableProducer std::unique_ptr createProducer() { - auto producer = std::make_unique(_properties, KafkaClient::EventsPollingOption::Manual); - - if (_logger) producer->setLogger(*_logger); - if (_logLevel) producer->setLogLevel(*_logLevel); - if (_statsCb) producer->setStatsCallback(*_statsCb); - if (_errorCb) producer->setErrorCallback(*_errorCb); - - return producer; + return std::make_unique(_properties); } // Configurations for producer - Properties _properties; - Optional _logger; - Optional _logLevel; - Optional _statsCb; - Optional _errorCb; + Properties _properties; std::unique_ptr _fatalError; @@ -356,5 +308,5 @@ class KafkaRecoverableProducer std::unique_ptr _producer; }; -} } // end of KAFKA_API::clients +} } } // end of KAFKA_API::clients::producer diff --git a/tests/integration/TestAdminClient.cc b/tests/integration/TestAdminClient.cc index ce0a78ae2..e9d23178e 100644 --- a/tests/integration/TestAdminClient.cc +++ b/tests/integration/TestAdminClient.cc @@ -9,7 +9,7 @@ TEST(AdminClient, CreateListDeleteTopics) { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << adminClient.name() << " started" << std::endl; const kafka::Topics topics = {kafka::utility::getRandomString(), kafka::utility::getRandomString()}; @@ -82,7 +82,7 @@ TEST(AdminClient, DuplicatedCreateDeleteTopics) const int numPartitions = 5; const int replicaFactor = 3; - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << adminClient.name() << " started" << std::endl; constexpr int MAX_REPEAT = 10; @@ -124,7 +124,7 @@ TEST(AdminClient, DeleteRecords) auto metadatas3 = KafkaTestUtility::ProduceMessages(topic, partition3, messages); // Prepare the AdminClient - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << adminClient.name() << " started" << std::endl; // Prepare offsets for `deleteRecords` @@ -148,7 +148,7 @@ TEST(AdminClient, DeleteRecords) KafkaTestUtility::WaitMetadataSyncUpBetweenBrokers(); - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); { auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); EXPECT_EQ(0, records.size()); diff --git a/tests/integration/TestKafkaConsumer.cc b/tests/integration/TestKafkaConsumer.cc index d07fa3779..516454200 100644 --- 
a/tests/integration/TestKafkaConsumer.cc +++ b/tests/integration/TestKafkaConsumer.cc @@ -23,68 +23,97 @@ TEST(KafkaConsumer, BasicPoll) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); - // The auto-commit consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::ENABLE_AUTO_COMMIT, "true")); - std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; + std::map brokersState; + + kafka::clients::Interceptors interceptors; + interceptors.onBrokerStateChange([&brokersState](int id, const std::string& proto, const std::string& name, int port, const std::string& state) { + const std::string brokerDescription = (std::to_string(id) + " - " + proto + "://" + name + ":" + std::to_string(port)); + std::cout << "Broker[" << brokerDescription << "] ==> " << state << std::endl; + if (!name.empty() && name != "GroupCoordinator") + { + brokersState[name + ":" + std::to_string(port)] = state; + } + }); - // Subscribe topics - consumer.subscribe({topic}, - [](kafka::clients::consumer::RebalanceEventType et, const kafka::TopicPartitions& tps) { - if (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned) { - // assignment finished - std::cout << "[" << kafka::utility::getCurrentTime() << "] assigned partitions: " << kafka::toString(tps) << std::endl; - } - }); - EXPECT_FALSE(consumer.subscription().empty()); + { + // Config the consumer with interceptors + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::Config::INTERCEPTORS, interceptors)); - // No message yet - auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer, std::chrono::seconds(1)); - EXPECT_EQ(0, records.size()); + std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; - // Try to get the beginning offsets - const kafka::TopicPartition tp{topic, partition}; - std::cout << "[" << kafka::utility::getCurrentTime() << "] Consumer get the beginningOffset[" << consumer.beginningOffsets({tp})[tp] << "]" << std::endl;; + // Subscribe topics + consumer.subscribe({topic}, + [](kafka::clients::consumer::RebalanceEventType et, const kafka::TopicPartitions& tps) { + if (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned) { + // assignment finished + std::cout << "[" << kafka::utility::getCurrentTime() << "] assigned partitions: " << kafka::toString(tps) << std::endl; + } + }); + EXPECT_FALSE(consumer.subscription().empty()); - // Prepare some messages to send - const std::vector> messages = { - {kafka::Headers{}, "key1", "value1"}, - {kafka::Headers{}, "key2", "value2"}, - {kafka::Headers{}, "key3", "value3"}, - }; + // No message yet + auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer, std::chrono::seconds(1)); + EXPECT_EQ(0, records.size()); - // Send the messages - KafkaTestUtility::ProduceMessages(topic, partition, messages); + // Should be able to get all brokers' state + EXPECT_EQ(KafkaTestUtility::GetNumberOfKafkaBrokers(), brokersState.size()); + // All brokers' state should be "UP" + for (const auto& brokerState: brokersState) + { + EXPECT_EQ("UP", brokerState.second); + } - // Poll these messages - records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); - EXPECT_EQ(messages.size(), records.size()); + // Try to get the beginning offsets + const kafka::TopicPartition tp{topic, partition}; + std::cout << "[" << 
kafka::utility::getCurrentTime() << "] Consumer get the beginningOffset[" << consumer.beginningOffsets({tp})[tp] << "]" << std::endl;; - // Copyable ConsumerRecord - { - auto recordsCopy = records; - recordsCopy.clear(); - } + // Prepare some messages to send + const std::vector> messages = { + {kafka::Headers{}, "key1", "value1"}, + {kafka::Headers{}, "key2", "value2"}, + {kafka::Headers{}, "key3", "value3"}, + }; - // Check messages - std::size_t rcvMsgCount = 0; - for (auto& record: records) - { - ASSERT_TRUE(rcvMsgCount < messages.size()); + // Send the messages + KafkaTestUtility::ProduceMessages(topic, partition, messages); - EXPECT_EQ(topic, record.topic()); - EXPECT_EQ(partition, record.partition()); - EXPECT_EQ(0, record.headers().size()); - EXPECT_EQ(std::get<1>(messages[rcvMsgCount]).size(), record.key().size()); - EXPECT_EQ(0, std::memcmp(std::get<1>(messages[rcvMsgCount]).c_str(), record.key().data(), record.key().size())); - EXPECT_EQ(std::get<2>(messages[rcvMsgCount]).size(), record.value().size()); - EXPECT_EQ(0, std::memcmp(std::get<2>(messages[rcvMsgCount]).c_str(), record.value().data(), record.value().size())); + // Poll these messages + records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); + EXPECT_EQ(messages.size(), records.size()); - ++rcvMsgCount; + // Copyable ConsumerRecord + { + auto recordsCopy = records; + recordsCopy.clear(); + } + + // Check messages + std::size_t rcvMsgCount = 0; + for (auto& record: records) + { + ASSERT_TRUE(rcvMsgCount < messages.size()); + + EXPECT_EQ(topic, record.topic()); + EXPECT_EQ(partition, record.partition()); + EXPECT_EQ(0, record.headers().size()); + EXPECT_EQ(std::get<1>(messages[rcvMsgCount]).size(), record.key().size()); + EXPECT_EQ(0, std::memcmp(std::get<1>(messages[rcvMsgCount]).c_str(), record.key().data(), record.key().size())); + EXPECT_EQ(std::get<2>(messages[rcvMsgCount]).size(), record.value().size()); + EXPECT_EQ(0, std::memcmp(std::get<2>(messages[rcvMsgCount]).c_str(), record.value().data(), record.value().size())); + + ++rcvMsgCount; + } + + // Close the consumer + consumer.close(); } - // Close the consumer - consumer.close(); + // All brokers' state should be "DOWN" + for (const auto& brokerState: brokersState) + { + EXPECT_EQ("DOWN", brokerState.second); + } } TEST(KafkaConsumer, PollWithHeaders) @@ -97,8 +126,7 @@ TEST(KafkaConsumer, PollWithHeaders) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // The auto-commit consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::ENABLE_AUTO_COMMIT, "true")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -191,11 +219,10 @@ TEST(KafkaConsumer, SeekAndPoll) // The auto-commit consumer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::ENABLE_AUTO_COMMIT, "true") - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1") // Only poll 1 message each time - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest"); // Seek to the earliest offset at the beginning + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1") // Only poll 1 message each time + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest"); // Seek to the earliest offset at the beginning - kafka::clients::KafkaConsumer 
consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; @@ -319,9 +346,11 @@ TEST(KafkaConsumer, NoOffsetCommitCallback) // The manual-commit consumer { - const auto props = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest"); // Seek to the earliest offset at the beginning + const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest"); // Seek to the earliest offset at the beginning - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; @@ -365,10 +394,11 @@ TEST(KafkaConsumer, OffsetCommitCallback) // The manual-commit consumer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") // Seek to the earliest offset at the beginning - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") // Seek to the earliest offset at the beginning + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; @@ -431,10 +461,11 @@ TEST(KafkaConsumer, OffsetCommitCallbackTriggeredBeforeClose) // The manual-commit consumer { const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") // Seek to the earliest offset at the beginning - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") // Seek to the earliest offset at the beginning + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; @@ -489,10 +520,12 @@ TEST(KafkaConsumer, OffsetCommitCallback_ManuallyPollEvents) // The manual-commit consumer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") // Seek to the earliest offset at the beginning - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") // Seek to the earliest offset at the beginning + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1") // Only poll 1 message each time + 
.put(kafka::clients::Config::ENABLE_MANUAL_EVENTS_POLL, "true"); // Would call `pollEvents()` manually - kafka::clients::KafkaConsumer consumer(props, kafka::clients::KafkaClient::EventsPollingOption::Manual); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; @@ -566,9 +599,10 @@ TEST(KafkaConsumer, ManualOffsetCommitAndPosition) // Start consumer a few times, but only commit the offset for the first message each time { auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Save the configurations (including the client.id/group.id) @@ -633,7 +667,7 @@ TEST(KafkaConsumer, ManualOffsetCommitAndPosition) // Start the consumer (2nd time) { - kafka::clients::KafkaConsumer consumer(savedProps); + kafka::clients::consumer::KafkaConsumer consumer(savedProps); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -679,7 +713,7 @@ TEST(KafkaConsumer, ManualOffsetCommitAndPosition) // Start the consumer (3rd time) { - kafka::clients::KafkaConsumer consumer(savedProps); + kafka::clients::consumer::KafkaConsumer consumer(savedProps); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -728,7 +762,7 @@ TEST(KafkaConsumer, ManualOffsetCommitAndPosition) // Start the consumer (4th time) { - kafka::clients::KafkaConsumer consumer(savedProps); + kafka::clients::consumer::KafkaConsumer consumer(savedProps); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -768,7 +802,7 @@ TEST(KafkaConsumer, ManualOffsetCommitAndPosition) // Start the consumer, -- since all records have been committed, no record polled any more { - kafka::clients::KafkaConsumer consumer(savedProps); + kafka::clients::consumer::KafkaConsumer consumer(savedProps); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -803,12 +837,13 @@ TEST(KafkaConsumer, CommitOffsetBeforeRevolkingPartitions) // Prepare poperties for consumers auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::GROUP_ID, kafka::utility::getRandomString()); + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::GROUP_ID, kafka::utility::getRandomString()); { // First consumer starts - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); consumer.subscribe({topic}, @@ -833,7 +868,7 @@ TEST(KafkaConsumer, CommitOffsetBeforeRevolkingPartitions) { // Second consumer starts - kafka::clients::KafkaConsumer consumer(props); + 
kafka::clients::consumer::KafkaConsumer consumer(props); consumer.subscribe({topic}); @@ -874,10 +909,9 @@ TEST(KafkaConsumer, AutoOffsetCommitAndPosition) // Consumer will poll twice, -- Note, the last polled message offset would not be committed (no following `poll`) { const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::ENABLE_AUTO_COMMIT, "true") - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Save the properties @@ -935,7 +969,7 @@ TEST(KafkaConsumer, AutoOffsetCommitAndPosition) // Note, the last message was not committed previously // Here we'll start another consumer to continue... { - kafka::clients::KafkaConsumer consumer(savedProps); + kafka::clients::consumer::KafkaConsumer consumer(savedProps); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -975,9 +1009,10 @@ TEST(KafkaConsumer, RebalancePartitionsAssign) // Prepare the consumer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::GROUP_ID, group); + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::GROUP_ID, group); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; std::vector partitionsAssigned; @@ -997,9 +1032,10 @@ TEST(KafkaConsumer, RebalancePartitionsAssign) auto fut = std::async(std::launch::async, [topic, group]() { auto consumerProps = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::GROUP_ID, group); - kafka::clients::KafkaConsumer anotherConsumer(consumerProps); + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::GROUP_ID, group); + kafka::clients::consumer::KafkaConsumer anotherConsumer(consumerProps); anotherConsumer.subscribe({topic}); KafkaTestUtility::ConsumeMessagesUntilTimeout(anotherConsumer); }); @@ -1046,7 +1082,7 @@ TEST(KafkaConsumer, RebalancePartitionsAssign) TEST(KafkaConsumer, ThreadCount) { - auto testThreadCount = [](kafka::clients::KafkaClient::EventsPollingOption eventsPollingOption) { + auto testThreadCount = [](bool enableManualEventsPoll) { struct { std::atomic main = {0}; std::atomic background = {0}; @@ -1083,9 +1119,9 @@ TEST(KafkaConsumer, ThreadCount) kafka::clients::Interceptors interceptors; interceptors.onThreadStart(threadStartCb).onThreadExit(threadExitCb); - const kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig(), - eventsPollingOption, - interceptors); + const kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::Config::ENABLE_MANUAL_EVENTS_POLL, enableManualEventsPoll ? 
"true" : "false") + .put(kafka::clients::Config::INTERCEPTORS, interceptors)); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; std::cout << "[" << kafka::utility::getCurrentTime() << "] librdkafka thread cnt[" << kafka::utility::getLibRdKafkaThreadCount() << "]" << std::endl; @@ -1097,8 +1133,7 @@ TEST(KafkaConsumer, ThreadCount) EXPECT_EQ(1, threadCount.main); EXPECT_EQ(KafkaTestUtility::GetNumberOfKafkaBrokers() + 2, threadCount.broker); - EXPECT_EQ(eventsPollingOption == kafka::clients::KafkaClient::EventsPollingOption::Auto ? 1 : 0, - threadCount.background); + EXPECT_EQ(enableManualEventsPoll ? 0 : 1, threadCount.background); } EXPECT_EQ(0, kafka::utility::getLibRdKafkaThreadCount()); @@ -1108,8 +1143,8 @@ TEST(KafkaConsumer, ThreadCount) EXPECT_EQ(0, threadCount.background); }; - testThreadCount(kafka::clients::KafkaClient::EventsPollingOption::Auto); - testThreadCount(kafka::clients::KafkaClient::EventsPollingOption::Manual); + testThreadCount(false); + testThreadCount(true); } TEST(KafkaConsumer, PartitionAssignment) @@ -1121,7 +1156,7 @@ TEST(KafkaConsumer, PartitionAssignment) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Assign topic-partitions @@ -1143,7 +1178,7 @@ TEST(KafkaConsumer, TopicSubscription) for (const auto& topic: topics) KafkaTestUtility::CreateKafkaTopic(topic, NUM_PARTITIONS, REPLICA_FACTOR); // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -1165,7 +1200,7 @@ TEST(KafkaConsumer, SubscribeUnsubscribeThenAssign) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -1201,7 +1236,7 @@ TEST(KafkaConsumer, AssignUnassignAndSubscribe) KafkaTestUtility::CreateKafkaTopic(topic, NUM_PARTITIONS, REPLICA_FACTOR); // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Assign topic-partitions @@ -1232,7 +1267,7 @@ TEST(KafkaConsumer, AssignUnassignAndSubscribe) TEST(KafkaConsumer, WrongOperation_SeekBeforePartitionsAssigned) { // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " would 
seekToBeginning" << std::endl; @@ -1249,7 +1284,7 @@ TEST(KafkaConsumer, WrongOperation_SubscribeThenAssign) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -1271,7 +1306,7 @@ TEST(KafkaConsumer, WrongOperation_AssignThenSubscribe) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Assign topic-partitions @@ -1290,7 +1325,7 @@ TEST(KafkaClient, FetchBrokerMetadata) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // Start consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -1323,9 +1358,9 @@ TEST(KafkaConsumer, SubscribeAndPoll) const kafka::Topic topic = kafka::utility::getRandomString(); KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); - const auto props = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::consumer::Config::ENABLE_PARTITION_EOF, "true"); + const auto props = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::consumer::ConsumerConfig::ENABLE_PARTITION_EOF, "true"); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; kafka::TopicPartitions assignedPartitions; @@ -1376,10 +1411,9 @@ TEST(KafkaConsumer, PauseAndResume) // An auto-commit Consumer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::ENABLE_AUTO_COMMIT, "true") - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); - kafka::clients::KafkaConsumer consumer(props); + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -1448,9 +1482,9 @@ TEST(KafkaConsumer, SeekAfterPause) // An auto-commit Consumer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); - kafka::clients::KafkaConsumer consumer(props); + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ 
-1499,11 +1533,11 @@ TEST(KafkaConsumer, SeekBeforePause) // An auto-commit Consumer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1") .put("log_level", "7") .put("debug", "all"); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe topics @@ -1546,9 +1580,9 @@ TEST(KafkaConsumer, PauseStillWorksAfterRebalance) // Start the consumer1 auto props1 = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::SESSION_TIMEOUT_MS, "60000") - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); - kafka::clients::KafkaConsumer consumer1(props1); + .put(kafka::clients::consumer::ConsumerConfig::SESSION_TIMEOUT_MS, "60000") + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); + kafka::clients::consumer::KafkaConsumer consumer1(props1); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer1.name() << " started" << std::endl; // Subscribe topics @@ -1570,12 +1604,12 @@ TEST(KafkaConsumer, PauseStillWorksAfterRebalance) std::promise p; auto fu = p.get_future(); // Anther consumer with the same group.id - const auto groupIdOption = consumer1.getProperty(kafka::clients::consumer::Config::GROUP_ID); + const auto groupIdOption = consumer1.getProperty(kafka::clients::consumer::ConsumerConfig::GROUP_ID); ASSERT_TRUE(groupIdOption); - const auto props2 = props1.put(kafka::clients::consumer::Config::GROUP_ID, *groupIdOption); // NOLINT + const auto props2 = props1.put(kafka::clients::consumer::ConsumerConfig::GROUP_ID, *groupIdOption); // NOLINT const KafkaTestUtility::JoiningThread consumer2Thread( [props2, topic1, topic2, &p]() { - kafka::clients::KafkaConsumer consumer2(props2); + kafka::clients::consumer::KafkaConsumer consumer2(props2); consumer2.subscribe({topic1, topic2}); for (int i = 0; i < 50; ++i) { consumer2.poll(std::chrono::milliseconds(100)); @@ -1633,7 +1667,7 @@ TEST(KafkaConsumer, OffsetsForTime) std::cout << "Produce messages:" << std::endl; { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); for (std::size_t i = 0; i < MESSAGES_NUM; ++i) { checkPoints.emplace_back(system_clock::now()); @@ -1662,7 +1696,7 @@ TEST(KafkaConsumer, OffsetsForTime) std::cout << "Try with normal case:" << std::endl; { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); consumer.subscribe({topic1, topic2}); for (std::size_t i = 0; i < MESSAGES_NUM; ++i) { @@ -1678,7 +1712,7 @@ TEST(KafkaConsumer, OffsetsForTime) std::cout << "Try with no subcription:" << std::endl; { - const kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + const kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); // Here we doesn't subsribe to topic1 or topic2 (the result is undefined) for (std::size_t i = 0; i < MESSAGES_NUM; ++i) @@ -1703,7 
+1737,7 @@ TEST(KafkaConsumer, OffsetsForTime) std::cout << "Try with all invalid topic-partitions: (exception caught)" << std::endl; { - const kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + const kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); const auto timepoint = checkPoints[0]; @@ -1713,7 +1747,7 @@ TEST(KafkaConsumer, OffsetsForTime) std::cout << "Try with partial valid topic-partitions:" << std::endl; { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); consumer.subscribe({topic1, topic2}); for (std::size_t i = 0; i < MESSAGES_NUM; ++i) @@ -1747,7 +1781,7 @@ TEST(KafkaConsumer, RecoverByTime) // Send the messages { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); for (const auto& msg: messages) { auto record = kafka::clients::producer::ProducerRecord(topic, @@ -1768,7 +1802,7 @@ TEST(KafkaConsumer, RecoverByTime) // The first consumer quits, and fails to handle all messages constexpr int FAILURE_MSG_INDEX = 3; { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest")); consumer.subscribe({topic}); auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); @@ -1791,7 +1825,7 @@ TEST(KafkaConsumer, RecoverByTime) // The second consumer catches up and continue { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); kafka::TopicPartitions assignedPartitions; // Subscribe topics @@ -1843,14 +1877,14 @@ TEST(KafkaConsumer, AutoCreateTopics) { const kafka::Topic topic = kafka::utility::getRandomString(); - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put("allow.auto.create.topics", "true")); - - // The error would be triggered while consumer tries to subscribe a non-existed topic. - consumer.setErrorCallback([](const kafka::Error& error) { - std::cout << "consumer met an error: " << error.toString() << std::endl; - EXPECT_EQ(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, error.value()); - }); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put("allow.auto.create.topics", "true") + .put(kafka::clients::Config::ERROR_CB, + // The error would be triggered while consumer tries to subscribe a non-existed topic. + [](const kafka::Error& error) { + std::cout << "consumer met an error: " << error.toString() << std::endl; + EXPECT_EQ(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, error.value()); + })); // Subscribe topics, but would never make it! 
EXPECT_KAFKA_THROW(consumer.subscribe({topic}, kafka::clients::consumer::NullRebalanceCallback, std::chrono::seconds(10)), @@ -1868,16 +1902,15 @@ TEST(KafkaConsumer, CreateTopicAfterSubscribe) KafkaTestUtility::CreateKafkaTopic(topic, 1, 1); }; - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig()); - bool errCbTriggered = false; - - // The error would be triggered while consumer tries to subscribe a non-existed topic. - consumer.setErrorCallback([&errCbTriggered](const kafka::Error& error) { - errCbTriggered = true; - KafkaTestUtility::DumpError(error); - EXPECT_EQ(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, error.value()); - }); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::Config::ERROR_CB, + // The error would be triggered while consumer tries to subscribe a non-existed topic. + [&errCbTriggered](const kafka::Error& error) { + errCbTriggered = true; + KafkaTestUtility::DumpError(error); + EXPECT_EQ(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, error.value()); + })); // The topic would be created after 5 seconds const KafkaTestUtility::JoiningThread consumer1Thread(createTopicAfterSeconds, 5); @@ -1917,13 +1950,13 @@ TEST(KafkaConsumer, CooperativeRebalance) }; const kafka::Properties props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::CLIENT_ID, clientId) - .put(kafka::clients::consumer::Config::GROUP_ID, groupId) - .put(kafka::clients::consumer::Config::PARTITION_ASSIGNMENT_STRATEGY, "cooperative-sticky"); + .put(kafka::clients::consumer::ConsumerConfig::CLIENT_ID, clientId) + .put(kafka::clients::consumer::ConsumerConfig::GROUP_ID, groupId) + .put(kafka::clients::consumer::ConsumerConfig::PARTITION_ASSIGNMENT_STRATEGY, "cooperative-sticky"); KafkaTestUtility::PrintDividingLine(clientId + " is starting"); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); consumer.subscribe({topicPattern}, rebalanceCb); @@ -1968,9 +2001,9 @@ TEST(KafkaConsumer, FetchBrokerMetadataTriggersRejoin) }; const kafka::Properties props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::PARTITION_ASSIGNMENT_STRATEGY, "cooperative-sticky"); + .put(kafka::clients::consumer::ConsumerConfig::PARTITION_ASSIGNMENT_STRATEGY, "cooperative-sticky"); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); // Subscribe to the topic pattern consumer.subscribe({topicPattern}, rebalanceCb); @@ -2009,7 +2042,7 @@ TEST(KafkaConsumer, SubscribeNotConflictWithStatsEvent) auto testNormalOperations = [topic1, topic2, topic3](const kafka::Properties& props) { KafkaTestUtility::PrintDividingLine("[Normal operations] Test with consumer properties[" + props.toString() + "]"); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); // Subscribe topics kafka::Topics topicsToSubscribe = {topic1, topic2}; @@ -2050,7 +2083,7 @@ TEST(KafkaConsumer, SubscribeNotConflictWithStatsEvent) auto testDuplicatedOperations = [topic1, topic2, topic3](const kafka::Properties& props) { KafkaTestUtility::PrintDividingLine("[Duplicated operations] Test with consumer properties[" + props.toString() + "]"); - kafka::clients::KafkaConsumer consumer(props); + kafka::clients::consumer::KafkaConsumer consumer(props); // Rebalance callback auto rebalanceCb = [](kafka::clients::consumer::RebalanceEventType et, const 
kafka::TopicPartitions& tps) { @@ -2105,7 +2138,7 @@ TEST(KafkaConsumer, SubscribeNotConflictWithStatsEvent) testNormalOperations(props); // Try with incremental partitions assignment - props.put(kafka::clients::consumer::Config::PARTITION_ASSIGNMENT_STRATEGY, "cooperative-sticky"); + props.put(kafka::clients::consumer::ConsumerConfig::PARTITION_ASSIGNMENT_STRATEGY, "cooperative-sticky"); testDuplicatedOperations(props); testNormalOperations(props); } diff --git a/tests/integration/TestKafkaProducer.cc b/tests/integration/TestKafkaProducer.cc index fb0d20289..cc8ff013a 100644 --- a/tests/integration/TestKafkaProducer.cc +++ b/tests/integration/TestKafkaProducer.cc @@ -24,10 +24,10 @@ TEST(KafkaProducer, SendMessagesWithAcks1) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // Properties for the producer - const auto props = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::producer::Config::ACKS, "1"); + const auto props = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::producer::ProducerConfig::ACKS, "1"); // Sync-send producer - kafka::clients::KafkaProducer producer(props); + kafka::clients::producer::KafkaProducer producer(props); // Send messages for (const auto& msg: messages) @@ -39,8 +39,8 @@ TEST(KafkaProducer, SendMessagesWithAcks1) } // Prepare a consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest")); consumer.setLogLevel(kafka::Log::Level::Crit); consumer.subscribe({topic}); @@ -71,10 +71,10 @@ TEST(KafkaProducer, SendMessagesWithAcksAll) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); // Properties for the producer - const auto props = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::producer::Config::ACKS, "all"); + const auto props = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::producer::ProducerConfig::ACKS, "all"); // Async-send producer - kafka::clients::KafkaProducer producer(props); + kafka::clients::producer::KafkaProducer producer(props); // Send messages for (const auto& msg: messages) @@ -88,8 +88,8 @@ TEST(KafkaProducer, SendMessagesWithAcksAll) } // Prepare a consumer - const auto consumerProps = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest"); - kafka::clients::KafkaConsumer consumer(consumerProps); + const auto consumerProps = KafkaTestUtility::GetKafkaClientCommonConfig().put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest"); + kafka::clients::consumer::KafkaConsumer consumer(consumerProps); consumer.setLogLevel(kafka::Log::Level::Crit); consumer.subscribe({topic}); @@ -122,11 +122,11 @@ TEST(KafkaProducer, FailToSendMessagesWithAcksAll) // Properties for the producer const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::ACKS, "all") - .put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "5000"); // To shorten the test + .put(kafka::clients::producer::ProducerConfig::ACKS, "all") + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "5000"); // To shorten the test // Async-send producer - kafka::clients::KafkaProducer producer(props); + kafka::clients::producer::KafkaProducer producer(props); if (auto brokerMetadata = 
producer.fetchBrokerMetadata(topic)) { @@ -160,11 +160,11 @@ TEST(KafkaProducer, InSyncBrokersAckTimeout) { const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::ACKS, "all") - .put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "1000") - .put(kafka::clients::producer::Config::REQUEST_TIMEOUT_MS, "1"); // Here it's a short value, more likely to trigger the timeout + .put(kafka::clients::producer::ProducerConfig::ACKS, "all") + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "1000") + .put(kafka::clients::producer::ProducerConfig::REQUEST_TIMEOUT_MS, "1"); // Here it's a short value, more likely to trigger the timeout - kafka::clients::KafkaProducer producer(props); + kafka::clients::producer::KafkaProducer producer(props); constexpr int MAX_RETRIES = 100; for (int i = 0; i < MAX_RETRIES; ++i) @@ -186,7 +186,7 @@ TEST(KafkaProducer, InSyncBrokersAckTimeout) TEST(KafkaProducer, DefaultPartitioner) { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); const kafka::Topic topic = kafka::utility::getRandomString(); KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); @@ -220,9 +220,9 @@ TEST(KafkaProducer, TryOtherPartitioners) { auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); // Partitioner "murmur2": if with no available key, all these records would be partitioned to the same partition - props.put(kafka::clients::producer::Config::PARTITIONER, "murmur2"); + props.put(kafka::clients::producer::ProducerConfig::PARTITIONER, "murmur2"); - kafka::clients::KafkaProducer producer(props); + kafka::clients::producer::KafkaProducer producer(props); std::map partitionCounts; static constexpr int MSG_NUM = 20; @@ -249,18 +249,18 @@ TEST(KafkaProducer, TryOtherPartitioners) { auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); // An invalid partitioner - props.put(kafka::clients::producer::Config::PARTITIONER, "invalid"); + props.put(kafka::clients::producer::ProducerConfig::PARTITIONER, "invalid"); // An exception would be thrown for invalid "partitioner" setting - EXPECT_KAFKA_THROW(const kafka::clients::KafkaProducer producer(props), RD_KAFKA_RESP_ERR__INVALID_ARG); + EXPECT_KAFKA_THROW(const kafka::clients::producer::KafkaProducer producer(props), RD_KAFKA_RESP_ERR__INVALID_ARG); } } TEST(KafkaProducer, RecordWithEmptyOrNullFields) { auto sendMessages = [](const kafka::clients::producer::ProducerRecord& record, std::size_t repeat, const std::string& partitioner) { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::PARTITIONER, partitioner)); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::PARTITIONER, partitioner)); producer.setLogLevel(kafka::Log::Level::Crit); for (std::size_t i = 0; i < repeat; ++i) { producer.syncSend(record); @@ -282,8 +282,8 @@ TEST(KafkaProducer, RecordWithEmptyOrNullFields) sendMessages(producerRecord, 10, partitioner); // The auto-commit consumer - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, 
"earliest")); // Subscribe topics consumer.subscribe({topic}); @@ -330,7 +330,7 @@ TEST(KafkaProducer, RecordWithEmptyOrNullFields) TEST(KafkaProducer, ThreadCount) { - auto testThreadCount = [](kafka::clients::KafkaClient::EventsPollingOption eventsPollingOption) { + auto testThreadCount = [](bool enableManualEventsPoll) { struct { std::atomic main = {0}; std::atomic background = {0}; @@ -367,9 +367,9 @@ TEST(KafkaProducer, ThreadCount) kafka::clients::Interceptors interceptors; interceptors.onThreadStart(threadStartCb).onThreadExit(threadExitCb); - const kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig(), - eventsPollingOption, - interceptors); + const kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::Config::ENABLE_MANUAL_EVENTS_POLL, enableManualEventsPoll ? "true" : "false") + .put(kafka::clients::Config::INTERCEPTORS, interceptors)); std::cout << "[" <> messages = { @@ -590,11 +589,11 @@ TEST(KafkaProducer, TooLargeMessageForBroker) const auto record = kafka::clients::producer::ProducerRecord(topic, partition, kafka::NullKey, kafka::Value(value.c_str(), value.size())); const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::BATCH_SIZE, "2000000") - .put(kafka::clients::producer::Config::MESSAGE_MAX_BYTES, "2000000") // Note: by default, the brokers only support messages no larger than 1M - .put(kafka::clients::producer::Config::LINGER_MS, "100"); // Here use a large value to make sure it's long enough to generate a large message-batch + .put(kafka::clients::producer::ProducerConfig::BATCH_SIZE, "2000000") + .put(kafka::clients::producer::ProducerConfig::MESSAGE_MAX_BYTES, "2000000") // Note: by default, the brokers only support messages no larger than 1M + .put(kafka::clients::producer::ProducerConfig::LINGER_MS, "100"); // Here use a large value to make sure it's long enough to generate a large message-batch - kafka::clients::KafkaProducer producer(props); + kafka::clients::producer::KafkaProducer producer(props); constexpr std::size_t MSG_NUM = 2000; std::size_t failedCount = 0; @@ -620,12 +619,12 @@ TEST(KafkaProducer, CopyRecordValueWithinSend) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::PARTITIONER, "murmur2"); // `ProducerRecord`s with empty key are mapped to a single partition + .put(kafka::clients::producer::ProducerConfig::PARTITIONER, "murmur2"); // `ProducerRecord`s with empty key are mapped to a single partition // Send messages (with option "ToCopyRecordValue") constexpr std::size_t MSG_NUM = 100; { - kafka::clients::KafkaProducer producer(props); + kafka::clients::producer::KafkaProducer producer(props); for (std::size_t i = 0; i < MSG_NUM; ++i) { @@ -633,7 +632,7 @@ TEST(KafkaProducer, CopyRecordValueWithinSend) auto record = kafka::clients::producer::ProducerRecord(topic, kafka::Key(nullptr, 0), kafka::Value(value.c_str(), value.size())); producer.send(record, [] (const kafka::clients::producer::RecordMetadata& /*metadata*/, const kafka::Error& error) { EXPECT_FALSE(error); }, - kafka::clients::KafkaProducer::SendOption::ToCopyRecordValue); // Copy the payload internally + kafka::clients::producer::KafkaProducer::SendOption::ToCopyRecordValue); // Copy the payload internally } } std::cout << "[" <(message); - auto record = kafka::clients::producer::ProducerRecord(topic, kafka::NullKey, 
kafka::Value(payload->c_str(), payload->size())); + auto record = ProducerRecord(topic, kafka::NullKey, kafka::Value(payload->c_str(), payload->size())); producer.send(record, - [payload](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) { + [payload](const RecordMetadata& metadata, const kafka::Error& error) { std::cout << "[" << kafka::utility::getCurrentTime() << "] Producer got the delivery result: " << error.message() << ", with metadata: " << metadata.toString() << std::endl; }); @@ -52,10 +56,10 @@ TEST(Transaction, CommitTransaction) const std::string isolationConf = (isolationLevel == IsolationLevel::ReadCommitted) ? "read_committed" : "read_uncommitted"; auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest"); - props.put(kafka::clients::consumer::Config::ISOLATION_LEVEL, isolationConf); + props.put(ConsumerConfig::AUTO_OFFSET_RESET, "earliest"); + props.put(ConsumerConfig::ISOLATION_LEVEL, isolationConf); - kafka::clients::KafkaConsumer consumer(props); + KafkaConsumer consumer(props); consumer.setLogLevel(kafka::Log::Level::Crit); consumer.subscribe({topic}); @@ -145,12 +149,15 @@ TEST(Transaction, CommitTransaction) TEST(Transaction, CatchException) { + using namespace kafka::clients; + using namespace kafka::clients::producer; + { KafkaTestUtility::PrintDividingLine("No transaction.id configured"); auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - kafka::clients::KafkaProducer producer(props); + KafkaProducer producer(props); EXPECT_KAFKA_THROW(producer.initTransactions(), RD_KAFKA_RESP_ERR__NOT_CONFIGURED); } @@ -159,9 +166,9 @@ TEST(Transaction, CatchException) KafkaTestUtility::PrintDividingLine("No initTransactions"); auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::producer::Config::TRANSACTIONAL_ID, kafka::utility::getRandomString()); + props.put(ProducerConfig::TRANSACTIONAL_ID, kafka::utility::getRandomString()); - kafka::clients::KafkaProducer producer(props); + KafkaProducer producer(props); EXPECT_KAFKA_THROW(producer.beginTransaction(), RD_KAFKA_RESP_ERR__STATE); } @@ -170,9 +177,9 @@ TEST(Transaction, CatchException) KafkaTestUtility::PrintDividingLine("No beginTransaction"); auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::producer::Config::TRANSACTIONAL_ID, kafka::utility::getRandomString()); + props.put(ProducerConfig::TRANSACTIONAL_ID, kafka::utility::getRandomString()); - kafka::clients::KafkaProducer producer(props); + KafkaProducer producer(props); producer.initTransactions(); @@ -184,9 +191,9 @@ TEST(Transaction, CatchException) KafkaTestUtility::PrintDividingLine("abortTransaction (with no initTransactions)"); auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::producer::Config::TRANSACTIONAL_ID, kafka::utility::getRandomString()); + props.put(ProducerConfig::TRANSACTIONAL_ID, kafka::utility::getRandomString()); - kafka::clients::KafkaProducer producer(props); + KafkaProducer producer(props); EXPECT_KAFKA_THROW(producer.abortTransaction(), RD_KAFKA_RESP_ERR__STATE); } @@ -195,9 +202,9 @@ TEST(Transaction, CatchException) KafkaTestUtility::PrintDividingLine("abortTransaction (with no beginTransaction)"); auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::producer::Config::TRANSACTIONAL_ID, kafka::utility::getRandomString()); + props.put(ProducerConfig::TRANSACTIONAL_ID, 
kafka::utility::getRandomString()); - kafka::clients::KafkaProducer producer(props); + KafkaProducer producer(props); producer.initTransactions(); @@ -208,9 +215,9 @@ TEST(Transaction, CatchException) KafkaTestUtility::PrintDividingLine("abortTransaction (with no message sent)"); auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::producer::Config::TRANSACTIONAL_ID, kafka::utility::getRandomString()); + props.put(ProducerConfig::TRANSACTIONAL_ID, kafka::utility::getRandomString()); - kafka::clients::KafkaProducer producer(props); + KafkaProducer producer(props); producer.initTransactions(); @@ -223,9 +230,9 @@ TEST(Transaction, CatchException) KafkaTestUtility::PrintDividingLine("commitTransation (with no message sent)"); auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::producer::Config::TRANSACTIONAL_ID, kafka::utility::getRandomString()); + props.put(ProducerConfig::TRANSACTIONAL_ID, kafka::utility::getRandomString()); - kafka::clients::KafkaProducer producer(props); + KafkaProducer producer(props); producer.initTransactions(); @@ -237,6 +244,8 @@ TEST(Transaction, CatchException) TEST(Transaction, ContinueTheTransaction) { + using namespace kafka::clients::producer; + const kafka::Topic topic = kafka::utility::getRandomString(); const std::string transactionId = kafka::utility::getRandomString(); const std::string messageToSend = "message to send"; @@ -245,17 +254,17 @@ TEST(Transaction, ContinueTheTransaction) // Start a producer to send the message, but fail to commit { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::TRANSACTIONAL_ID, transactionId)); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::TRANSACTIONAL_ID, transactionId)); producer.initTransactions(); producer.beginTransaction(); - auto record = kafka::clients::producer::ProducerRecord(topic, kafka::NullKey, kafka::Value(messageToSend.c_str(), messageToSend.size())); + auto record = ProducerRecord(topic, kafka::NullKey, kafka::Value(messageToSend.c_str(), messageToSend.size())); producer.send(record, - [](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) { + [](const RecordMetadata& metadata, const kafka::Error& error) { std::cout << "[" << kafka::utility::getCurrentTime() << "] Producer got the delivery result: " << error.message() << ", with metadata: " << metadata.toString() << std::endl; }); @@ -265,17 +274,17 @@ TEST(Transaction, ContinueTheTransaction) // Start another producer, continue to send the message (with the same transaction.id) { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::TRANSACTIONAL_ID, transactionId)); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::TRANSACTIONAL_ID, transactionId)); producer.initTransactions(); producer.beginTransaction(); - auto record = kafka::clients::producer::ProducerRecord(topic, kafka::NullKey, kafka::Value(messageToSend.c_str(), messageToSend.size())); + auto record = ProducerRecord(topic, kafka::NullKey, kafka::Value(messageToSend.c_str(), messageToSend.size())); producer.send(record, - [](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) { + [](const RecordMetadata& metadata, 
const kafka::Error& error) { std::cout << "[" << kafka::utility::getCurrentTime() << "] Producer got the delivery result: " << error.message() << ", with metadata: " << metadata.toString() << std::endl; }); @@ -287,9 +296,9 @@ TEST(Transaction, ContinueTheTransaction) // Check all received messages (committed only) { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::ISOLATION_LEVEL, "read_committed")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::ISOLATION_LEVEL, "read_committed")); consumer.subscribe({topic}); auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); @@ -303,9 +312,9 @@ TEST(Transaction, ContinueTheTransaction) // Check all received messages (incluing uncommitted) { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::ISOLATION_LEVEL, "read_uncommitted")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::ISOLATION_LEVEL, "read_uncommitted")); consumer.subscribe({topic}); auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); @@ -321,6 +330,9 @@ TEST(Transaction, ContinueTheTransaction) TEST(Transaction, ContinueTheTransaction2) { + using namespace kafka::clients::producer; + using namespace kafka::clients::consumer; + const kafka::Topic topic = kafka::utility::getRandomString(); const std::string transactionId = kafka::utility::getRandomString(); const std::string clientId = "someTransactionalProducer"; @@ -336,9 +348,9 @@ TEST(Transaction, ContinueTheTransaction2) // Start a producer to send the messages, but fail to commit the transaction for some messages (before close) { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::TRANSACTIONAL_ID, transactionId) - .put(kafka::clients::producer::Config::CLIENT_ID, clientId)); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(ProducerConfig::TRANSACTIONAL_ID, transactionId) + .put(ProducerConfig::CLIENT_ID, clientId)); producer.initTransactions(); std::cout << "[" << kafka::utility::getCurrentTime() << "] The first producer initialized the transaction" << std::endl; @@ -351,12 +363,12 @@ TEST(Transaction, ContinueTheTransaction2) for (std::size_t i = 0; i < NUM_MESSAGES / 2; ++i) { const auto& msg = messagesToSend[i]; - auto record = kafka::clients::producer::ProducerRecord(topic, - kafka::NullKey, - kafka::Value(msg.c_str(), msg.size())); + auto record = ProducerRecord(topic, + kafka::NullKey, + kafka::Value(msg.c_str(), msg.size())); producer.send(record, - [&delivered](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) { + [&delivered](const RecordMetadata& metadata, const kafka::Error& error) { ++delivered; if (error) { @@ -382,12 +394,12 @@ TEST(Transaction, ContinueTheTransaction2) for (std::size_t i = NUM_MESSAGES / 2; i < NUM_MESSAGES; ++i) { const auto& msg = messagesToSend[i]; - 
auto record = kafka::clients::producer::ProducerRecord(topic, - kafka::NullKey, - kafka::Value(msg.c_str(), msg.size())); + auto record = ProducerRecord(topic, + kafka::NullKey, + kafka::Value(msg.c_str(), msg.size())); producer.send(record, - [&delivered](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) { + [&delivered](const RecordMetadata& metadata, const kafka::Error& error) { ++delivered; if (error) { @@ -410,9 +422,9 @@ TEST(Transaction, ContinueTheTransaction2) // Re-start the producer, continue to send the message (with the same transaction.id) { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::TRANSACTIONAL_ID, transactionId) - .put(kafka::clients::producer::Config::CLIENT_ID, clientId ) ); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(ProducerConfig::TRANSACTIONAL_ID, transactionId) + .put(ProducerConfig::CLIENT_ID, clientId ) ); producer.initTransactions(); std::cout << "[" << kafka::utility::getCurrentTime() << "] The second producer initialized the transaction" << std::endl; @@ -425,12 +437,12 @@ TEST(Transaction, ContinueTheTransaction2) for (std::size_t i = NUM_MESSAGES / 2; i < NUM_MESSAGES; ++i) { const auto& msg = messagesToSend[i]; - auto record = kafka::clients::producer::ProducerRecord(topic, - kafka::NullKey, - kafka::Value(msg.c_str(), msg.size())); + auto record = ProducerRecord(topic, + kafka::NullKey, + kafka::Value(msg.c_str(), msg.size())); producer.send(record, - [&delivered](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) { + [&delivered](const RecordMetadata& metadata, const kafka::Error& error) { ++delivered; if (error) { @@ -452,9 +464,9 @@ TEST(Transaction, ContinueTheTransaction2) // Check all received messages (committed only) { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::ISOLATION_LEVEL, "read_committed")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(ConsumerConfig::ISOLATION_LEVEL, "read_committed")); consumer.subscribe({topic}); // No message lost, no message duplicated @@ -469,9 +481,9 @@ TEST(Transaction, ContinueTheTransaction2) // Check all received messages (incluing uncommitted) { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::ISOLATION_LEVEL, "read_uncommitted")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(ConsumerConfig::ISOLATION_LEVEL, "read_uncommitted")); consumer.subscribe({topic}); auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); diff --git a/tests/robustness/TestAdminClient.cc b/tests/robustness/TestAdminClient.cc index 420de2546..e38b775ff 100755 --- a/tests/robustness/TestAdminClient.cc +++ b/tests/robustness/TestAdminClient.cc @@ -14,7 +14,7 @@ TEST(AdminClient, BrokersTimeout) const int replicaFactor = 3; { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient 
adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << adminClient.name() << " started" << std::endl; KafkaTestUtility::PauseBrokers(); @@ -51,7 +51,7 @@ TEST(AdminClient, BrokersTimeout) constexpr int maxRetry = 5; for (int i = 0; i < maxRetry; ++i) { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); // Create Topics, -- success std::cout << "[" << kafka::utility::getCurrentTime() << "] will CreateTopics" << std::endl; @@ -77,7 +77,7 @@ TEST(AdminClient, BrokersTimeout) KafkaTestUtility::WaitMetadataSyncUpBetweenBrokers(); { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); // List Topics, -- success std::cout << "[" << kafka::utility::getCurrentTime() << "] will ListTopics" << std::endl; @@ -100,7 +100,7 @@ TEST(AdminClient, BrokersTimeout) KafkaTestUtility::PauseBrokers(); { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); // Delete Topics, -- timeout std::cout << "[" << kafka::utility::getCurrentTime() << "] will DeleteTopics" << std::endl; @@ -116,7 +116,7 @@ TEST(AdminClient, BrokersTimeout) // Since the brokers might not be ready during the short time, sometimes we have to retry... for (int i = 0; i < maxRetry; ++i) { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); // Delete Topics, -- success std::cout << "[" << kafka::utility::getCurrentTime() << "] will DeleteTopics" << std::endl; auto deleteResult = adminClient.deleteTopics({topic}); @@ -135,7 +135,7 @@ TEST(AdminClient, BrokersTimeout) KafkaTestUtility::WaitMetadataSyncUpBetweenBrokers(); { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); // List Topics, -- success std::cout << "[" << kafka::utility::getCurrentTime() << "] will ListTopics" << std::endl; diff --git a/tests/robustness/TestKafkaConsumer.cc b/tests/robustness/TestKafkaConsumer.cc index 9cf5cc08b..7bf313606 100755 --- a/tests/robustness/TestKafkaConsumer.cc +++ b/tests/robustness/TestKafkaConsumer.cc @@ -32,15 +32,16 @@ TEST(KafkaConsumer, DISABLED_AlwaysFinishClosing_ManuallyPollEvents) // Consumer properties auto props = KafkaTestUtility::GetKafkaClientCommonConfig(); - props.put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time - props.put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest"); - props.put(kafka::clients::consumer::Config::SOCKET_TIMEOUT_MS, "2000"); + props.put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false"); + props.put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1"); // Only poll 1 message each time + props.put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest"); + props.put(kafka::clients::consumer::ConsumerConfig::SOCKET_TIMEOUT_MS, "2000"); volatile std::size_t commitCbCount = 0; { // Start a consumer (which need to call `pollEvents()` to trigger the commit callback) - 
kafka::clients::KafkaConsumer consumer(props, kafka::clients::KafkaClient::EventsPollingOption::Manual); - consumer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::consumer::KafkaConsumer consumer(props.put(kafka::clients::Config::ENABLE_MANUAL_EVENTS_POLL, "true") + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe the topic @@ -98,24 +99,23 @@ TEST(KafkaConsumer, DISABLED_CommitOffsetWhileBrokersStop) // Consumer properties const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::MAX_POLL_RECORDS, "1") // Only poll 1 message each time - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::SOCKET_TIMEOUT_MS, "2000") // Just don't want to wait too long for the commit-offset callback. - .put("log_level", "7") - .put("debug", "all"); + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_AUTO_COMMIT, "false") + .put(kafka::clients::consumer::ConsumerConfig::MAX_POLL_RECORDS, "1") // Only poll 1 message each time + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::SOCKET_TIMEOUT_MS, "2000") // Just don't want to wait too long for the commit-offset callback. + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError); volatile std::size_t commitCbCount = 0; { // Start a consumer - kafka::clients::KafkaConsumer consumer(props); - consumer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe th topic consumer.subscribe({topic}, [](kafka::clients::consumer::RebalanceEventType et, const kafka::TopicPartitions& /*unused*/) { std::cout << "[" << kafka::utility::getCurrentTime() << "] rebalance-event triggered, event type[" - << (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned ? "PartitionAssigned" : "PartitionRevolked") << "]" << std::endl; + << (et == kafka::clients::consumer::RebalanceEventType::PartitionsAssigned ? 
"PartitionAssigned" : "PartitionRevoked") << "]" << std::endl; }); EXPECT_FALSE(consumer.subscription().empty()); @@ -163,12 +163,12 @@ TEST(KafkaConsumer, BrokerStopBeforeConsumerStart) // Consumer properties const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::SESSION_TIMEOUT_MS, "30000") - .put(kafka::clients::consumer::Config::ENABLE_PARTITION_EOF, "true"); + .put(kafka::clients::consumer::ConsumerConfig::SESSION_TIMEOUT_MS, "30000") + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_PARTITION_EOF, "true") + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError); // Start the consumer - kafka::clients::KafkaConsumer consumer(props); - consumer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; @@ -211,12 +211,12 @@ TEST(KafkaConsumer, BrokerStopBeforeSubscription) // Consumer properties const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::SESSION_TIMEOUT_MS, "30000") - .put(kafka::clients::consumer::Config::ENABLE_PARTITION_EOF, "true"); + .put(kafka::clients::consumer::ConsumerConfig::SESSION_TIMEOUT_MS, "30000") + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_PARTITION_EOF, "true") + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError); // Start the consumer - kafka::clients::KafkaConsumer consumer(props); - consumer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Pause the brokers for a while @@ -260,12 +260,12 @@ TEST(KafkaConsumer, BrokerStopBeforeSeek) // Consumer properties const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::SESSION_TIMEOUT_MS, "30000") - .put(kafka::clients::consumer::Config::ENABLE_PARTITION_EOF, "true"); + .put(kafka::clients::consumer::ConsumerConfig::SESSION_TIMEOUT_MS, "30000") + .put(kafka::clients::consumer::ConsumerConfig::ENABLE_PARTITION_EOF, "true") + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError); // Start the consumer - kafka::clients::KafkaConsumer consumer(props); - consumer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe the topic @@ -325,12 +325,12 @@ TEST(KafkaConsumer, BrokerStopDuringMsgPoll) // Consumer properties const auto props = KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::SESSION_TIMEOUT_MS, "30000") - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest"); // Seek to the very beginning + .put(kafka::clients::consumer::ConsumerConfig::SESSION_TIMEOUT_MS, "30000") + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") // Seek to the very beginning + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError); // Start the consumer - kafka::clients::KafkaConsumer consumer(props); - consumer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::consumer::KafkaConsumer consumer(props); std::cout << "[" << kafka::utility::getCurrentTime() << "] " << consumer.name() << " started" << std::endl; // Subscribe the topic 
diff --git a/tests/robustness/TestKafkaProducer.cc b/tests/robustness/TestKafkaProducer.cc index b22aae59c..d1dff5abe 100755 --- a/tests/robustness/TestKafkaProducer.cc +++ b/tests/robustness/TestKafkaProducer.cc @@ -14,13 +14,21 @@ TEST(KafkaProducer, RecordTimestamp) // Create topics with different "message.timestamp.type" settings { - kafka::clients::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); + kafka::clients::admin::AdminClient adminClient(KafkaTestUtility::GetKafkaClientCommonConfig()); - auto createResult = adminClient.createTopics({topicWithRecordCreateTime}, 5, 3, kafka::Properties{{{"message.timestamp.type", "CreateTime"}}}, std::chrono::minutes(1)); + auto createResult = adminClient.createTopics({topicWithRecordCreateTime}, + 5, + 3, + kafka::Properties{{{"message.timestamp.type", {"CreateTime"}}}}, + std::chrono::minutes(1)); std::cout << "[" << kafka::utility::getCurrentTime() << "] Topic[" << topicWithRecordCreateTime << "] (with CreateTime) was created, result: " << createResult.error.message() << std::endl; ASSERT_FALSE(createResult.error); - createResult = adminClient.createTopics({topicWithLogAppendTime}, 5, 3, kafka::Properties{{{"message.timestamp.type", "LogAppendTime"}}}, std::chrono::minutes(1)); + createResult = adminClient.createTopics({topicWithLogAppendTime}, + 5, + 3, + kafka::Properties{{{"message.timestamp.type", {"LogAppendTime"}}}}, + std::chrono::minutes(1)); std::cout << "[" << kafka::utility::getCurrentTime() << "] Topic[" << topicWithLogAppendTime << "] (with LogAppendTime) was created, result: " << createResult.error.message() << std::endl; ASSERT_FALSE(createResult.error); @@ -28,8 +36,8 @@ TEST(KafkaProducer, RecordTimestamp) } // Prepare a producer - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); constexpr int TIME_LAPSE_THRESHOLD_MS = 1000; using namespace std::chrono; @@ -51,8 +59,8 @@ TEST(KafkaProducer, RecordTimestamp) std::cout << "[" << kafka::utility::getCurrentTime() << "] Producer has just sent a message to topic [" << topic << "], with metadata[" << metadata.toString() << "]" << std::endl; // Poll the message - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest")); consumer.subscribe({topic}); auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); ASSERT_EQ(1, records.size()); @@ -82,8 +90,8 @@ TEST(KafkaProducer, RecordTimestamp) std::cout << "[" << kafka::utility::getCurrentTime() << "] Producer has just sent a message to topic [" << topic << "], with metadata[" << metadata.toString() << "]" << std::endl; // Poll the message - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest")); consumer.subscribe({topic}); auto records = 
KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); ASSERT_EQ(1, records.size()); @@ -123,9 +131,9 @@ TEST(KafkaProducer, NoMissedDeliveryCallback) }; { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "5000")); - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "5000") + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); // Pause the brokers for a while auto asyncTask = KafkaTestUtility::PauseBrokersForAWhile(std::chrono::seconds(5)); @@ -160,8 +168,8 @@ TEST(KafkaProducer, DeliveryCallbackTriggeredByPurgeWithinClose) std::size_t deliveryCbTriggeredCount = 0; { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); KafkaTestUtility::PauseBrokers(); @@ -204,8 +212,8 @@ TEST(KafkaProducer, BrokerStopWhileSendingMessages) std::size_t deliveryCount = 0; { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig()); - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); // Pause the brokers for a while (shorter than the default "MESSAGE_TIMEOUT_MS" for producer, which is 10 seconds) auto asyncTask = KafkaTestUtility::PauseBrokersForAWhile(std::chrono::seconds(5)); @@ -230,8 +238,8 @@ TEST(KafkaProducer, BrokerStopWhileSendingMessages) ASSERT_EQ(messages.size(), deliveryCount); // Fetch & check all messages - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest")); consumer.subscribe({topic}); auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer); EXPECT_EQ(messages.size(), records.size()); @@ -254,9 +262,9 @@ TEST(KafkaProducer, Send_AckTimeout) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "3000")); // If with no response, the delivery would fail in a short time - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "3000") // If with no response, the delivery would fail in a short time + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); // Pause the brokers for a while auto asyncTask = KafkaTestUtility::PauseBrokersForAWhile(std::chrono::seconds(5)); @@ -294,10 +302,10 @@ TEST(KafkaProducer, ManuallyPollEvents_AckTimeout) KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() -
.put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "3000"), // If with no response, the delivery would fail in a short time - kafka::clients::KafkaClient::EventsPollingOption::Manual); // Manually call `pollEvents()` - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "3000") // If with no response, the delivery would fail in a short time + .put(kafka::clients::Config::ENABLE_MANUAL_EVENTS_POLL, "true") // Manually call `pollEvents()` + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); // Pause the brokers for a while auto asyncTask = KafkaTestUtility::PauseBrokersForAWhile(std::chrono::seconds(5)); @@ -343,10 +351,10 @@ TEST(KafkaProducer, ManuallyPollEvents_AlwaysFinishClosing) std::size_t failureCount = 0; { - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "3000"), // If with no response, the delivery would fail in a short time - kafka::clients::KafkaClient::EventsPollingOption::Manual); // Manually call `pollEvents()` - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "3000") // If with no response, the delivery would fail in a short time + .put(kafka::clients::Config::ENABLE_MANUAL_EVENTS_POLL, "true") // Manually call `pollEvents()` + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); // Pause the brokers for a while auto asyncTask = KafkaTestUtility::PauseBrokersForAWhile(std::chrono::seconds(5)); @@ -377,9 +385,9 @@ TEST(KafkaProducer, SyncSend_AckTimeout) const kafka::Topic topic = kafka::utility::getRandomString(); KafkaTestUtility::CreateKafkaTopic(topic, 5, 3); - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "3000")); - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "3000") + .put("error_cb", KafkaTestUtility::DumpError)); // Pause the brokers for a while auto asyncTask = KafkaTestUtility::PauseBrokersForAWhile(std::chrono::seconds(5)); diff --git a/tests/robustness/TestTransaction.cc b/tests/robustness/TestTransaction.cc index be5b81204..ee1934a68 100644 --- a/tests/robustness/TestTransaction.cc +++ b/tests/robustness/TestTransaction.cc @@ -20,10 +20,10 @@ TEST(Transaction, DeliveryFailure) { auto record = kafka::clients::producer::ProducerRecord(topic, kafka::NullKey, kafka::Value(messageToSent.c_str(), messageToSent.size())); - kafka::clients::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::producer::Config::MESSAGE_TIMEOUT_MS, "3000") // The delivery would fail in a short timeout - .put(kafka::clients::producer::Config::TRANSACTIONAL_ID, transactionId)); - producer.setErrorCallback(KafkaTestUtility::DumpError); + kafka::clients::producer::KafkaProducer producer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::producer::ProducerConfig::MESSAGE_TIMEOUT_MS, "3000") // The delivery would fail in a short timeout + 
.put(kafka::clients::producer::ProducerConfig::TRANSACTIONAL_ID, transactionId) + .put(kafka::clients::Config::ERROR_CB, KafkaTestUtility::DumpError)); std::cout << "[" << kafka::utility::getCurrentTime() << "] Producer created." << std::endl; @@ -65,9 +65,9 @@ TEST(Transaction, DeliveryFailure) // Check all received messages (including uncommitted) { - kafka::clients::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() - .put(kafka::clients::consumer::Config::AUTO_OFFSET_RESET, "earliest") - .put(kafka::clients::consumer::Config::ISOLATION_LEVEL, "read_uncommitted")); + kafka::clients::consumer::KafkaConsumer consumer(KafkaTestUtility::GetKafkaClientCommonConfig() + .put(kafka::clients::consumer::ConsumerConfig::AUTO_OFFSET_RESET, "earliest") + .put(kafka::clients::consumer::ConsumerConfig::ISOLATION_LEVEL, "read_uncommitted")); consumer.subscribe({topic}); auto records = KafkaTestUtility::ConsumeMessagesUntilTimeout(consumer, std::chrono::seconds(1)); diff --git a/tests/unit/TestKafkaClientDefaultProperties.cc b/tests/unit/TestKafkaClientDefaultProperties.cc index 7a4dadfad..a46f57706 100644 --- a/tests/unit/TestKafkaClientDefaultProperties.cc +++ b/tests/unit/TestKafkaClientDefaultProperties.cc @@ -16,7 +16,7 @@ namespace { // Here we even don't need a valid bootstrap server address -const kafka::Properties commonProps({{"bootstrap.servers", "127.0.0.1:9092"}, {"log_level", "0"}}); +const kafka::Properties commonProps({{"bootstrap.servers", {"127.0.0.1:9092"}}, {"log_level", {"0"}}}); using KVMap = std::vector<std::pair<std::string, std::string>>; @@ -51,19 +51,19 @@ TEST(KafkaClient, KafkaProducerDefaultProperties) const KVMap expectedKVs = { - // { Config::ACKS, "-1" }, - { Config::QUEUE_BUFFERING_MAX_MESSAGES, "100000" }, - { Config::QUEUE_BUFFERING_MAX_KBYTES, "1048576" }, // 0x100000 - { Config::LINGER_MS, "5" }, - { Config::BATCH_NUM_MESSAGES, "10000" }, - { Config::BATCH_SIZE, "1000000" }, - { Config::MESSAGE_MAX_BYTES, "1000000" }, - // { Config::MESSAGE_TIMEOUT_MS, "300000" }, - // { Config::REQUEST_TIMEOUT_MS, "30000" }, - // { Config::PARTITIONER, "consistent_random" }, - { Config::SECURITY_PROTOCOL, "plaintext" }, - { Config::MAX_IN_FLIGHT, "1000000" }, - { Config::ENABLE_IDEMPOTENCE, "false" }, + // { ProducerConfig::ACKS, "-1" }, + { ProducerConfig::QUEUE_BUFFERING_MAX_MESSAGES, "100000" }, + { ProducerConfig::QUEUE_BUFFERING_MAX_KBYTES, "1048576" }, // 0x100000 + { ProducerConfig::LINGER_MS, "5" }, + { ProducerConfig::BATCH_NUM_MESSAGES, "10000" }, + { ProducerConfig::BATCH_SIZE, "1000000" }, + { ProducerConfig::MESSAGE_MAX_BYTES, "1000000" }, + // { ProducerConfig::MESSAGE_TIMEOUT_MS, "300000" }, + // { ProducerConfig::REQUEST_TIMEOUT_MS, "30000" }, + // { ProducerConfig::PARTITIONER, "consistent_random" }, + { ProducerConfig::SECURITY_PROTOCOL, "plaintext" }, + { ProducerConfig::MAX_IN_FLIGHT, "1000000" }, + { ProducerConfig::ENABLE_IDEMPOTENCE, "false" }, }; EXPECT_TRUE(checkProperties("KafkaProducer", producer, expectedKVs)); @@ -73,57 +73,55 @@ TEST(KafkaClient, KafkaProducerDefaultProperties) { auto props = commonProps; - props.put(Config::ENABLE_IDEMPOTENCE, "true"); + props.put(ProducerConfig::ENABLE_IDEMPOTENCE, "true"); const KafkaProducer producer(props); const KVMap expectedKVs = { - { Config::MAX_IN_FLIGHT, "5" }, - { Config::ENABLE_IDEMPOTENCE, "true" }, + { ProducerConfig::MAX_IN_FLIGHT, "5" }, + { ProducerConfig::ENABLE_IDEMPOTENCE, "true" }, }; EXPECT_TRUE(checkProperties("KafkaProducer[enable.idempotence=true]", producer, expectedKVs)); } } - TEST(KafkaClient,
KafkaConsumerDefaultProperties) { using namespace kafka::clients; using namespace kafka::clients::consumer; { - auto props = commonProps; - props.put(Config::ENABLE_AUTO_COMMIT, "true"); - const KafkaConsumer consumer(props); + const KafkaConsumer consumer(commonProps); const KVMap expectedKVs = { - { Config::ENABLE_AUTO_COMMIT, "true" }, - { Config::ENABLE_PARTITION_EOF, "false" }, - { Config::MAX_POLL_RECORDS, "500" }, - { Config::QUEUED_MIN_MESSAGES, "100000" }, - { Config::SESSION_TIMEOUT_MS, "45000" }, - { Config::SOCKET_TIMEOUT_MS, "60000" }, - { Config::SECURITY_PROTOCOL, "plaintext" }, - { Config::ENABLE_AUTO_COMMIT, "true" }, - { "auto.commit.interval.ms", "0" }, - { "enable.auto.offset.store", "true" } + { ConsumerConfig::ENABLE_AUTO_COMMIT, "true" }, + { ConsumerConfig::ENABLE_PARTITION_EOF, "false" }, + { ConsumerConfig::MAX_POLL_RECORDS, "500" }, + { ConsumerConfig::QUEUED_MIN_MESSAGES, "100000" }, + { ConsumerConfig::SESSION_TIMEOUT_MS, "45000" }, + { ConsumerConfig::SOCKET_TIMEOUT_MS, "60000" }, + { ConsumerConfig::SECURITY_PROTOCOL, "plaintext" }, + { "auto.commit.interval.ms", "0" }, + { "enable.auto.offset.store", "true" } }; EXPECT_TRUE(checkProperties("KafkaConsumer[enable.auto.commit=true]", consumer, expectedKVs)); // Interesting, -- no default for AUTO_OFFSET_RESET within librdkafka - EXPECT_FALSE(consumer.getProperty(Config::AUTO_OFFSET_RESET)); + EXPECT_FALSE(consumer.getProperty(ConsumerConfig::AUTO_OFFSET_RESET)); } KafkaTestUtility::PrintDividingLine(); { - const KafkaConsumer consumer(commonProps); + auto props = commonProps; + props.put(ConsumerConfig::ENABLE_AUTO_COMMIT, "false"); + const KafkaConsumer consumer(props); const KVMap expectedKVs = { - { Config::ENABLE_AUTO_COMMIT, "false" }, + { ConsumerConfig::ENABLE_AUTO_COMMIT, "false" }, }; EXPECT_TRUE(checkProperties("KafkaConsumer[enable.auto.commit=false]", consumer, expectedKVs)); } diff --git a/tests/unit/TestProperties.cc b/tests/unit/TestProperties.cc index e29419064..f4ea6f3dc 100644 --- a/tests/unit/TestProperties.cc +++ b/tests/unit/TestProperties.cc @@ -1,7 +1,10 @@ #include "kafka/AdminClientConfig.h" +#include "kafka/ClientConfig.h" #include "kafka/ConsumerConfig.h" +#include "kafka/Interceptors.h" #include "kafka/ProducerConfig.h" #include "kafka/Properties.h" +#include "kafka/Utility.h" #include "gtest/gtest.h" @@ -13,46 +16,68 @@ TEST(Properties, Basic) props.put("bootstrap.servers", "127.0.0.1:9000,127.0.0.1:9001"); props.put("auto.offset.reset", "earliest"); props.put("max.poll.records", "500"); + props.put("log_cb", [](int /*level*/, const char* /*filename*/, int /*lineno*/, const char* msg) { + std::cout << "log_cb: [" << kafka::utility::getCurrentTime() << "]" << msg << std::endl; + }); + props.put("error_cb", [](const kafka::Error& err) { + std::cout << "error_cb: [" << kafka::utility::getCurrentTime() << "]" << err.toString() << std::endl; + }); + props.put("stats_cb", [](const std::string& stats) { + std::cout << "stats_cb: [" << kafka::utility::getCurrentTime() << "]" << stats << std::endl; + }); + props.put("interceptors", kafka::clients::Interceptors{}); // Fetch a property auto getBootstrapServers = props.getProperty("bootstrap.servers"); ASSERT_TRUE(getBootstrapServers); - EXPECT_EQ("127.0.0.1:9000,127.0.0.1:9001", *getBootstrapServers); // NOLINT + EXPECT_EQ("127.0.0.1:9000,127.0.0.1:9001", *getBootstrapServers); // NOLINT // Remove a property props.eraseProperty("bootstrap.servers"); EXPECT_FALSE(props.getProperty("bootstrap.servers")); // To string -
EXPECT_EQ("auto.offset.reset=earliest|max.poll.records=500", props.toString()); + const std::regex re(R"(auto\.offset\.reset=earliest\|error_cb=.+\|interceptors=.+\|log_cb=.+\|max\.poll\.records=500\|stats_cb=.+)"); + EXPECT_TRUE(std::regex_match(props.toString(), re)); // Get the internal map ref - const auto& m = props.map(); - EXPECT_EQ(2, m.size()); + EXPECT_EQ(6, props.map().size()); // Initialize with initializer list + kafka::clients::Interceptors interceptors; + interceptors.onThreadStart([](const std::string& threadName, const std::string& /*threadType*/) { + std::cout << threadName << " started!" << std::endl; + }) + .onThreadExit([](const std::string& threadName, const std::string& /*threadType*/) { + std::cout << threadName << " exited!" << std::endl; + }); + kafka::Properties anotherProps {{ - { "bootstrap.servers", "127.0.0.1:9000,127.0.0.1:9001" }, - { "auto.offset.reset", "earliest" }, - { "max.poll.records", "500" } + { "bootstrap.servers", { "127.0.0.1:9000,127.0.0.1:9001"} }, + { "auto.offset.reset", { "earliest" } }, + { "max.poll.records", { "500" } }, + { "error_cb", { [](const kafka::Error& error) { std::cout << "error_cb: [" << kafka::utility::getCurrentTime() << "]" << error.toString() << std::endl; } } }, + { "interceptors", { interceptors } } }}; + std::cout << anotherProps.toString() << std::endl; + // Assignment anotherProps = props; EXPECT_EQ(props, anotherProps); } - TEST(Properties, ConsumerConfig) { + using namespace kafka::clients; using namespace kafka::clients::consumer; const Config props {{ - { Config::BOOTSTRAP_SERVERS, "127.0.0.1:9000,127.0.0.1:9001" }, - { Config::AUTO_OFFSET_RESET, "earliest" }, - { Config::ENABLE_PARTITION_EOF, "false" } + { Config::BOOTSTRAP_SERVERS, { "127.0.0.1:9000,127.0.0.1:9001" } }, + { ConsumerConfig::AUTO_OFFSET_RESET, { "earliest" } }, + { ConsumerConfig::ENABLE_PARTITION_EOF, { "false" } } }}; EXPECT_EQ("auto.offset.reset=earliest|bootstrap.servers=127.0.0.1:9000,127.0.0.1:9001|enable.partition.eof=false", props.toString()); @@ -60,13 +85,14 @@ TEST(Properties, ConsumerConfig) TEST(Properties, ProducerConfig) { + using namespace kafka::clients; using namespace kafka::clients::producer; - const Config props + const ProducerConfig props {{ - { Config::BOOTSTRAP_SERVERS, "127.0.0.1:9000,127.0.0.1:9001" }, - { Config::LINGER_MS, "20" }, - { Config::ENABLE_IDEMPOTENCE, "true" } + { Config::BOOTSTRAP_SERVERS, { "127.0.0.1:9000,127.0.0.1:9001" } }, + { ProducerConfig::LINGER_MS, { "20" } }, + { ProducerConfig::ENABLE_IDEMPOTENCE, { "true" } } }}; EXPECT_EQ("bootstrap.servers=127.0.0.1:9000,127.0.0.1:9001|enable.idempotence=true|linger.ms=20", props.toString()); @@ -74,11 +100,13 @@ TEST(Properties, ProducerConfig) TEST(Properties, AdminClientConfig) { + using namespace kafka::clients; using namespace kafka::clients::admin; - const Config props + + const AdminClientConfig props {{ - { Config::BOOTSTRAP_SERVERS, "127.0.0.1:9000,127.0.0.1:9001" }, - { Config::SECURITY_PROTOCOL, "SASL_PLAINTEXT" } + { Config::BOOTSTRAP_SERVERS, { "127.0.0.1:9000,127.0.0.1:9001" } }, + { Config::SECURITY_PROTOCOL, { "SASL_PLAINTEXT" } } }}; EXPECT_EQ("bootstrap.servers=127.0.0.1:9000,127.0.0.1:9001|security.protocol=SASL_PLAINTEXT", props.toString()); @@ -88,11 +116,77 @@ TEST(Properties, SensitiveProperties) { const kafka::Properties props {{ - { "ssl.key.password", "passwordA" }, - { "ssl.keystore.password", "passwordB" }, - { "sasl.username", "userName" }, - { "sasl.password", "passwordC" }, + { "ssl.key.password", { "passwordA" } }, + { 
"ssl.keystore.password", { "passwordB" } }, + { "sasl.username", { "userName" } }, + { "sasl.password", { "passwordC" } }, + { "ssl.key.pem", { "pem" } }, + { "ssl_key", { "key" } }, }}; - EXPECT_EQ("sasl.password=*|sasl.username=*|ssl.key.password=*|ssl.keystore.password=*", props.toString()); + EXPECT_EQ("sasl.password=*|sasl.username=*|ssl.key.password=*|ssl.key.pem=*|ssl.keystore.password=*|ssl_key=*", props.toString()); } + +TEST(Properties, Validation) +{ + kafka::Properties props; + + props.put("whatever", "somevalue"); + + // Test with invalid keys + auto tryWithInvalidKey = [&props](auto v) + { + try + { + props.put("invalid_key", v); + return false; + } + catch (const std::runtime_error& e) + { + std::cout << "Exception caught: " << e.what() << std::endl; + } + return true; + }; + + EXPECT_TRUE(tryWithInvalidKey([](int /*level*/, const char* /*filename*/, int /*lineno*/, const char* msg) { std::cout << msg << std::endl; })); + EXPECT_TRUE(tryWithInvalidKey([](const kafka::Error& err) { std::cerr << err.toString() << std::endl; })); + EXPECT_TRUE(tryWithInvalidKey([](const std::string& stats) { std::cout << stats << std::endl; })); + const kafka::clients::OauthbearerTokenRefreshCallback oauthTokenRefreshCb = [](const std::string&) { return kafka::clients::SaslOauthbearerToken(); }; + EXPECT_TRUE(tryWithInvalidKey(oauthTokenRefreshCb)); + EXPECT_TRUE(tryWithInvalidKey(kafka::clients::Interceptors{})); + + // Test with invalid values + const auto tryWithInvalidValue = [&props](const std::string& key) + { + try + { + props.put(key, "haha"); + return false; + } + catch (const std::runtime_error& e) + { + std::cout << "exception caught: " << e.what() << std::endl; + } + return true; + }; + + EXPECT_TRUE(tryWithInvalidValue(kafka::clients::Config::LOG_CB)); + EXPECT_TRUE(tryWithInvalidValue(kafka::clients::Config::ERROR_CB)); + EXPECT_TRUE(tryWithInvalidValue(kafka::clients::Config::STATS_CB)); + EXPECT_TRUE(tryWithInvalidValue(kafka::clients::Config::OAUTHBEARER_TOKEN_REFRESH_CB)); + EXPECT_TRUE(tryWithInvalidValue(kafka::clients::Config::INTERCEPTORS)); + + // Failure within constructor + try + { + const kafka::Properties properties = {{ + { "interceptorsxx", { kafka::clients::Interceptors{} } }, + }}; + EXPECT_FALSE(true); + } + catch (const std::runtime_error& e) + { + std::cout << "exception caught: " << e.what() << std::endl; + } +} + diff --git a/tests/utils/TestUtility.h b/tests/utils/TestUtility.h index 9431e4132..932ee8d56 100644 --- a/tests/utils/TestUtility.h +++ b/tests/utils/TestUtility.h @@ -136,8 +136,8 @@ const auto MAX_OFFSET_COMMIT_TIMEOUT = std::chrono::seconds(15); const auto MAX_DELIVERY_TIMEOUT = std::chrono::seconds(5); inline std::vector -ConsumeMessagesUntilTimeout(kafka::clients::KafkaConsumer& consumer, - std::chrono::milliseconds timeout = MAX_POLL_MESSAGES_TIMEOUT) +ConsumeMessagesUntilTimeout(kafka::clients::consumer::KafkaConsumer& consumer, + std::chrono::milliseconds timeout = MAX_POLL_MESSAGES_TIMEOUT) { std::vector records; @@ -179,8 +179,7 @@ WaitUntil(const std::function& checkDone, std::chrono::milliseconds time inline std::vector ProduceMessages(const std::string& topic, int partition, const std::vector>& msgs) { - kafka::clients::KafkaProducer producer(GetKafkaClientCommonConfig()); - producer.setLogLevel(kafka::Log::Level::Crit); + kafka::clients::producer::KafkaProducer producer(GetKafkaClientCommonConfig().put(kafka::clients::Config::LOG_LEVEL, "1")); std::vector ret; for (const auto& msg: msgs) @@ -198,7 +197,7 @@ ProduceMessages(const 
std::string& topic, int partition, const std::vector ParseArguments(int argc, char **argv) return args; } -void RunConsumer(const std::string& topic, const kafka::clients::consumer::Config& props) +void RunConsumer(const std::string& topic, const kafka::clients::Config& props) { using namespace kafka::clients; using namespace kafka::clients::consumer; - // Create a manual-commit consumer - KafkaClient::setGlobalLogger(kafka::Logger()); + + // Create an auto-commit consumer KafkaConsumer consumer(props); // Subscribe to topic @@ -141,9 +141,8 @@ int main (int argc, char **argv) // Use Ctrl-C to terminate the program signal(SIGINT, stopRunning); // NOLINT + using namespace kafka::clients; // Prepare consumer properties - // - using namespace kafka::clients::consumer; Config props; props.put(Config::BOOTSTRAP_SERVERS, boost::algorithm::join(args->brokerList, ",")); // Get client id @@ -155,6 +154,8 @@ int main (int argc, char **argv) { props.put(prop.first, prop.second); } + // Disable logging + props.put(Config::LOG_CB, kafka::NullLogger); // Start consumer try diff --git a/tools/KafkaConsoleProducer.cc b/tools/console_clients/KafkaConsoleProducer.cc similarity index 88% rename from tools/KafkaConsoleProducer.cc rename to tools/console_clients/KafkaConsoleProducer.cc index 8fb321afb..7eb22e974 100644 --- a/tools/KafkaConsoleProducer.cc +++ b/tools/console_clients/KafkaConsoleProducer.cc @@ -76,6 +76,7 @@ std::unique_ptr ParseArguments(int argc, char **argv) int main (int argc, char **argv) { using namespace kafka::clients; + using namespace kafka::clients::producer; try { @@ -85,20 +86,21 @@ int main (int argc, char **argv) if (!args) return EXIT_SUCCESS; // Only for "help" // Prepare producer properties - producer::Config props; - props.put(producer::Config::BOOTSTRAP_SERVERS, boost::algorithm::join(args->brokerList, ",")); + ProducerConfig props; + props.put(Config::BOOTSTRAP_SERVERS, boost::algorithm::join(args->brokerList, ",")); // Get client id std::ostringstream oss; oss << "producer-" << std::this_thread::get_id(); - props.put(producer::Config::CLIENT_ID, oss.str()); + props.put(Config::CLIENT_ID, oss.str()); // For other properties user assigned for (const auto& prop: args->props) { props.put(prop.first, prop.second); } + // Disable logging + props.put(Config::LOG_CB, kafka::NullLogger); - // Create a sync-send producer - KafkaClient::setGlobalLogger(kafka::Logger()); + // Create a producer KafkaProducer producer(props); auto startPromptLine = []() { std::cout << "> "; }; @@ -130,9 +132,9 @@ int main (int argc, char **argv) startPromptLine(); } } - catch (const std::exception& e) + catch (const kafka::KafkaException& e) { - std::cout << e.what() << std::endl; + std::cerr << "Exception thrown by producer: " << e.what() << std::endl; return EXIT_FAILURE; } diff --git a/tools/KafkaTopics.cc b/tools/console_clients/KafkaTopics.cc similarity index 98% rename from tools/KafkaTopics.cc rename to tools/console_clients/KafkaTopics.cc index d81d226c9..02b99f197 100644 --- a/tools/KafkaTopics.cc +++ b/tools/console_clients/KafkaTopics.cc @@ -140,6 +140,7 @@ std::unique_ptr ParseArguments(int argc, char **argv) int main (int argc, char **argv) { using namespace kafka::clients; + using namespace kafka::clients::admin; try { @@ -149,7 +150,7 @@ int main (int argc, char **argv) if (!args) return EXIT_SUCCESS; // Only for "help" kafka::Properties adminConf = args->adminConfig; - adminConf.put(admin::Config::BOOTSTRAP_SERVERS, args->broker); + adminConf.put(Config::BOOTSTRAP_SERVERS, args->broker);
AdminClient adminClient(adminConf);
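Taken together, the unit-test and tool changes above exercise the producer/admin side of the same configuration model: `ProducerConfig` / `AdminClientConfig` replace the old nested `producer::Config` / `admin::Config` names, `Properties` entries are brace-initialized (a value may be a plain string or a callback such as `log_cb` / `error_cb`), and client logging can be silenced by putting `kafka::NullLogger` under `Config::LOG_CB`. A minimal producer sketch under those assumptions; the broker address, topic name and payload are placeholders, not taken from this patch:

```cpp
#include <kafka/KafkaProducer.h>

#include <cstdlib>
#include <iostream>
#include <string>

int main()
{
    using namespace kafka;
    using namespace kafka::clients;
    using namespace kafka::clients::producer;

    // "localhost:9092", "some-topic" and the payload are placeholders for illustration only.
    ProducerConfig props {{
        { Config::BOOTSTRAP_SERVERS,          { "localhost:9092" } },
        { ProducerConfig::LINGER_MS,          { "20" } },
        { ProducerConfig::ENABLE_IDEMPOTENCE, { "true" } },
    }};
    props.put(Config::LOG_CB, NullLogger);              // Silence client logging, as the console tools now do
    props.put(Config::ERROR_CB, [](const Error& err) {  // Error handling is ordinary configuration, too
        std::cerr << err.toString() << std::endl;
    });

    const std::string payload = "hello, modern-cpp-kafka";

    try
    {
        KafkaProducer producer(props);

        auto record = ProducerRecord("some-topic", NullKey, Value(payload.c_str(), payload.size()));
        producer.send(record,
                      [](const RecordMetadata& metadata, const Error& error) {
                          if (error) std::cerr << error.toString() << std::endl;
                          else       std::cout << "Delivered: " << metadata.toString() << std::endl;
                      });
        // The producer is closed (and outstanding messages flushed) when it leaves this scope.
    }
    catch (const KafkaException& e)
    {
        std::cerr << "Exception thrown by producer: " << e.what() << std::endl;
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
```

As in the reworked `KafkaConsoleProducer.cc`, failures surface as `kafka::KafkaException`, so construction and sending sit inside a try/catch.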