From 0de26fb0ce01aa3ea0db8cd949c3761aa6d67b19 Mon Sep 17 00:00:00 2001 From: Michael Richmond Date: Mon, 21 Aug 2023 16:42:17 -0700 Subject: [PATCH 01/51] Fix out of date filename --- cmake/BoostRedisConfig.cmake.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/BoostRedisConfig.cmake.in b/cmake/BoostRedisConfig.cmake.in index f0d6ff06..c620b40c 100644 --- a/cmake/BoostRedisConfig.cmake.in +++ b/cmake/BoostRedisConfig.cmake.in @@ -1,4 +1,4 @@ @PACKAGE_INIT@ -include("${CMAKE_CURRENT_LIST_DIR}/Aedis.cmake") +include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@.cmake") check_required_components("@PROJECT_NAME@") From 7d09040646c4a58a7080ed37cba4b9e2ecfaaba8 Mon Sep 17 00:00:00 2001 From: Michael Richmond Date: Mon, 21 Aug 2023 16:42:41 -0700 Subject: [PATCH 02/51] Bump version number --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c9022131..5c4347d5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,7 +13,7 @@ endif() project( boost_redis - VERSION 1.4.1 + VERSION 1.4.2 DESCRIPTION "A redis client library" HOMEPAGE_URL "https://boostorg.github.io/redis/" LANGUAGES CXX From 4fbd0c685317dc8848da45f78606766f4a19e712 Mon Sep 17 00:00:00 2001 From: Marcelo Zimbres Date: Sun, 13 Aug 2023 20:23:57 +0200 Subject: [PATCH 03/51] Progreeses with the adapter type erasure. --- CMakePresets.json | 19 + README.md | 12 + examples/cpp20_chat_room.cpp | 8 +- examples/cpp20_subscriber.cpp | 8 +- examples/main.cpp | 2 +- include/boost/redis/connection.hpp | 21 + .../boost/redis/detail/connection_base.hpp | 630 ++++++++---------- include/boost/redis/detail/health_checker.hpp | 66 +- include/boost/redis/detail/runner.hpp | 34 +- include/boost/redis/impl/logger.ipp | 89 ++- include/boost/redis/logger.hpp | 40 ++ include/boost/redis/resp3/impl/parser.ipp | 11 + include/boost/redis/resp3/parser.hpp | 16 +- tests/test_conn_check_health.cpp | 5 +- tests/test_conn_echo_stress.cpp | 10 +- tests/test_conn_exec.cpp | 6 +- tests/test_conn_exec_error.cpp | 4 +- tests/test_conn_push.cpp | 26 +- tests/test_conn_tls.cpp | 42 +- tests/test_issue_50.cpp | 8 +- tests/test_low_level_async.cpp | 2 +- 21 files changed, 645 insertions(+), 414 deletions(-) diff --git a/CMakePresets.json b/CMakePresets.json index c68fbf34..bac4c390 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -90,6 +90,23 @@ "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/clang++-13/doc/" } }, + { + "name": "clang++-14", + "generator": "Unix Makefiles", + "hidden": false, + "inherits": ["cmake-pedantic"], + "binaryDir": "${sourceDir}/build/clang++-14", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug", + "CMAKE_CXX_EXTENSIONS": "OFF", + "CMAKE_CXX_FLAGS": "-Wall -Wextra -fsanitize=address", + "CMAKE_CXX_COMPILER": "clang++-14", + "CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address", + "CMAKE_CXX_STANDARD_REQUIRED": "ON", + "PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-14", + "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/clang++-14/doc/" + } + }, { "name": "libc++-14-cpp17", "generator": "Unix Makefiles", @@ -143,6 +160,7 @@ { "name": "g++-11", "configurePreset": "g++-11" }, { "name": "g++-11-release", "configurePreset": "g++-11-release" }, { "name": "clang++-13", "configurePreset": "clang++-13" }, + { "name": "clang++-14", "configurePreset": "clang++-14" }, { "name": "libc++-14-cpp17", "configurePreset": "libc++-14-cpp17" }, { "name": "libc++-14-cpp20", "configurePreset": "libc++-14-cpp20" }, { "name": "clang-tidy", "configurePreset": "clang-tidy" } 
@@ -158,6 +176,7 @@ { "name": "g++-11", "configurePreset": "g++-11", "inherits": ["test"] }, { "name": "g++-11-release", "configurePreset": "g++-11-release", "inherits": ["test"] }, { "name": "clang++-13", "configurePreset": "clang++-13", "inherits": ["test"] }, + { "name": "clang++-14", "configurePreset": "clang++-14", "inherits": ["test"] }, { "name": "libc++-14-cpp17", "configurePreset": "libc++-14-cpp17", "inherits": ["test"] }, { "name": "libc++-14-cpp20", "configurePreset": "libc++-14-cpp20", "inherits": ["test"] }, { "name": "clang-tidy", "configurePreset": "clang-tidy", "inherits": ["test"] } diff --git a/README.md b/README.md index cf1d4d59..355316a1 100644 --- a/README.md +++ b/README.md @@ -676,6 +676,18 @@ https://lists.boost.org/Archives/boost/2023/01/253944.php. ### develop (incorporates changes to conform the boost review and more) +* Deprecates the `async_receive` overload that takes a response. Users + should now first call `set_receive_response` to avoid contantly seting + the same response. + +* Uses `std::function` to type erase the response adapter. This change + should not influence users in any way but allowed important + simplification in the connections internals. This resulted in big + performance improvement where one of my benchmark programs passed + from 190k/s to 473k/s. + +### v1.4.2 (incorporates changes to conform the boost review and more) + * Adds `boost::redis::config::database_index` to make it possible to choose a database before starting running commands e.g. after an automatic reconnection. diff --git a/examples/cpp20_chat_room.cpp b/examples/cpp20_chat_room.cpp index 24c4a025..bc9a73e7 100644 --- a/examples/cpp20_chat_room.cpp +++ b/examples/cpp20_chat_room.cpp @@ -39,15 +39,17 @@ receiver(std::shared_ptr conn) -> net::awaitable request req; req.push("SUBSCRIBE", "channel"); + generic_response resp; + conn->set_receive_response(resp); + while (conn->will_reconnect()) { // Subscribe to channels. co_await conn->async_exec(req, ignore, net::deferred); // Loop reading Redis push messages. - for (generic_response resp;;) { - error_code ec; - co_await conn->async_receive(resp, redirect_error(use_awaitable, ec)); + for (error_code ec;;) { + co_await conn->async_receive(redirect_error(use_awaitable, ec)); if (ec) break; // Connection lost, break so we can reconnect to channels. std::cout diff --git a/examples/cpp20_subscriber.cpp b/examples/cpp20_subscriber.cpp index 69884705..30dcb308 100644 --- a/examples/cpp20_subscriber.cpp +++ b/examples/cpp20_subscriber.cpp @@ -52,6 +52,9 @@ receiver(std::shared_ptr conn) -> net::awaitable request req; req.push("SUBSCRIBE", "channel"); + generic_response resp; + conn->set_receive_response(resp); + // Loop while reconnection is enabled while (conn->will_reconnect()) { @@ -59,9 +62,8 @@ receiver(std::shared_ptr conn) -> net::awaitable co_await conn->async_exec(req, ignore, net::deferred); // Loop reading Redis pushs messages. - for (generic_response resp;;) { - error_code ec; - co_await conn->async_receive(resp, net::redirect_error(net::use_awaitable, ec)); + for (error_code ec;;) { + co_await conn->async_receive(net::redirect_error(net::use_awaitable, ec)); if (ec) break; // Connection lost, break so we can reconnect to channels. 
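// Illustrative sketch only: the receive flow after this change, condensed from the
// subscriber example above. Names (connection, request, generic_response, error_code,
// net::*) are the aliases used in these examples; this is not a complete program.

   generic_response resp;
   conn->set_receive_response(resp);        // Bind the response once, up front.

   request req;
   req.push("SUBSCRIBE", "channel");
   co_await conn->async_exec(req, ignore, net::deferred);

   for (error_code ec;;) {
      // The deprecated overload taking a response is no longer needed here.
      co_await conn->async_receive(net::redirect_error(net::use_awaitable, ec));
      if (ec)
         break;                             // Connection lost, reconnect and resubscribe.
      // ... consume resp.value(), then clear it before the next push ...
      resp.value().clear();
   }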
std::cout diff --git a/examples/main.cpp b/examples/main.cpp index 78e0a56a..f2d79213 100644 --- a/examples/main.cpp +++ b/examples/main.cpp @@ -30,7 +30,7 @@ auto main(int argc, char * argv[]) -> int } net::io_context ioc; - net::co_spawn(ioc, std::move(co_main(cfg)), [](std::exception_ptr p) { + net::co_spawn(ioc, co_main(cfg), [](std::exception_ptr p) { if (p) std::rethrow_exception(p); }); diff --git a/include/boost/redis/connection.hpp b/include/boost/redis/connection.hpp index a3d573ab..1721a82b 100644 --- a/include/boost/redis/connection.hpp +++ b/include/boost/redis/connection.hpp @@ -184,10 +184,15 @@ class basic_connection { * Where the second parameter is the size of the push received in * bytes. */ + template > + auto async_receive(CompletionToken token = CompletionToken{}) + { return impl_.async_receive(std::move(token)); } + template < class Response = ignore_t, class CompletionToken = asio::default_completion_token_t > + [[deprecated("Set the response with set_receive_response and use the other overload.")]] auto async_receive( Response& response, @@ -282,6 +287,11 @@ class basic_connection { auto const& next_layer() const noexcept { return impl_.next_layer(); } + /// Sets the response object of `async_receive` operations. + template + void set_receive_response(Response& response) + { impl_.set_receive_response(response); } + private: using timer_type = asio::basic_waitable_timer< @@ -342,11 +352,17 @@ class connection { /// Calls `boost::redis::basic_connection::async_receive`. template + [[deprecated("Set the response with set_receive_response and use the other overload.")]] auto async_receive(Response& response, CompletionToken token) { return impl_.async_receive(response, std::move(token)); } + /// Calls `boost::redis::basic_connection::async_receive`. + template + auto async_receive(CompletionToken token) + { return impl_.async_receive(std::move(token)); } + /// Calls `boost::redis::basic_connection::async_exec`. template auto async_exec(request const& req, Response& resp, CompletionToken token) @@ -373,6 +389,11 @@ class connection { void reset_stream() { impl_.reset_stream();} + /// Sets the response object of `async_receive` operations. + template + void set_receive_response(Response& response) + { impl_.set_receive_response(response); } + private: void async_run_impl( diff --git a/include/boost/redis/detail/connection_base.hpp b/include/boost/redis/detail/connection_base.hpp index f2f5f35e..ed399a85 100644 --- a/include/boost/redis/detail/connection_base.hpp +++ b/include/boost/redis/detail/connection_base.hpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -37,278 +38,80 @@ #include #include #include +#include namespace boost::redis::detail { template -struct wait_receive_op { - Conn* conn_; - asio::coroutine coro{}; - - template - void - operator()(Self& self , system::error_code ec = {}) - { - BOOST_ASIO_CORO_REENTER (coro) - { - conn_->read_op_timer_.cancel(); - - BOOST_ASIO_CORO_YIELD - conn_->read_op_timer_.async_wait(std::move(self)); - if (!conn_->is_open() || is_cancelled(self)) { - self.complete(!!ec ? 
ec : asio::error::operation_aborted); - return; - } - self.complete({}); - } - } -}; - -template -class read_next_op { -public: - using req_info_type = typename Conn::req_info; - using req_info_ptr = typename std::shared_ptr; - -private: - Conn* conn_; - req_info_ptr info_; - Adapter adapter_; - std::size_t cmds_ = 0; - std::size_t read_size_ = 0; - std::size_t index_ = 0; - asio::coroutine coro_{}; - -public: - read_next_op(Conn& conn, Adapter adapter, req_info_ptr info) - : conn_{&conn} - , info_{info} - , adapter_{adapter} - , cmds_{info->get_number_of_commands()} - {} - - auto make_adapter() noexcept - { - return [i = index_, adpt = adapter_] (resp3::basic_node const& nd, system::error_code& ec) mutable { adpt(i, nd, ec); }; - } - - template - void - operator()( Self& self - , system::error_code ec = {} - , std::size_t n = 0) - { - BOOST_ASIO_CORO_REENTER (coro_) - { - // Loop reading the responses to this request. - while (cmds_ != 0) { - if (info_->stop_requested()) { - self.complete(asio::error::operation_aborted, 0); - return; - } - - //----------------------------------- - // If we detect a push in the middle of a request we have - // to hand it to the push consumer. To do that we need - // some data in the read bufer. - if (conn_->read_buffer_.empty()) { - - if (conn_->use_ssl()) { - BOOST_ASIO_CORO_YIELD - asio::async_read_until(conn_->next_layer(), conn_->dbuf_, resp3::parser::sep, std::move(self)); - } else { - BOOST_ASIO_CORO_YIELD - asio::async_read_until(conn_->next_layer().next_layer(), conn_->dbuf_, resp3::parser::sep, std::move(self)); - } - - BOOST_REDIS_CHECK_OP1(conn_->cancel(operation::run);); - if (info_->stop_requested()) { - self.complete(asio::error::operation_aborted, 0); - return; - } - } - - // If the next request is a push we have to handle it to - // the receive_op wait for it to be done and continue. - if (resp3::to_type(conn_->read_buffer_.front()) == resp3::type::push) { - BOOST_ASIO_CORO_YIELD - conn_->async_wait_receive(std::move(self)); - BOOST_REDIS_CHECK_OP1(conn_->cancel(operation::run);); - continue; - } - //----------------------------------- - - if (conn_->use_ssl()) { - BOOST_ASIO_CORO_YIELD - redis::detail::async_read(conn_->next_layer(), conn_->dbuf_, make_adapter(), std::move(self)); - } else { - BOOST_ASIO_CORO_YIELD - redis::detail::async_read(conn_->next_layer().next_layer(), conn_->dbuf_, make_adapter(), std::move(self)); - } - - ++index_; - - if (ec || redis::detail::is_cancelled(self)) { - conn_->cancel(operation::run); - self.complete(!!ec ? ec : asio::error::operation_aborted, {}); - return; - } - - conn_->dbuf_.consume(n); - read_size_ += n; - - BOOST_ASSERT(cmds_ != 0); - --cmds_; - } - - self.complete({}, read_size_); - } - } -}; - -template -struct receive_op { - Conn* conn_; - Adapter adapter; - asio::coroutine coro{}; - - template - void - operator()( Self& self - , system::error_code ec = {} - , std::size_t n = 0) - { - BOOST_ASIO_CORO_REENTER (coro) - { - if (!conn_->is_next_push()) { - BOOST_ASIO_CORO_YIELD - conn_->read_op_timer_.async_wait(std::move(self)); - if (!conn_->is_open() || is_cancelled(self)) { - self.complete(!!ec ? 
ec : asio::error::operation_aborted, 0); - return; - } - } - - if (conn_->use_ssl()) { - BOOST_ASIO_CORO_YIELD - redis::detail::async_read(conn_->next_layer(), conn_->dbuf_, adapter, std::move(self)); - } else { - BOOST_ASIO_CORO_YIELD - redis::detail::async_read(conn_->next_layer().next_layer(), conn_->dbuf_, adapter, std::move(self)); - } - - if (ec || is_cancelled(self)) { - conn_->cancel(operation::run); - conn_->cancel(operation::receive); - self.complete(!!ec ? ec : asio::error::operation_aborted, {}); - return; - } - - conn_->dbuf_.consume(n); - - if (!conn_->is_next_push()) { - conn_->read_op_timer_.cancel(); - } - - self.complete({}, n); - return; - } - } -}; - -template struct exec_op { using req_info_type = typename Conn::req_info; + using adapter_type = typename Conn::adapter_type; - Conn* conn = nullptr; - request const* req = nullptr; - Adapter adapter{}; - std::shared_ptr info = nullptr; + Conn* conn_ = nullptr; + request const* req_ = nullptr; + adapter_type adapter{}; + std::shared_ptr info_ = nullptr; asio::coroutine coro{}; template - void - operator()( Self& self - , system::error_code ec = {} - , std::size_t n = 0) + void operator()(Self& self , system::error_code ec = {}) { BOOST_ASIO_CORO_REENTER (coro) { // Check whether the user wants to wait for the connection to // be stablished. - if (req->get_config().cancel_if_not_connected && !conn->is_open()) { + if (req_->get_config().cancel_if_not_connected && !conn_->is_open()) { BOOST_ASIO_CORO_YIELD asio::post(std::move(self)); return self.complete(error::not_connected, 0); } - info = std::allocate_shared(asio::get_associated_allocator(self), *req, conn->get_executor()); + info_ = std::allocate_shared(asio::get_associated_allocator(self), *req_, adapter, conn_->get_executor()); + + conn_->add_request_info(info_); - conn->add_request_info(info); EXEC_OP_WAIT: BOOST_ASIO_CORO_YIELD - info->async_wait(std::move(self)); + info_->async_wait(std::move(self)); BOOST_ASSERT(ec == asio::error::operation_aborted); - if (info->stop_requested()) { + if (info_->ec_) { + self.complete(info_->ec_, 0); + return; + } + + if (info_->stop_requested()) { // Don't have to call remove_request as it has already // been by cancel(exec). return self.complete(ec, 0); } if (is_cancelled(self)) { - if (info->is_written()) { + if (info_->is_written()) { using c_t = asio::cancellation_type; auto const c = self.get_cancellation_state().cancelled(); if ((c & c_t::terminal) != c_t::none) { // Cancellation requires closing the connection // otherwise it stays in inconsistent state. - conn->cancel(operation::run); + conn_->cancel(operation::run); return self.complete(ec, 0); } else { // Can't implement other cancelation types, ignoring. self.get_cancellation_state().clear(); + + // TODO: Find out a better way to ignore + // cancelation. goto EXEC_OP_WAIT; } } else { // Cancelation can be honored. - conn->remove_request(info); + conn_->remove_request(info_); self.complete(ec, 0); return; } } - BOOST_ASSERT(conn->is_open()); - - if (req->size() == 0) { - // Don't have to call remove_request as it has already - // been removed. - return self.complete({}, 0); - } - - BOOST_ASSERT(!conn->reqs_.empty()); - BOOST_ASSERT(conn->reqs_.front() != nullptr); - BOOST_ASIO_CORO_YIELD - conn->async_read_next(adapter, std::move(self)); - BOOST_REDIS_CHECK_OP1(;); - - if (info->stop_requested()) { - // Don't have to call remove_request as it has already - // been by cancel(exec). 
- return self.complete(ec, 0); - } - - BOOST_ASSERT(!conn->reqs_.empty()); - conn->reqs_.pop_front(); - - if (conn->is_waiting_response()) { - BOOST_ASSERT(!conn->reqs_.empty()); - conn->reqs_.front()->proceed(); - } else { - conn->read_timer_.cancel_one(); - } - - self.complete({}, n); + self.complete(info_->ec_, info_->read_size_); } } }; @@ -329,20 +132,24 @@ struct run_op { { conn->write_buffer_.clear(); conn->read_buffer_.clear(); + conn->parser_.reset(); BOOST_ASIO_CORO_YIELD asio::experimental::make_parallel_group( - [this](auto token) { return conn->reader(token);}, + [this](auto token) { return conn->reader(logger_, token);}, [this](auto token) { return conn->writer(logger_, token);} ).async_wait( asio::experimental::wait_for_one(), std::move(self)); if (is_cancelled(self)) { + logger_.trace("run-op: canceled. Exiting ..."); self.complete(asio::error::operation_aborted); return; } + logger_.on_run(ec0, ec1); + switch (order[0]) { case 0: self.complete(ec0); break; case 1: self.complete(ec1); break; @@ -374,7 +181,19 @@ struct writer_op { BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer().next_layer(), asio::buffer(conn_->write_buffer_), std::move(self)); logger_.on_write(ec, conn_->write_buffer_); - BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);); + + if (ec) { + logger_.trace("writer-op: error. Exiting ..."); + conn_->cancel(operation::run); + self.complete(ec); + return; + } + + if (is_cancelled(self)) { + logger_.trace("writer-op: canceled. Exiting ..."); + self.complete(asio::error::operation_aborted); + return; + } conn_->on_write(); @@ -382,6 +201,7 @@ struct writer_op { // successful write might had already been queued, so we // have to check here before proceeding. if (!conn_->is_open()) { + logger_.trace("writer-op: canceled (2). Exiting ..."); self.complete({}); return; } @@ -390,6 +210,7 @@ struct writer_op { BOOST_ASIO_CORO_YIELD conn_->writer_timer_.async_wait(std::move(self)); if (!conn_->is_open() || is_cancelled(self)) { + logger_.trace("writer-op: canceled (3). Exiting ..."); // Notice this is not an error of the op, stoping was // requested from the outside, so we complete with // success. @@ -400,20 +221,15 @@ struct writer_op { } }; -template +template struct reader_op { - Conn* conn; + using parse_result = typename Conn::parse_result; + using parse_ret_type = typename Conn::parse_ret_type; + Conn* conn_; + Logger logger_; + parse_ret_type res_{parse_result::resp, 0}; asio::coroutine coro{}; - bool as_push() const - { - return - (resp3::to_type(conn->read_buffer_.front()) == resp3::type::push) - || conn->reqs_.empty() - || (!conn->reqs_.empty() && conn->reqs_.front()->get_number_of_commands() == 0) - || !conn->is_waiting_response(); // Added to deal with MONITOR. - } - template void operator()( Self& self , system::error_code ec = {} @@ -423,59 +239,77 @@ struct reader_op { BOOST_ASIO_CORO_REENTER (coro) for (;;) { - if (conn->use_ssl()) - BOOST_ASIO_CORO_YIELD asio::async_read_until(conn->next_layer(), conn->dbuf_, "\r\n", std::move(self)); - else - BOOST_ASIO_CORO_YIELD asio::async_read_until(conn->next_layer().next_layer(), conn->dbuf_, "\r\n", std::move(self)); - - if (ec == asio::error::eof) { - conn->cancel(operation::run); - return self.complete({}); // EOFINAE: EOF is not an error. - } + // Appends some data to the buffer if necessary. 
+ if ((res_.first == parse_result::needs_more) || std::empty(conn_->read_buffer_)) { + if (conn_->use_ssl()) { + BOOST_ASIO_CORO_YIELD + async_append_some( + conn_->next_layer(), + conn_->dbuf_, + conn_->get_suggested_buffer_growth(), + std::move(self)); + } else { + BOOST_ASIO_CORO_YIELD + async_append_some( + conn_->next_layer().next_layer(), + conn_->dbuf_, + conn_->get_suggested_buffer_growth(), + std::move(self)); + } - BOOST_REDIS_CHECK_OP0(conn->cancel(operation::run);); - - // We handle unsolicited events in the following way - // - // 1. Its resp3 type is a push. - // - // 2. A non-push type is received with an empty requests - // queue. I have noticed this is possible (e.g. -MISCONF). - // I expect them to have type push so we can distinguish - // them from responses to commands, but it is a - // simple-error. If we are lucky enough to receive them - // when the command queue is empty we can treat them as - // server pushes, otherwise it is impossible to handle - // them properly - // - // 3. The request does not expect any response but we got - // one. This may happen if for example, subscribe with - // wrong syntax. - // - // Useful links: - // - // - https://github.com/redis/redis/issues/11784 - // - https://github.com/redis/redis/issues/6426 - // - BOOST_ASSERT(!conn->read_buffer_.empty()); - if (as_push()) { - BOOST_ASIO_CORO_YIELD - conn->async_wait_receive(std::move(self)); - } else { - BOOST_ASSERT_MSG(conn->is_waiting_response(), "Not waiting for a response (using MONITOR command perhaps?)"); - BOOST_ASSERT(!conn->reqs_.empty()); - BOOST_ASSERT(conn->reqs_.front()->get_number_of_commands() != 0); - conn->reqs_.front()->proceed(); - BOOST_ASIO_CORO_YIELD - conn->read_timer_.async_wait(std::move(self)); - ec = {}; + logger_.on_read(ec, n); + + // EOF is not treated as error. + if (ec == asio::error::eof) { + logger_.trace("reader-op: EOF received. Exiting ..."); + conn_->cancel(operation::run); + return self.complete({}); // EOFINAE: EOF is not an error. + } + + // The connection is not viable after an error. + if (ec) { + logger_.trace("reader-op: error. Exiting ..."); + conn_->cancel(operation::run); + self.complete(ec); + return; + } + + // Somebody might have canceled implicitly or explicitly + // while we were suspended and after queueing so we have to + // check. + if (!conn_->is_open() || is_cancelled(self)) { + logger_.trace("reader-op: canceled. Exiting ..."); + self.complete(ec); + return; + } } - if (!conn->is_open() || ec || is_cancelled(self)) { - conn->cancel(operation::run); - self.complete(asio::error::basic_errors::operation_aborted); + res_ = conn_->on_read(buffer_view(conn_->dbuf_), ec); + if (ec) { + logger_.trace("reader-op: parse error. Exiting ..."); + conn_->cancel(operation::run); + self.complete(ec); return; } + + if (res_.first == parse_result::push) { + BOOST_ASIO_CORO_YIELD + conn_->receive_channel_.async_send(ec, res_.second, std::move(self)); + + if (ec) { + logger_.trace("reader-op: error. Exiting ..."); + conn_->cancel(operation::run); + self.complete(ec); + return; + } + + if (!conn_->is_open() || is_cancelled(self)) { + logger_.trace("reader-op: canceled (2). Exiting ..."); + self.complete(asio::error::operation_aborted); + return; + } + + } } } }; @@ -495,6 +329,8 @@ class connection_base { /// Type of the next layer using next_layer_type = asio::ssl::stream>; + using receiver_adapter_type = std::function const&, system::error_code&)>; + using this_type = connection_base; /// Constructs from an executor. 
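// Minimal, self-contained sketch of the type-erasure idea behind the
// receiver_adapter_type alias above: a concrete adapter is wrapped in a
// std::function with a fixed node-callback signature, so the connection
// internals no longer need to be templated on the adapter type. The types
// below are illustrative stand-ins, not the real resp3/adapter types.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <string_view>
#include <system_error>
#include <vector>

struct node { std::string_view value; };  // Stand-in for a resp3 node.

// The erased callback type: (command index, node, error code).
using erased_adapter = std::function<void(std::size_t, node const&, std::error_code&)>;

// A concrete adapter that stores every node it is handed.
struct vector_adapter {
   std::vector<std::string>* out;
   void operator()(std::size_t, node const& nd, std::error_code&) const
   { out->push_back(std::string{nd.value}); }
};

int main()
{
   std::vector<std::string> storage;
   erased_adapter adapter = vector_adapter{&storage};  // Type erased here.

   std::error_code ec;
   adapter(0, node{"PONG"}, ec);  // Callers no longer depend on the concrete adapter type.
   std::cout << storage.front() << '\n';
}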
@@ -505,14 +341,12 @@ class connection_base { : ctx_{method} , stream_{std::make_unique(ex, ctx_)} , writer_timer_{ex} - , read_timer_{ex} - , read_op_timer_{ex} + , receive_channel_{ex} , runner_{ex, {}} , dbuf_{read_buffer_, max_read_size} { + set_receive_response(ignore); writer_timer_.expires_at(std::chrono::steady_clock::time_point::max()); - read_timer_.expires_at(std::chrono::steady_clock::time_point::max()); - read_op_timer_.expires_at(std::chrono::steady_clock::time_point::max()); } /// Returns the ssl context. @@ -539,7 +373,7 @@ class connection_base { auto get_executor() {return writer_timer_.get_executor();} /// Cancels specific operations. - virtual void cancel(operation op) + void cancel(operation op) { runner_.cancel(op); if (op == operation::all) { @@ -562,22 +396,21 @@ class connection_base { return asio::async_compose < CompletionToken , void(system::error_code, std::size_t) - >(redis::detail::exec_op{this, &req, f}, token, writer_timer_); + >(redis::detail::exec_op{this, &req, f}, token, writer_timer_); } template + [[deprecated("Set the response with set_receive_response and use the other overload.")]] auto async_receive(Response& response, CompletionToken token) { - using namespace boost::redis::adapter; - auto g = boost_redis_adapt(response); - auto f = adapter::detail::make_adapter_wrapper(g); - - return asio::async_compose - < CompletionToken - , void(system::error_code, std::size_t) - >(redis::detail::receive_op{this, f}, token, read_op_timer_); + set_receive_response(response); + return receive_channel_.async_receive(std::move(token)); } + template + auto async_receive(CompletionToken token) + { return receive_channel_.async_receive(std::move(token)); } + template auto async_run(config const& cfg, Logger l, CompletionToken token) { @@ -586,11 +419,21 @@ class connection_base { return runner_.async_run(*this, l, std::move(token)); } + template + void set_receive_response(Response& response) + { + using namespace boost::redis::adapter; + auto g = boost_redis_adapt(response); + receive_adapter_ = adapter::detail::make_adapter_wrapper(g); + } + private: using clock_type = std::chrono::steady_clock; using clock_traits_type = asio::wait_traits; using timer_type = asio::basic_waitable_timer; + using receive_channel_type = asio::experimental::channel; using runner_type = redis::detail::runner; + using adapter_type = std::function const&, system::error_code&)>; auto use_ssl() const noexcept { return runner_.get_config().use_ssl;} @@ -603,9 +446,9 @@ class connection_base { BOOST_ASSERT(ptr != nullptr); if (ptr->is_written()) { - return !ptr->get_request().get_config().cancel_if_unresponded; + return !ptr->req_->get_config().cancel_if_unresponded; } else { - return !ptr->get_request().get_config().cancel_on_connection_lost; + return !ptr->req_->get_config().cancel_on_connection_lost; } }; @@ -655,13 +498,13 @@ class connection_base { case operation::run: { close(); - read_timer_.cancel(); writer_timer_.cancel(); + receive_channel_.cancel(); cancel_on_conn_lost(); } break; case operation::receive: { - read_op_timer_.cancel(); + receive_channel_.cancel(); } break; default: /* ignore */; } @@ -687,6 +530,9 @@ class connection_base { struct req_info { public: + using node_type = resp3::basic_node; + using wrapped_adapter_type = std::function; + enum class action { stop, @@ -694,14 +540,23 @@ class connection_base { none, }; - explicit req_info(request const& req, executor_type ex) + explicit req_info(request const& req, adapter_type adapter, executor_type ex) : timer_{ex} , 
action_{action::none} , req_{&req} + , adapter_{} , cmds_{std::size(req)} , status_{status::none} + , ec_{{}} + , read_size_{0} { timer_.expires_at(std::chrono::steady_clock::time_point::max()); + + adapter_ = [this, adapter](node_type const& nd, system::error_code& ec) + { + auto const i = std::size(*req_) - cmds_; + adapter(i, nd, ec); + }; } auto proceed() @@ -734,12 +589,6 @@ class connection_base { void reset_status() noexcept { status_ = status::none; } - [[nodiscard]] auto get_number_of_commands() const noexcept - { return cmds_; } - - [[nodiscard]] auto get_request() const noexcept -> auto const& - { return *req_; } - [[nodiscard]] auto stop_requested() const noexcept { return action_ == action::stop;} @@ -749,7 +598,7 @@ class connection_base { return timer_.async_wait(std::move(token)); } - private: + //private: enum class status { none , staged @@ -759,8 +608,14 @@ class connection_base { timer_type timer_; action action_; request const* req_; + wrapped_adapter_type adapter_; + + // Contains the number of commands that haven't been read yet. std::size_t cmds_; status status_; + + system::error_code ec_; + std::size_t read_size_; }; void remove_request(std::shared_ptr const& info) @@ -770,28 +625,16 @@ class connection_base { using reqs_type = std::deque>; - template friend struct redis::detail::reader_op; + template friend struct redis::detail::reader_op; template friend struct redis::detail::writer_op; template friend struct redis::detail::run_op; - template friend struct redis::detail::exec_op; - template friend class redis::detail::read_next_op; - template friend struct redis::detail::receive_op; - template friend struct redis::detail::wait_receive_op; + template friend struct redis::detail::exec_op; template friend struct redis::detail::run_all_op; - template - auto async_wait_receive(CompletionToken token) - { - return asio::async_compose - < CompletionToken - , void(system::error_code) - >(redis::detail::wait_receive_op{this}, token, read_op_timer_); - } - void cancel_push_requests() { auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) { - return !(ptr->is_staged() && ptr->get_request().size() == 0); + return !(ptr->is_staged() && ptr->req_->size() == 0); }); std::for_each(point, std::end(reqs_), [](auto const& ptr) { @@ -810,7 +653,7 @@ class connection_base { { reqs_.push_back(info); - if (info->get_request().has_hello_priority()) { + if (info->req_->has_hello_priority()) { auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) { return e->is_waiting_write(); }); @@ -822,13 +665,13 @@ class connection_base { writer_timer_.cancel(); } - template - auto reader(CompletionToken&& token) + template + auto reader(Logger l, CompletionToken&& token) { return asio::async_compose < CompletionToken , void(system::error_code) - >(redis::detail::reader_op{this}, token, writer_timer_); + >(redis::detail::reader_op{this, l}, token, writer_timer_); } template @@ -840,15 +683,6 @@ class connection_base { >(redis::detail::writer_op{this, l}, token, writer_timer_); } - template - auto async_read_next(Adapter adapter, CompletionToken token) - { - return asio::async_compose - < CompletionToken - , void(system::error_code, std::size_t) - >(redis::detail::read_next_op{*this, adapter, reqs_.front()}, token, writer_timer_); - } - template auto async_run_lean(config const& cfg, Logger l, CompletionToken token) { @@ -870,7 +704,7 @@ class connection_base { std::for_each(point, std::cend(reqs_), [this](auto const& ri) { // 
Stage the request. - write_buffer_ += ri->get_request().payload(); + write_buffer_ += ri->req_->payload(); ri->mark_staged(); }); @@ -884,17 +718,117 @@ class connection_base { void close() { - if (stream_->next_layer().is_open()) - stream_->next_layer().close(); + if (stream_->next_layer().is_open()) { + // TODO: Communicate the error to the caller. + system::error_code ec; + stream_->next_layer().close(ec); + } + } + + auto is_open() const noexcept { return stream_->next_layer().is_open(); } + auto& lowest_layer() noexcept { return stream_->lowest_layer(); } + + auto is_next_push() + { + // We handle unsolicited events in the following way + // + // 1. Its resp3 type is a push. + // + // 2. A non-push type is received with an empty requests + // queue. I have noticed this is possible (e.g. -MISCONF). + // I expect them to have type push so we can distinguish + // them from responses to commands, but it is a + // simple-error. If we are lucky enough to receive them + // when the command queue is empty we can treat them as + // server pushes, otherwise it is impossible to handle + // them properly + // + // 3. The request does not expect any response but we got + // one. This may happen if for example, subscribe with + // wrong syntax. + // + // Useful links: + // + // - https://github.com/redis/redis/issues/11784 + // - https://github.com/redis/redis/issues/6426 + // + + BOOST_ASSERT(!read_buffer_.empty()); + + return + (resp3::to_type(read_buffer_.front()) == resp3::type::push) + || reqs_.empty() + || (!reqs_.empty() && reqs_.front()->cmds_ == 0) + || !is_waiting_response(); // Added to deal with MONITOR. + } + + auto get_suggested_buffer_growth() const noexcept + { + return parser_.get_suggested_buffer_growth(1024); } - bool is_next_push() const noexcept + enum class parse_result { needs_more, push, resp }; + + using parse_ret_type = std::pair; + + parse_ret_type on_finish_parsing(parse_result t) { - return !read_buffer_.empty() && (resp3::to_type(read_buffer_.front()) == resp3::type::push); + on_push_ = false; + dbuf_.consume(parser_.get_consumed()); + auto const res = std::make_pair(t, parser_.get_consumed()); + parser_.reset(); + return res; } - auto is_open() const noexcept { return stream_->next_layer().is_open(); } - auto& lowest_layer() noexcept { return stream_->lowest_layer(); } + parse_ret_type on_read(std::string_view data, system::error_code& ec) + { + // We arrive here in two states: + // + // 1. While we are parsing a message. In this case we + // don't want to determine the type of the message in the + // buffer (i.e. response vs push) but leave it untouched + // until the parsing of a complete message ends. + // + // 2. On a new message, in which case we have to determine + // whether the next messag is a push or a response. + // + if (!on_push_) // Prepare for new message. 
+ on_push_ = is_next_push(); + + if (on_push_) { + if (!resp3::parse(parser_, data, receive_adapter_, ec)) + return std::make_pair(parse_result::needs_more, 0); + + if (ec) + return std::make_pair(parse_result::push, 0); + + return on_finish_parsing(parse_result::push); + } + + BOOST_ASSERT_MSG(is_waiting_response(), "Not waiting for a response (using MONITOR command perhaps?)"); + BOOST_ASSERT(!reqs_.empty()); + BOOST_ASSERT(reqs_.front() != nullptr); + BOOST_ASSERT(reqs_.front()->cmds_ != 0); + + if (!resp3::parse(parser_, data, reqs_.front()->adapter_, ec)) + return std::make_pair(parse_result::needs_more, 0); + + if (ec) { + reqs_.front()->ec_ = ec; + reqs_.front()->proceed(); + return std::make_pair(parse_result::resp, 0); + } + + reqs_.front()->read_size_ += parser_.get_consumed(); + + if (--reqs_.front()->cmds_ == 0) { + // Done with this request. + reqs_.front()->proceed(); + reqs_.pop_front(); + } + + return on_finish_parsing(parse_result::resp); + } asio::ssl::context ctx_; std::unique_ptr stream_; @@ -903,9 +837,9 @@ class connection_base { // also more suitable than a channel and the notify operation does // not suspend. timer_type writer_timer_; - timer_type read_timer_; - timer_type read_op_timer_; + receive_channel_type receive_channel_; runner_type runner_; + receiver_adapter_type receive_adapter_; using dyn_buffer_type = asio::dynamic_string_buffer, std::allocator>; @@ -913,6 +847,8 @@ class connection_base { dyn_buffer_type dbuf_; std::string write_buffer_; reqs_type reqs_; + resp3::parser parser_{}; + bool on_push_ = false; }; } // boost::redis::detail diff --git a/include/boost/redis/detail/health_checker.hpp b/include/boost/redis/detail/health_checker.hpp index dcf0292d..4fe564f7 100644 --- a/include/boost/redis/detail/health_checker.hpp +++ b/include/boost/redis/detail/health_checker.hpp @@ -24,11 +24,12 @@ namespace boost::redis::detail { -template +template class ping_op { public: HealthChecker* checker_ = nullptr; Connection* conn_ = nullptr; + Logger logger_; asio::coroutine coro_{}; template @@ -37,28 +38,39 @@ class ping_op { BOOST_ASIO_CORO_REENTER (coro_) for (;;) { if (checker_->checker_has_exited_) { + logger_.trace("ping_op: checker has exited. Exiting ..."); self.complete({}); return; } BOOST_ASIO_CORO_YIELD conn_->async_exec(checker_->req_, checker_->resp_, std::move(self)); - BOOST_REDIS_CHECK_OP0(checker_->wait_timer_.cancel();) + if (ec || is_cancelled(self)) { + logger_.trace("ping_op: error/cancelled (1)."); + checker_->wait_timer_.cancel(); + self.complete(!!ec ? ec : asio::error::operation_aborted); + return; + } // Wait before pinging again. checker_->ping_timer_.expires_after(checker_->ping_interval_); BOOST_ASIO_CORO_YIELD checker_->ping_timer_.async_wait(std::move(self)); - BOOST_REDIS_CHECK_OP0(;) + if (ec || is_cancelled(self)) { + logger_.trace("ping_op: error/cancelled (2)."); + self.complete(!!ec ? ec : asio::error::operation_aborted); + return; + } } } }; -template +template class check_timeout_op { public: HealthChecker* checker_ = nullptr; Connection* conn_ = nullptr; + Logger logger_; asio::coroutine coro_{}; template @@ -69,14 +81,20 @@ class check_timeout_op { checker_->wait_timer_.expires_after(2 * checker_->ping_interval_); BOOST_ASIO_CORO_YIELD checker_->wait_timer_.async_wait(std::move(self)); - BOOST_REDIS_CHECK_OP0(;) + if (ec || is_cancelled(self)) { + logger_.trace("check-timeout-op: error/canceled. Exiting ..."); + self.complete(!!ec ? 
ec : asio::error::operation_aborted); + return; + } if (checker_->resp_.has_error()) { + logger_.trace("check-timeout-op: Response error. Exiting ..."); self.complete({}); return; } if (checker_->resp_.value().empty()) { + logger_.trace("check-timeout-op: Response has no value. Exiting ..."); checker_->ping_timer_.cancel(); conn_->cancel(operation::run); checker_->checker_has_exited_ = true; @@ -91,11 +109,12 @@ class check_timeout_op { } }; -template +template class check_health_op { public: HealthChecker* checker_ = nullptr; Connection* conn_ = nullptr; + Logger logger_; asio::coroutine coro_{}; template @@ -109,6 +128,7 @@ class check_health_op { BOOST_ASIO_CORO_REENTER (coro_) { if (checker_->ping_interval_ == std::chrono::seconds::zero()) { + logger_.trace("check-health-op: timeout disabled."); BOOST_ASIO_CORO_YIELD asio::post(std::move(self)); self.complete({}); @@ -117,13 +137,16 @@ class check_health_op { BOOST_ASIO_CORO_YIELD asio::experimental::make_parallel_group( - [this](auto token) { return checker_->async_ping(*conn_, token); }, - [this](auto token) { return checker_->async_check_timeout(*conn_, token);} + [this](auto token) { return checker_->async_ping(*conn_, logger_, token); }, + [this](auto token) { return checker_->async_check_timeout(*conn_, logger_, token);} ).async_wait( asio::experimental::wait_for_one(), std::move(self)); + logger_.on_check_health(ec1, ec2); + if (is_cancelled(self)) { + logger_.trace("check-health-op: canceled. Exiting ..."); self.complete(asio::error::operation_aborted); return; } @@ -163,15 +186,20 @@ class health_checker { template < class Connection, + class Logger, class CompletionToken = asio::default_completion_token_t > - auto async_check_health(Connection& conn, CompletionToken token = CompletionToken{}) + auto + async_check_health( + Connection& conn, + Logger l, + CompletionToken token = CompletionToken{}) { checker_has_exited_ = false; return asio::async_compose < CompletionToken , void(system::error_code) - >(check_health_op{this, &conn}, token, conn); + >(check_health_op{this, &conn, l}, token, conn); } std::size_t cancel(operation op) @@ -189,27 +217,27 @@ class health_checker { } private: - template - auto async_ping(Connection& conn, CompletionToken token) + template + auto async_ping(Connection& conn, Logger l, CompletionToken token) { return asio::async_compose < CompletionToken , void(system::error_code) - >(ping_op{this, &conn}, token, conn, ping_timer_); + >(ping_op{this, &conn, l}, token, conn, ping_timer_); } - template - auto async_check_timeout(Connection& conn, CompletionToken token) + template + auto async_check_timeout(Connection& conn, Logger l, CompletionToken token) { return asio::async_compose < CompletionToken , void(system::error_code) - >(check_timeout_op{this, &conn}, token, conn, wait_timer_); + >(check_timeout_op{this, &conn, l}, token, conn, wait_timer_); } - template friend class ping_op; - template friend class check_timeout_op; - template friend class check_health_op; + template friend class ping_op; + template friend class check_timeout_op; + template friend class check_health_op; timer_type ping_timer_; timer_type wait_timer_; diff --git a/include/boost/redis/detail/runner.hpp b/include/boost/redis/detail/runner.hpp index e728b090..af5d9188 100644 --- a/include/boost/redis/detail/runner.hpp +++ b/include/boost/redis/detail/runner.hpp @@ -50,8 +50,15 @@ struct hello_op { BOOST_ASIO_CORO_YIELD conn_->async_exec(runner_->hello_req_, runner_->hello_resp_, std::move(self)); logger_.on_hello(ec, 
runner_->hello_resp_); - BOOST_REDIS_CHECK_OP0(conn_->cancel(operation::run);) - self.complete(ec); + + if (ec || runner_->has_error_in_response() || is_cancelled(self)) { + logger_.trace("hello-op: error/canceled. Exiting ..."); + conn_->cancel(operation::run); + self.complete(!!ec ? ec : asio::error::operation_aborted); + return; + } + + self.complete({}); } } }; @@ -84,12 +91,14 @@ class runner_op { BOOST_ASIO_CORO_YIELD asio::experimental::make_parallel_group( [this](auto token) { return runner_->async_run_all(*conn_, logger_, token); }, - [this](auto token) { return runner_->health_checker_.async_check_health(*conn_, token); }, + [this](auto token) { return runner_->health_checker_.async_check_health(*conn_, logger_, token); }, [this](auto token) { return runner_->async_hello(*conn_, logger_, token); } ).async_wait( asio::experimental::wait_for_all(), std::move(self)); + logger_.on_runner(ec0, ec1, ec2); + if (is_cancelled(self)) { self.complete(asio::error::operation_aborted); return; @@ -232,10 +241,27 @@ class runner { else hello_req_.push("HELLO", "3", "SETNAME", cfg_.clientname); - if (cfg_.database_index) + if (cfg_.database_index && cfg_.database_index.value() != 0) hello_req_.push("SELECT", cfg_.database_index.value()); } + bool has_error_in_response() const noexcept + { + if (!hello_resp_.has_value()) + return true; + + auto f = [](auto const& e) + { + switch (e.data_type) { + case resp3::type::simple_error: + case resp3::type::blob_error: return true; + default: return false; + } + }; + + return std::any_of(std::cbegin(hello_resp_.value()), std::cend(hello_resp_.value()), f); + } + resolver_type resv_; connector_type ctor_; handshaker_type hsher_; diff --git a/include/boost/redis/impl/logger.ipp b/include/boost/redis/impl/logger.ipp index d2c4db8c..a8c5c9ae 100644 --- a/include/boost/redis/impl/logger.ipp +++ b/include/boost/redis/impl/logger.ipp @@ -25,7 +25,7 @@ void logger::on_resolve(system::error_code const& ec, asio::ip::tcp::resolver::r write_prefix(); - std::clog << "Resolve results: "; + std::clog << "run-all-op: resolve addresses "; if (ec) { std::clog << ec.message() << std::endl; @@ -51,7 +51,7 @@ void logger::on_connect(system::error_code const& ec, asio::ip::tcp::endpoint co write_prefix(); - std::clog << "Connected to endpoint: "; + std::clog << "run-all-op: connected to endpoint "; if (ec) std::clog << ec.message() << std::endl; @@ -68,7 +68,7 @@ void logger::on_ssl_handshake(system::error_code const& ec) write_prefix(); - std::clog << "SSL handshake: " << ec.message() << std::endl; + std::clog << "Runner: SSL handshake " << ec.message() << std::endl; } void logger::on_connection_lost(system::error_code const& ec) @@ -97,9 +97,38 @@ logger::on_write( write_prefix(); if (ec) - std::clog << "Write: " << ec.message(); + std::clog << "writer-op: " << ec.message(); else - std::clog << "Bytes written: " << std::size(payload); + std::clog << "writer-op: " << std::size(payload) << " bytes written."; + + std::clog << std::endl; +} + +void logger::on_read(system::error_code const& ec, std::size_t n) +{ + if (level_ < level::info) + return; + + write_prefix(); + + if (ec) + std::clog << "reader-op: " << ec.message(); + else + std::clog << "reader-op: " << n << " bytes read."; + + std::clog << std::endl; +} + +void logger::on_run(system::error_code const& reader_ec, system::error_code const& writer_ec) +{ + if (level_ < level::info) + return; + + write_prefix(); + + std::clog << "run-op: " + << reader_ec.message() << " (reader), " + << writer_ec.message() << " (writer)"; 
std::clog << std::endl; } @@ -115,14 +144,60 @@ logger::on_hello( write_prefix(); if (ec) { - std::clog << "Hello: " << ec.message(); + std::clog << "hello-op: " << ec.message(); if (resp.has_error()) std::clog << " (" << resp.error().diagnostic << ")"; } else { - std::clog << "Hello: Success"; + std::clog << "hello-op: Success"; } std::clog << std::endl; } +void + logger::on_runner( + system::error_code const& run_all_ec, + system::error_code const& health_check_ec, + system::error_code const& hello_ec) +{ + if (level_ < level::info) + return; + + write_prefix(); + + std::clog << "runner-op: " + << run_all_ec.message() << " (async_run_all), " + << health_check_ec.message() << " (async_health_check) " + << hello_ec.message() << " (async_hello)."; + + std::clog << std::endl; +} + +void + logger::on_check_health( + system::error_code const& ping_ec, + system::error_code const& timeout_ec) +{ + if (level_ < level::info) + return; + + write_prefix(); + + std::clog << "check-health-op: " + << ping_ec.message() << " (async_ping), " + << timeout_ec.message() << " (async_check_timeout)."; + + std::clog << std::endl; +} + +void logger::trace(std::string_view reason) +{ + if (level_ < level::debug) + return; + + write_prefix(); + + std::clog << reason << std::endl; +} + } // boost::redis diff --git a/include/boost/redis/logger.hpp b/include/boost/redis/logger.hpp index e3a1cd35..1bdaf480 100644 --- a/include/boost/redis/logger.hpp +++ b/include/boost/redis/logger.hpp @@ -19,6 +19,10 @@ namespace boost::redis { * @ingroup high-level-api * * The class can be passed to the connection objects to log to `std::clog` + * + * Notice that currently this class has no stable interface. Users + * that don't want any logging can disable it by contructing a logger + * with logger::level::emerg to the connection. */ class logger { public: @@ -98,6 +102,22 @@ class logger { */ void on_write(system::error_code const& ec, std::string const& payload); + /** @brief Called when the read operation completes. + * @ingroup high-level-api + * + * @param ec Error code returned by the read operation. + * @param n Number of bytes read. + */ + void on_read(system::error_code const& ec, std::size_t n); + + /** @brief Called when the run operation completes. + * @ingroup high-level-api + * + * @param read_ec Error code returned by the read operation. + * @param write_ec Error code returned by the write operation. + */ + void on_run(system::error_code const& reader_ec, system::error_code const& writer_ec); + /** @brief Called when the `HELLO` request completes. * @ingroup high-level-api * @@ -116,6 +136,26 @@ class logger { prefix_ = prefix; } + /** @brief Called when the runner operation completes. + * @ingroup high-level-api + * + * @param run_all_ec Error code returned by the run_all operation. + * @param health_check_ec Error code returned by the health checker operation. + * @param hello_ec Error code returned by the health checker operation. 
+ */ + void + on_runner( + system::error_code const& run_all_ec, + system::error_code const& health_check_ec, + system::error_code const& hello_ec); + + void + on_check_health( + system::error_code const& ping_ec, + system::error_code const& check_timeout_ec); + + void trace(std::string_view reason); + private: void write_prefix(); level level_; diff --git a/include/boost/redis/resp3/impl/parser.ipp b/include/boost/redis/resp3/impl/parser.ipp index 89cae23c..752c5298 100644 --- a/include/boost/redis/resp3/impl/parser.ipp +++ b/include/boost/redis/resp3/impl/parser.ipp @@ -9,6 +9,7 @@ #include #include +#include namespace boost::redis::resp3 { @@ -21,6 +22,16 @@ void to_int(int_type& i, std::string_view sv, system::error_code& ec) parser::parser() { + reset(); +} + +void parser::reset() +{ + depth_ = 0; + sizes_ = {{1}}; + bulk_length_ = (std::numeric_limits::max)(); + bulk_ = type::invalid; + consumed_ = 0; sizes_[0] = 2; // The sentinel must be more than 1. } diff --git a/include/boost/redis/resp3/parser.hpp b/include/boost/redis/resp3/parser.hpp index a66ebbae..bf1de5d7 100644 --- a/include/boost/redis/resp3/parser.hpp +++ b/include/boost/redis/resp3/parser.hpp @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -31,22 +30,22 @@ class parser { // The current depth. Simple data types will have depth 0, whereas // the elements of aggregates will have depth 1. Embedded types // will have increasing depth. - std::size_t depth_ = 0; + std::size_t depth_; // The parser supports up to 5 levels of nested structures. The // first element in the sizes stack is a sentinel and must be // different from 1. - std::array sizes_ = {{1}}; + std::array sizes_; // Contains the length expected in the next bulk read. - int_type bulk_length_ = (std::numeric_limits::max)(); + int_type bulk_length_; // The type of the next bulk. Contains type::invalid if no bulk is // expected. - type bulk_ = type::invalid; + type bulk_; // The number of bytes consumed from the buffer. - std::size_t consumed_ = 0; + std::size_t consumed_; // Returns the number of bytes that have been consumed. auto consume_impl(type t, std::string_view elem, system::error_code& ec) -> node_type; @@ -71,8 +70,13 @@ class parser { auto get_consumed() const noexcept -> std::size_t; auto consume(std::string_view view, system::error_code& ec) noexcept -> result; + + void reset(); }; +// Returns false if more data is needed. If true is returned the +// parser is either done or an error occured, that can be checked on +// ec. template bool parse( diff --git a/tests/test_conn_check_health.cpp b/tests/test_conn_check_health.cpp index dc8fa078..91ece76a 100644 --- a/tests/test_conn_check_health.cpp +++ b/tests/test_conn_check_health.cpp @@ -41,7 +41,7 @@ struct push_callback { { resp2->value().clear(); BOOST_ASIO_CORO_YIELD - conn2->async_receive(*resp2, *this); + conn2->async_receive(*this); if (ec) { std::clog << "Exiting." << std::endl; return; @@ -106,6 +106,7 @@ BOOST_AUTO_TEST_CASE(check_health) request req2; req2.push("MONITOR"); generic_response resp2; + conn2.set_receive_response(resp2); conn2.async_exec(req2, ignore, [](auto ec, auto) { std::cout << "async_exec: " << std::endl; @@ -113,7 +114,7 @@ BOOST_AUTO_TEST_CASE(check_health) }); //-------------------------------- - + push_callback{&conn1, &conn2, &resp2, &req1}(); // Starts reading pushes. 
ioc.run(); diff --git a/tests/test_conn_echo_stress.cpp b/tests/test_conn_echo_stress.cpp index a967a32f..48bfac0b 100644 --- a/tests/test_conn_echo_stress.cpp +++ b/tests/test_conn_echo_stress.cpp @@ -30,8 +30,12 @@ using boost::redis::connection; auto push_consumer(std::shared_ptr conn, int expected) -> net::awaitable { int c = 0; - for (;;) { - co_await conn->async_receive(ignore, net::use_awaitable); + for (error_code ec;;) { + co_await conn->async_receive(redirect_error(net::use_awaitable, ec)); + if (ec) { + std::cout << "push_consumer error: " << ec.message() << std::endl; + co_return; + } if (++c == expected) break; } @@ -60,7 +64,7 @@ echo_session( boost::system::error_code ec; co_await conn->async_exec(req, resp, redir(ec)); - BOOST_REQUIRE_EQUAL(ec, boost::system::error_code{}); + BOOST_TEST(!ec); BOOST_REQUIRE_EQUAL(msg, std::get<1>(resp).value()); req.clear(); std::get<1>(resp).value().clear(); diff --git a/tests/test_conn_exec.cpp b/tests/test_conn_exec.cpp index bd5bc5ed..f6e5387e 100644 --- a/tests/test_conn_exec.cpp +++ b/tests/test_conn_exec.cpp @@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(hello_priority) conn->async_exec(req1, ignore, [&](auto ec, auto){ // Second callback to the called. std::cout << "req1" << std::endl; - BOOST_CHECK_EQUAL(ec, boost::system::error_code{}); + BOOST_TEST(!ec); BOOST_TEST(!seen2); BOOST_TEST(seen3); seen1 = true; @@ -62,7 +62,7 @@ BOOST_AUTO_TEST_CASE(hello_priority) conn->async_exec(req2, ignore, [&](auto ec, auto){ // Last callback to the called. std::cout << "req2" << std::endl; - BOOST_CHECK_EQUAL(ec, boost::system::error_code{}); + BOOST_TEST(!ec); BOOST_TEST(seen1); BOOST_TEST(seen3); seen2 = true; @@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE(hello_priority) conn->async_exec(req3, ignore, [&](auto ec, auto){ // Callback that will be called first. 
std::cout << "req3" << std::endl; - BOOST_CHECK_EQUAL(ec, boost::system::error_code{}); + BOOST_TEST(!ec); BOOST_TEST(!seen1); BOOST_TEST(!seen2); seen3 = true; diff --git a/tests/test_conn_exec_error.cpp b/tests/test_conn_exec_error.cpp index fe465325..85c35ba2 100644 --- a/tests/test_conn_exec_error.cpp +++ b/tests/test_conn_exec_error.cpp @@ -242,6 +242,8 @@ BOOST_AUTO_TEST_CASE(subscriber_wrong_syntax) conn->async_exec(req1, ignore, c1); generic_response gresp; + conn->set_receive_response(gresp); + auto c3 = [&](auto ec, auto) { std::cout << "async_receive" << std::endl; @@ -254,7 +256,7 @@ BOOST_AUTO_TEST_CASE(subscriber_wrong_syntax) conn->cancel(operation::reconnection); }; - conn->async_receive(gresp, c3); + conn->async_receive(c3); run(conn); diff --git a/tests/test_conn_push.cpp b/tests/test_conn_push.cpp index 91d1f274..ab2572ce 100644 --- a/tests/test_conn_push.cpp +++ b/tests/test_conn_push.cpp @@ -69,12 +69,12 @@ BOOST_AUTO_TEST_CASE(receives_push_waiting_resps) run(conn, {}, {}); bool push_received = false; - conn->async_receive(ignore, [&, conn](auto ec, auto){ + conn->async_receive([&, conn](auto ec, auto){ std::cout << "async_receive" << std::endl; BOOST_TEST(!ec); + push_received = true; conn->cancel(operation::run); conn->cancel(operation::reconnection); - push_received = true; }); ioc.run(); @@ -99,12 +99,12 @@ BOOST_AUTO_TEST_CASE(push_received1) run(conn); bool push_received = false; - conn->async_receive(ignore, [&, conn](auto ec, auto){ + conn->async_receive([&, conn](auto ec, auto){ std::cout << "async_receive" << std::endl; BOOST_TEST(!ec); + push_received = true; conn->cancel(operation::run); conn->cancel(operation::reconnection); - push_received = true; }); ioc.run(); @@ -128,7 +128,7 @@ BOOST_AUTO_TEST_CASE(push_filtered_out) BOOST_TEST(!ec); }); - conn->async_receive(ignore, [conn](auto ec, auto){ + conn->async_receive([&, conn](auto ec, auto){ BOOST_TEST(!ec); conn->cancel(operation::reconnection); }); @@ -146,12 +146,12 @@ net::awaitable push_consumer1(std::shared_ptr conn, bool& push_received) { { - auto [ec, ev] = co_await conn->async_receive(ignore, as_tuple(net::use_awaitable)); + auto [ec, ev] = co_await conn->async_receive(as_tuple(net::use_awaitable)); BOOST_TEST(!ec); } { - auto [ec, ev] = co_await conn->async_receive(ignore, as_tuple(net::use_awaitable)); + auto [ec, ev] = co_await conn->async_receive(as_tuple(net::use_awaitable)); BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled); } @@ -190,8 +190,10 @@ BOOST_AUTO_TEST_CASE(test_push_adapter) req.push("SUBSCRIBE", "channel"); req.push("PING"); - conn->async_receive(error_tag_obj, [conn](auto ec, auto) { - BOOST_CHECK_EQUAL(ec, boost::redis::error::incompatible_size); + conn->set_receive_response(error_tag_obj); + + conn->async_receive([&, conn](auto ec, auto) { + BOOST_CHECK_EQUAL(ec, boost::asio::experimental::error::channel_cancelled); conn->cancel(operation::reconnection); }); @@ -199,7 +201,9 @@ BOOST_AUTO_TEST_CASE(test_push_adapter) BOOST_CHECK_EQUAL(ec, boost::system::errc::errc_t::operation_canceled); }); - run(conn); + conn->async_run({}, {}, [](auto ec){ + BOOST_CHECK_EQUAL(ec, boost::redis::error::incompatible_size); + }); ioc.run(); @@ -210,7 +214,7 @@ BOOST_AUTO_TEST_CASE(test_push_adapter) net::awaitable push_consumer3(std::shared_ptr conn) { for (;;) { - co_await conn->async_receive(ignore, net::use_awaitable); + co_await conn->async_receive(net::use_awaitable); } } diff --git a/tests/test_conn_tls.cpp b/tests/test_conn_tls.cpp index fef978ff..5e38ef3c 100644 
--- a/tests/test_conn_tls.cpp +++ b/tests/test_conn_tls.cpp @@ -17,6 +17,7 @@ using boost::redis::request; using boost::redis::response; using boost::redis::config; using boost::redis::operation; +using boost::system::error_code; bool verify_certificate(bool, net::ssl::verify_context&) { @@ -32,6 +33,7 @@ BOOST_AUTO_TEST_CASE(ping) cfg.password = "aedis"; cfg.addr.host = "db.occase.de"; cfg.addr.port = "6380"; + //cfg.health_check_interval = std::chrono::seconds{0}; std::string const in = "Kabuf"; @@ -55,6 +57,44 @@ BOOST_AUTO_TEST_CASE(ping) ioc.run(); BOOST_CHECK_EQUAL(in, std::get<0>(resp).value()); - std::cout << "===============================" << std::endl; } +BOOST_AUTO_TEST_CASE(acl_does_not_allow_select) +{ + config cfg; + cfg.use_ssl = true; + cfg.username = "aedis"; + cfg.password = "aedis"; + cfg.addr.host = "db.occase.de"; + cfg.addr.port = "6380"; + cfg.database_index = 22; + cfg.reconnect_wait_interval = std::chrono::seconds::zero(); + + std::string const in = "Kabuf"; + + request req; + req.push("PING", in); + + response resp; + + net::io_context ioc; + connection conn{ioc}; + conn.next_layer().set_verify_mode(net::ssl::verify_peer); + conn.next_layer().set_verify_callback(verify_certificate); + + conn.async_exec(req, resp, [&](auto, auto) { + // TODO: We should not need this cancel here because + // reconnect was disabled. + conn.cancel(); + }); + + + error_code ec2; + conn.async_run(cfg, {}, [&](auto ec) { + ec2 = ec; + }); + + ioc.run(); + + BOOST_TEST(!!ec2); +} diff --git a/tests/test_issue_50.cpp b/tests/test_issue_50.cpp index dabffd84..9683592c 100644 --- a/tests/test_issue_50.cpp +++ b/tests/test_issue_50.cpp @@ -47,11 +47,15 @@ receiver(std::shared_ptr conn) -> net::awaitable for (;;) { std::cout << "aaaa" << std::endl; error_code ec; - co_await conn->async_receive(ignore, redirect_error(use_awaitable, ec)); - if (ec) + co_await conn->async_receive(redirect_error(use_awaitable, ec)); + if (ec) { + std::cout << "Error in async_receive" << std::endl; break; + } } } + + std::cout << "Exiting the receiver." << std::endl; } auto diff --git a/tests/test_low_level_async.cpp b/tests/test_low_level_async.cpp index d05345fb..b8b47d14 100644 --- a/tests/test_low_level_async.cpp +++ b/tests/test_low_level_async.cpp @@ -65,7 +65,7 @@ auto co_main(config cfg) -> net::awaitable BOOST_AUTO_TEST_CASE(low_level_async) { net::io_context ioc; - net::co_spawn(ioc, std::move(co_main({})), net::detached); + net::co_spawn(ioc, co_main({}), net::detached); ioc.run(); } From 401dd244199fdaf4228c5ca5eaa2a1e323403015 Mon Sep 17 00:00:00 2001 From: Marcelo Zimbres Date: Sat, 26 Aug 2023 21:40:55 +0200 Subject: [PATCH 04/51] Adds connection usage information. --- README.md | 4 + include/boost/redis/connection.hpp | 8 ++ .../boost/redis/detail/connection_base.hpp | 73 ++++++++++++------- include/boost/redis/logger.hpp | 8 +- include/boost/redis/request.hpp | 12 ++- include/boost/redis/usage.hpp | 43 +++++++++++ tests/test_conn_echo_stress.cpp | 23 +++++- 7 files changed, 136 insertions(+), 35 deletions(-) create mode 100644 include/boost/redis/usage.hpp diff --git a/README.md b/README.md index 355316a1..e6b70566 100644 --- a/README.md +++ b/README.md @@ -686,6 +686,10 @@ https://lists.boost.org/Archives/boost/2023/01/253944.php. performance improvement where one of my benchmark programs passed from 190k/s to 473k/s. +* The connection has a new member `get_usage()` that returns the + connection usage information, such as number of bytes writen, + received etc. 
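  As a rough usage sketch of the accessor mentioned above (the members of
  `boost::redis::usage` are not shown in this excerpt, so the field names
  below are assumptions):

```cpp
#include <boost/redis/connection.hpp>
#include <boost/redis/usage.hpp>
#include <iostream>

void print_usage(boost::redis::connection const& conn)
{
   // get_usage() is const noexcept; the printed member names are assumed
   // (byte counters for writes and reads), not taken from this patch.
   boost::redis::usage const u = conn.get_usage();
   std::cout << "bytes written: " << u.bytes_sent << "\n"
             << "bytes received: " << u.bytes_received << "\n";
}
```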
+ ### v1.4.2 (incorporates changes to conform the boost review and more) * Adds `boost::redis::config::database_index` to make it possible to diff --git a/include/boost/redis/connection.hpp b/include/boost/redis/connection.hpp index 1721a82b..c1cb7dea 100644 --- a/include/boost/redis/connection.hpp +++ b/include/boost/redis/connection.hpp @@ -292,6 +292,10 @@ class basic_connection { void set_receive_response(Response& response) { impl_.set_receive_response(response); } + /// Returns connection usage information. + usage get_usage() const noexcept + { return impl_.get_usage(); } + private: using timer_type = asio::basic_waitable_timer< @@ -394,6 +398,10 @@ class connection { void set_receive_response(Response& response) { impl_.set_receive_response(response); } + /// Returns connection usage information. + usage get_usage() const noexcept + { return impl_.get_usage(); } + private: void async_run_impl( diff --git a/include/boost/redis/detail/connection_base.hpp b/include/boost/redis/detail/connection_base.hpp index ed399a85..78ee229e 100644 --- a/include/boost/redis/detail/connection_base.hpp +++ b/include/boost/redis/detail/connection_base.hpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -40,7 +41,8 @@ #include #include -namespace boost::redis::detail { +namespace boost::redis::detail +{ template struct exec_op { @@ -48,8 +50,6 @@ struct exec_op { using adapter_type = typename Conn::adapter_type; Conn* conn_ = nullptr; - request const* req_ = nullptr; - adapter_type adapter{}; std::shared_ptr info_ = nullptr; asio::coroutine coro{}; @@ -60,14 +60,12 @@ struct exec_op { { // Check whether the user wants to wait for the connection to // be stablished. - if (req_->get_config().cancel_if_not_connected && !conn_->is_open()) { + if (info_->req_->get_config().cancel_if_not_connected && !conn_->is_open()) { BOOST_ASIO_CORO_YIELD asio::post(std::move(self)); return self.complete(error::not_connected, 0); } - info_ = std::allocate_shared(asio::get_associated_allocator(self), *req_, adapter, conn_->get_executor()); - conn_->add_request_info(info_); EXEC_OP_WAIT: @@ -329,6 +327,10 @@ class connection_base { /// Type of the next layer using next_layer_type = asio::ssl::stream>; + using clock_type = std::chrono::steady_clock; + using clock_traits_type = asio::wait_traits; + using timer_type = asio::basic_waitable_timer; + using receiver_adapter_type = std::function const&, system::error_code&)>; using this_type = connection_base; @@ -391,12 +393,14 @@ class connection_base { { using namespace boost::redis::adapter; auto f = boost_redis_adapt(resp); - BOOST_ASSERT_MSG(req.size() <= f.get_supported_response_size(), "Request and response have incompatible sizes."); + BOOST_ASSERT_MSG(req.get_expected_responses() <= f.get_supported_response_size(), "Request and response have incompatible sizes."); + + auto info = std::make_shared(req, f, get_executor()); return asio::async_compose < CompletionToken , void(system::error_code, std::size_t) - >(redis::detail::exec_op{this, &req, f}, token, writer_timer_); + >(exec_op{this, info}, token, writer_timer_); } template @@ -427,12 +431,12 @@ class connection_base { receive_adapter_ = adapter::detail::make_adapter_wrapper(g); } + usage get_usage() const noexcept + { return usage_; } + private: - using clock_type = std::chrono::steady_clock; - using clock_traits_type = asio::wait_traits; - using timer_type = asio::basic_waitable_timer; using receive_channel_type = asio::experimental::channel; - using runner_type = redis::detail::runner; + 
using runner_type = runner; using adapter_type = std::function const&, system::error_code&)>; auto use_ssl() const noexcept @@ -545,7 +549,7 @@ class connection_base { , action_{action::none} , req_{&req} , adapter_{} - , cmds_{std::size(req)} + , expected_responses_{req.get_expected_responses()} , status_{status::none} , ec_{{}} , read_size_{0} @@ -554,7 +558,7 @@ class connection_base { adapter_ = [this, adapter](node_type const& nd, system::error_code& ec) { - auto const i = std::size(*req_) - cmds_; + auto const i = req_->get_expected_responses() - expected_responses_; adapter(i, nd, ec); }; } @@ -611,7 +615,7 @@ class connection_base { wrapped_adapter_type adapter_; // Contains the number of commands that haven't been read yet. - std::size_t cmds_; + std::size_t expected_responses_; status status_; system::error_code ec_; @@ -625,16 +629,16 @@ class connection_base { using reqs_type = std::deque>; - template friend struct redis::detail::reader_op; - template friend struct redis::detail::writer_op; - template friend struct redis::detail::run_op; - template friend struct redis::detail::exec_op; - template friend struct redis::detail::run_all_op; + template friend struct reader_op; + template friend struct writer_op; + template friend struct run_op; + template friend struct exec_op; + template friend struct run_all_op; void cancel_push_requests() { auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) { - return !(ptr->is_staged() && ptr->req_->size() == 0); + return !(ptr->is_staged() && ptr->req_->get_expected_responses() == 0); }); std::for_each(point, std::end(reqs_), [](auto const& ptr) { @@ -671,7 +675,7 @@ class connection_base { return asio::async_compose < CompletionToken , void(system::error_code) - >(redis::detail::reader_op{this, l}, token, writer_timer_); + >(reader_op{this, l}, token, writer_timer_); } template @@ -680,7 +684,7 @@ class connection_base { return asio::async_compose < CompletionToken , void(system::error_code) - >(redis::detail::writer_op{this, l}, token, writer_timer_); + >(writer_op{this, l}, token, writer_timer_); } template @@ -691,7 +695,7 @@ class connection_base { return asio::async_compose < CompletionToken , void(system::error_code) - >(redis::detail::run_op{this, l}, token, writer_timer_); + >(run_op{this, l}, token, writer_timer_); } [[nodiscard]] bool coalesce_requests() @@ -706,8 +710,11 @@ class connection_base { // Stage the request. write_buffer_ += ri->req_->payload(); ri->mark_staged(); + usage_.commands_sent += ri->expected_responses_; }); + usage_.bytes_sent += std::size(write_buffer_); + return point != std::cend(reqs_); } @@ -758,13 +765,13 @@ class connection_base { return (resp3::to_type(read_buffer_.front()) == resp3::type::push) || reqs_.empty() - || (!reqs_.empty() && reqs_.front()->cmds_ == 0) + || (!reqs_.empty() && reqs_.front()->expected_responses_ == 0) || !is_waiting_response(); // Added to deal with MONITOR. 
} auto get_suggested_buffer_growth() const noexcept { - return parser_.get_suggested_buffer_growth(1024); + return parser_.get_suggested_buffer_growth(4096); } enum class parse_result { needs_more, push, resp }; @@ -773,6 +780,14 @@ class connection_base { parse_ret_type on_finish_parsing(parse_result t) { + if (t == parse_result::push) { + usage_.pushes_received += 1; + usage_.push_bytes_received += parser_.get_consumed(); + } else { + usage_.responses_received += 1; + usage_.response_bytes_received += parser_.get_consumed(); + } + on_push_ = false; dbuf_.consume(parser_.get_consumed()); auto const res = std::make_pair(t, parser_.get_consumed()); @@ -808,7 +823,7 @@ class connection_base { BOOST_ASSERT_MSG(is_waiting_response(), "Not waiting for a response (using MONITOR command perhaps?)"); BOOST_ASSERT(!reqs_.empty()); BOOST_ASSERT(reqs_.front() != nullptr); - BOOST_ASSERT(reqs_.front()->cmds_ != 0); + BOOST_ASSERT(reqs_.front()->expected_responses_ != 0); if (!resp3::parse(parser_, data, reqs_.front()->adapter_, ec)) return std::make_pair(parse_result::needs_more, 0); @@ -821,7 +836,7 @@ class connection_base { reqs_.front()->read_size_ += parser_.get_consumed(); - if (--reqs_.front()->cmds_ == 0) { + if (--reqs_.front()->expected_responses_ == 0) { // Done with this request. reqs_.front()->proceed(); reqs_.pop_front(); @@ -849,6 +864,8 @@ class connection_base { reqs_type reqs_; resp3::parser parser_{}; bool on_push_ = false; + + usage usage_; }; } // boost::redis::detail diff --git a/include/boost/redis/logger.hpp b/include/boost/redis/logger.hpp index 1bdaf480..b7c1e09c 100644 --- a/include/boost/redis/logger.hpp +++ b/include/boost/redis/logger.hpp @@ -30,7 +30,11 @@ class logger { * @ingroup high-level-api */ enum class level - { /// Emergency + { + /// Disabled + disabled, + + /// Emergency emerg, /// Alert @@ -60,7 +64,7 @@ class logger { * * @param l Log level. */ - logger(level l = level::info) + logger(level l = level::disabled) : level_{l} {} diff --git a/include/boost/redis/request.hpp b/include/boost/redis/request.hpp index b3508c58..6d3e44b7 100644 --- a/include/boost/redis/request.hpp +++ b/include/boost/redis/request.hpp @@ -84,8 +84,12 @@ class request { request(config cfg = config{true, false, true, true}) : cfg_{cfg} {} + //// Returns the number of responses expected for this request. + [[nodiscard]] auto get_expected_responses() const noexcept -> std::size_t + { return expected_responses_;}; + //// Returns the number of commands contained in this request. 
- [[nodiscard]] auto size() const noexcept -> std::size_t + [[nodiscard]] auto get_commands() const noexcept -> std::size_t { return commands_;}; [[nodiscard]] auto payload() const noexcept -> std::string_view @@ -99,6 +103,7 @@ class request { { payload_.clear(); commands_ = 0; + expected_responses_ = 0; has_hello_priority_ = false; } @@ -303,8 +308,10 @@ class request { private: void check_cmd(std::string_view cmd) { + ++commands_; + if (!detail::has_response(cmd)) - ++commands_; + ++expected_responses_; if (cmd == "HELLO") has_hello_priority_ = cfg_.hello_with_priority; @@ -313,6 +320,7 @@ class request { config cfg_; std::string payload_; std::size_t commands_ = 0; + std::size_t expected_responses_ = 0; bool has_hello_priority_ = false; }; diff --git a/include/boost/redis/usage.hpp b/include/boost/redis/usage.hpp new file mode 100644 index 00000000..91224e96 --- /dev/null +++ b/include/boost/redis/usage.hpp @@ -0,0 +1,43 @@ +/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com) + * + * Distributed under the Boost Software License, Version 1.0. (See + * accompanying file LICENSE.txt) + */ + +#ifndef BOOST_REDIS_USAGE_HPP +#define BOOST_REDIS_USAGE_HPP + +namespace boost::redis +{ + +/** @brief Connection usage information. + * @ingroup high-level-api + * + * @note: To simplify the implementation, the commands_sent and + * bytes_sent in the struct below are computed just before writing to + * the socket, which means on error they might not represent exaclty + * what has been received by the Redis server. + */ +struct usage { + /// Number of commands sent. + std::size_t commands_sent = 0; + + /// Number of bytes sent. + std::size_t bytes_sent = 0; + + /// Number of responses received. + std::size_t responses_received = 0; + + /// Number of pushes received. + std::size_t pushes_received = 0; + + /// Number of response-bytes received. + std::size_t response_bytes_received = 0; + + /// Number of push-bytes received. 
+ std::size_t push_bytes_received = 0; +}; + +} // boost::redis + +#endif // BOOST_REDIS_USAGE_HPP diff --git a/tests/test_conn_echo_stress.cpp b/tests/test_conn_echo_stress.cpp index 48bfac0b..0cbf3a4c 100644 --- a/tests/test_conn_echo_stress.cpp +++ b/tests/test_conn_echo_stress.cpp @@ -26,6 +26,20 @@ using boost::redis::ignore_t; using boost::redis::logger; using boost::redis::config; using boost::redis::connection; +using boost::redis::usage; + +std::ostream& operator<<(std::ostream& os, usage const& u) +{ + os + << "Commands sent: " << u.commands_sent << "\n" + << "Bytes sent: " << u.bytes_sent << "\n" + << "Responses received: " << u.responses_received << "\n" + << "Pushes received: " << u.pushes_received << "\n" + << "Response bytes received: " << u.response_bytes_received << "\n" + << "Push bytes received: " << u.push_bytes_received; + + return os; +} auto push_consumer(std::shared_ptr conn, int expected) -> net::awaitable { @@ -73,10 +87,9 @@ echo_session( } } -auto async_echo_stress() -> net::awaitable +auto async_echo_stress(std::shared_ptr conn) -> net::awaitable { auto ex = co_await net::this_coro::executor; - auto conn = std::make_shared(ex); config cfg; cfg.health_check_interval = std::chrono::seconds::zero(); run(conn, cfg, @@ -117,8 +130,12 @@ auto async_echo_stress() -> net::awaitable BOOST_AUTO_TEST_CASE(echo_stress) { net::io_context ioc; - net::co_spawn(ioc, async_echo_stress(), net::detached); + auto conn = std::make_shared(ioc); + net::co_spawn(ioc, async_echo_stress(conn), net::detached); ioc.run(); + + std::cout << "-------------------\n" + << conn->get_usage() << std::endl; } #else From 1ed8e0182c6662cf85d8fdbca6b04a9d28c6a641 Mon Sep 17 00:00:00 2001 From: Marcelo Zimbres Date: Sat, 2 Sep 2023 13:05:06 +0200 Subject: [PATCH 05/51] Removes resp3::async_read. 
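With resp3::async_read and the related free read functions gone, reading is driven by the sans-io resp3::parser, which the connection now uses internally and which the rewritten low-level tests below call directly. A minimal sketch of that loop, assuming resp3::parse keeps returning false while it still needs input, as the removed parse_op did; the header names are assumed from the library layout and the chunked input merely simulates partial reads:

```cpp
// Sketch of the sans-io read loop: keep feeding the accumulated buffer
// to the parser until it reports a complete message.
#include <boost/redis/adapter/adapt.hpp>
#include <boost/redis/adapter/result.hpp>
#include <boost/redis/resp3/parser.hpp>
#include <boost/system/error_code.hpp>
#include <cassert>
#include <string>
#include <vector>

namespace resp3 = boost::redis::resp3;
using boost::redis::adapter::adapt2;
using boost::redis::adapter::result;

int main()
{
   // A RESP3 number split in two, as if it arrived in separate reads.
   std::vector<std::string> const chunks{":10", "\r\n"};

   std::string buffer;
   resp3::parser p;
   result<long long> value;
   auto adapter = adapt2(value);
   boost::system::error_code ec;

   std::size_t i = 0;
   while (!resp3::parse(p, buffer, adapter, ec) && !ec)
      buffer += chunks.at(i++); // "read" more bytes and try again

   assert(!ec);
   assert(value.value() == 10);
   assert(p.get_consumed() == buffer.size());
}
```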
--- CMakeLists.txt | 2 - examples/cpp17_intro.cpp | 6 +- examples/cpp17_intro_sync.cpp | 1 - examples/cpp20_chat_room.cpp | 43 +- examples/cpp20_containers.cpp | 24 +- examples/cpp20_echo_server.cpp | 32 +- examples/cpp20_intro.cpp | 10 +- examples/cpp20_intro_tls.cpp | 14 +- examples/cpp20_json.cpp | 10 +- examples/cpp20_protobuf.cpp | 10 +- examples/cpp20_resolve_with_sentinel.cpp | 14 +- examples/cpp20_subscriber.cpp | 18 +- examples/main.cpp | 8 +- .../boost/redis/detail/connection_base.hpp | 77 +++- include/boost/redis/detail/read.hpp | 291 -------------- tests/test_low_level.cpp | 378 +++++++----------- tests/test_low_level_async.cpp | 78 ---- tests/test_low_level_sync.cpp | 61 --- 18 files changed, 316 insertions(+), 761 deletions(-) delete mode 100644 include/boost/redis/detail/read.hpp delete mode 100644 tests/test_low_level_async.cpp delete mode 100644 tests/test_low_level_sync.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 5c4347d5..fb9655db 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -162,7 +162,6 @@ if (BOOST_REDIS_TESTS) make_test(test_conn_exec_error 17) make_test(test_request 17) make_test(test_run 17) - make_test(test_low_level_sync 17) make_test(test_low_level_sync_sans_io 17) make_test(test_conn_check_health 17) @@ -172,7 +171,6 @@ if (BOOST_REDIS_TESTS) make_test(test_conn_exec_cancel 20) make_test(test_conn_exec_cancel2 20) make_test(test_conn_echo_stress 20) - make_test(test_low_level_async 20) make_test(test_conn_run_cancel 20) make_test(test_issue_50 20) endif() diff --git a/examples/cpp17_intro.cpp b/examples/cpp17_intro.cpp index 13a303a0..d0a98b4e 100644 --- a/examples/cpp17_intro.cpp +++ b/examples/cpp17_intro.cpp @@ -8,7 +8,7 @@ #include #include -namespace net = boost::asio; +namespace asio = boost::asio; using boost::redis::connection; using boost::redis::request; using boost::redis::response; @@ -29,10 +29,10 @@ auto main(int argc, char * argv[]) -> int response resp; - net::io_context ioc; + asio::io_context ioc; connection conn{ioc}; - conn.async_run(cfg, {}, net::detached); + conn.async_run(cfg, {}, asio::detached); conn.async_exec(req, resp, [&](auto ec, auto) { if (!ec) diff --git a/examples/cpp17_intro_sync.cpp b/examples/cpp17_intro_sync.cpp index e9a4627d..1ed9a7d6 100644 --- a/examples/cpp17_intro_sync.cpp +++ b/examples/cpp17_intro_sync.cpp @@ -9,7 +9,6 @@ #include #include -namespace net = boost::asio; using boost::redis::sync_connection; using boost::redis::request; using boost::redis::response; diff --git a/examples/cpp20_chat_room.cpp b/examples/cpp20_chat_room.cpp index bc9a73e7..167f59e1 100644 --- a/examples/cpp20_chat_room.cpp +++ b/examples/cpp20_chat_room.cpp @@ -17,16 +17,23 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) #if defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR) -namespace net = boost::asio; -using stream_descriptor = net::deferred_t::as_default_on_t; -using signal_set = net::deferred_t::as_default_on_t; -using boost::redis::request; -using boost::redis::generic_response; +namespace asio = boost::asio; +using stream_descriptor = asio::deferred_t::as_default_on_t; +using signal_set = asio::deferred_t::as_default_on_t; +using boost::asio::async_read_until; +using boost::asio::awaitable; +using boost::asio::co_spawn; +using boost::asio::consign; +using boost::asio::deferred; +using boost::asio::detached; +using boost::asio::dynamic_buffer; +using boost::asio::redirect_error; +using boost::asio::use_awaitable; using boost::redis::config; using boost::redis::connection; +using boost::redis::generic_response; using 
boost::redis::ignore; -using net::redirect_error; -using net::use_awaitable; +using boost::redis::request; using boost::system::error_code; using namespace std::chrono_literals; @@ -34,7 +41,7 @@ using namespace std::chrono_literals; // terminals and type messages to stdin. auto -receiver(std::shared_ptr conn) -> net::awaitable +receiver(std::shared_ptr conn) -> awaitable { request req; req.push("SUBSCRIBE", "channel"); @@ -45,7 +52,7 @@ receiver(std::shared_ptr conn) -> net::awaitable while (conn->will_reconnect()) { // Subscribe to channels. - co_await conn->async_exec(req, ignore, net::deferred); + co_await conn->async_exec(req, ignore, deferred); // Loop reading Redis push messages. for (error_code ec;;) { @@ -63,27 +70,27 @@ receiver(std::shared_ptr conn) -> net::awaitable } // Publishes stdin messages to a Redis channel. -auto publisher(std::shared_ptr in, std::shared_ptr conn) -> net::awaitable +auto publisher(std::shared_ptr in, std::shared_ptr conn) -> awaitable { for (std::string msg;;) { - auto n = co_await net::async_read_until(*in, net::dynamic_buffer(msg, 1024), "\n"); + auto n = co_await async_read_until(*in, dynamic_buffer(msg, 1024), "\n"); request req; req.push("PUBLISH", "channel", msg); - co_await conn->async_exec(req, ignore, net::deferred); + co_await conn->async_exec(req, ignore, deferred); msg.erase(0, n); } } // Called from the main function (see main.cpp) -auto co_main(config cfg) -> net::awaitable +auto co_main(config cfg) -> awaitable { - auto ex = co_await net::this_coro::executor; + auto ex = co_await asio::this_coro::executor; auto conn = std::make_shared(ex); auto stream = std::make_shared(ex, ::dup(STDIN_FILENO)); - net::co_spawn(ex, receiver(conn), net::detached); - net::co_spawn(ex, publisher(stream, conn), net::detached); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + co_spawn(ex, receiver(conn), detached); + co_spawn(ex, publisher(stream, conn), detached); + conn->async_run(cfg, {}, consign(detached, conn)); signal_set sig_set{ex, SIGINT, SIGTERM}; co_await sig_set.async_wait(); @@ -92,7 +99,7 @@ auto co_main(config cfg) -> net::awaitable } #else // defined(BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR) -auto co_main(config const&) -> net::awaitable +auto co_main(config const&) -> awaitable { std::cout << "Requires support for posix streams." << std::endl; co_return; diff --git a/examples/cpp20_containers.cpp b/examples/cpp20_containers.cpp index dfedd82e..66a7e942 100644 --- a/examples/cpp20_containers.cpp +++ b/examples/cpp20_containers.cpp @@ -14,13 +14,17 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) -namespace net = boost::asio; +namespace asio = boost::asio; using boost::redis::request; using boost::redis::response; using boost::redis::ignore_t; using boost::redis::ignore; using boost::redis::config; using boost::redis::connection; +using boost::asio::awaitable; +using boost::asio::deferred; +using boost::asio::detached; +using boost::asio::consign; void print(std::map const& cont) { @@ -35,7 +39,7 @@ void print(std::vector const& cont) } // Stores the content of some STL containers in Redis. 
-auto store(std::shared_ptr conn) -> net::awaitable +auto store(std::shared_ptr conn) -> awaitable { std::vector vec {1, 2, 3, 4, 5, 6}; @@ -47,10 +51,10 @@ auto store(std::shared_ptr conn) -> net::awaitable req.push_range("RPUSH", "rpush-key", vec); req.push_range("HSET", "hset-key", map); - co_await conn->async_exec(req, ignore, net::deferred); + co_await conn->async_exec(req, ignore, deferred); } -auto hgetall(std::shared_ptr conn) -> net::awaitable +auto hgetall(std::shared_ptr conn) -> awaitable { // A request contains multiple commands. request req; @@ -60,13 +64,13 @@ auto hgetall(std::shared_ptr conn) -> net::awaitable response> resp; // Executes the request and reads the response. - co_await conn->async_exec(req, resp, net::deferred); + co_await conn->async_exec(req, resp, deferred); print(std::get<0>(resp).value()); } // Retrieves in a transaction. -auto transaction(std::shared_ptr conn) -> net::awaitable +auto transaction(std::shared_ptr conn) -> awaitable { request req; req.push("MULTI"); @@ -81,17 +85,17 @@ auto transaction(std::shared_ptr conn) -> net::awaitable response>, std::optional>> // exec > resp; - co_await conn->async_exec(req, resp, net::deferred); + co_await conn->async_exec(req, resp, deferred); print(std::get<0>(std::get<3>(resp).value()).value().value()); print(std::get<1>(std::get<3>(resp).value()).value().value()); } // Called from the main function (see main.cpp) -net::awaitable co_main(config cfg) +awaitable co_main(config cfg) { - auto conn = std::make_shared(co_await net::this_coro::executor); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + auto conn = std::make_shared(co_await asio::this_coro::executor); + conn->async_run(cfg, {}, consign(detached, conn)); co_await store(conn); co_await transaction(conn); diff --git a/examples/cpp20_echo_server.cpp b/examples/cpp20_echo_server.cpp index 9b637240..eba908a2 100644 --- a/examples/cpp20_echo_server.cpp +++ b/examples/cpp20_echo_server.cpp @@ -14,10 +14,10 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) -namespace net = boost::asio; -using tcp_socket = net::deferred_t::as_default_on_t; -using tcp_acceptor = net::deferred_t::as_default_on_t; -using signal_set = net::deferred_t::as_default_on_t; +namespace asio = boost::asio; +using tcp_socket = asio::deferred_t::as_default_on_t; +using tcp_acceptor = asio::deferred_t::as_default_on_t; +using signal_set = asio::deferred_t::as_default_on_t; using boost::redis::request; using boost::redis::response; using boost::redis::config; @@ -25,16 +25,16 @@ using boost::system::error_code; using boost::redis::connection; using namespace std::chrono_literals; -auto echo_server_session(tcp_socket socket, std::shared_ptr conn) -> net::awaitable +auto echo_server_session(tcp_socket socket, std::shared_ptr conn) -> asio::awaitable { request req; response resp; for (std::string buffer;;) { - auto n = co_await net::async_read_until(socket, net::dynamic_buffer(buffer, 1024), "\n"); + auto n = co_await asio::async_read_until(socket, asio::dynamic_buffer(buffer, 1024), "\n"); req.push("PING", buffer); - co_await conn->async_exec(req, resp, net::deferred); - co_await net::async_write(socket, net::buffer(std::get<0>(resp).value())); + co_await conn->async_exec(req, resp, asio::deferred); + co_await asio::async_write(socket, asio::buffer(std::get<0>(resp).value())); std::get<0>(resp).value().clear(); req.clear(); buffer.erase(0, n); @@ -42,25 +42,25 @@ auto echo_server_session(tcp_socket socket, std::shared_ptr conn) -> } // Listens for tcp connections. 
-auto listener(std::shared_ptr conn) -> net::awaitable +auto listener(std::shared_ptr conn) -> asio::awaitable { try { - auto ex = co_await net::this_coro::executor; - tcp_acceptor acc(ex, {net::ip::tcp::v4(), 55555}); + auto ex = co_await asio::this_coro::executor; + tcp_acceptor acc(ex, {asio::ip::tcp::v4(), 55555}); for (;;) - net::co_spawn(ex, echo_server_session(co_await acc.async_accept(), conn), net::detached); + asio::co_spawn(ex, echo_server_session(co_await acc.async_accept(), conn), asio::detached); } catch (std::exception const& e) { std::clog << "Listener: " << e.what() << std::endl; } } // Called from the main function (see main.cpp) -auto co_main(config cfg) -> net::awaitable +auto co_main(config cfg) -> asio::awaitable { - auto ex = co_await net::this_coro::executor; + auto ex = co_await asio::this_coro::executor; auto conn = std::make_shared(ex); - net::co_spawn(ex, listener(conn), net::detached); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + asio::co_spawn(ex, listener(conn), asio::detached); + conn->async_run(cfg, {}, asio::consign(asio::detached, conn)); signal_set sig_set(ex, SIGINT, SIGTERM); co_await sig_set.async_wait(); diff --git a/examples/cpp20_intro.cpp b/examples/cpp20_intro.cpp index 195122cf..b2154d31 100644 --- a/examples/cpp20_intro.cpp +++ b/examples/cpp20_intro.cpp @@ -13,17 +13,17 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) -namespace net = boost::asio; +namespace asio = boost::asio; using boost::redis::request; using boost::redis::response; using boost::redis::config; using boost::redis::connection; // Called from the main function (see main.cpp) -auto co_main(config cfg) -> net::awaitable +auto co_main(config cfg) -> asio::awaitable { - auto conn = std::make_shared(co_await net::this_coro::executor); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + auto conn = std::make_shared(co_await asio::this_coro::executor); + conn->async_run(cfg, {}, asio::consign(asio::detached, conn)); // A request containing only a ping command. request req; @@ -33,7 +33,7 @@ auto co_main(config cfg) -> net::awaitable response resp; // Executes the request. 
- co_await conn->async_exec(req, resp, net::deferred); + co_await conn->async_exec(req, resp, asio::deferred); conn->cancel(); std::cout << "PING: " << std::get<0>(resp).value() << std::endl; diff --git a/examples/cpp20_intro_tls.cpp b/examples/cpp20_intro_tls.cpp index b911af27..b98028ce 100644 --- a/examples/cpp20_intro_tls.cpp +++ b/examples/cpp20_intro_tls.cpp @@ -13,20 +13,20 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) -namespace net = boost::asio; +namespace asio = boost::asio; using boost::redis::request; using boost::redis::response; using boost::redis::config; using boost::redis::logger; using boost::redis::connection; -auto verify_certificate(bool, net::ssl::verify_context&) -> bool +auto verify_certificate(bool, asio::ssl::verify_context&) -> bool { std::cout << "set_verify_callback" << std::endl; return true; } -auto co_main(config cfg) -> net::awaitable +auto co_main(config cfg) -> asio::awaitable { cfg.use_ssl = true; cfg.username = "aedis"; @@ -34,18 +34,18 @@ auto co_main(config cfg) -> net::awaitable cfg.addr.host = "db.occase.de"; cfg.addr.port = "6380"; - auto conn = std::make_shared(co_await net::this_coro::executor); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + auto conn = std::make_shared(co_await asio::this_coro::executor); + conn->async_run(cfg, {}, asio::consign(asio::detached, conn)); request req; req.push("PING"); response resp; - conn->next_layer().set_verify_mode(net::ssl::verify_peer); + conn->next_layer().set_verify_mode(asio::ssl::verify_peer); conn->next_layer().set_verify_callback(verify_certificate); - co_await conn->async_exec(req, resp, net::deferred); + co_await conn->async_exec(req, resp, asio::deferred); conn->cancel(); std::cout << "Response: " << std::get<0>(resp).value() << std::endl; diff --git a/examples/cpp20_json.cpp b/examples/cpp20_json.cpp index d0c6423c..8a18e1d7 100644 --- a/examples/cpp20_json.cpp +++ b/examples/cpp20_json.cpp @@ -23,7 +23,7 @@ #include #include -namespace net = boost::asio; +namespace asio = boost::asio; using namespace boost::describe; using boost::redis::request; using boost::redis::response; @@ -48,11 +48,11 @@ void boost_redis_to_bulk(std::string& to, user const& u) void boost_redis_from_bulk(user& u, std::string_view sv, boost::system::error_code&) { u = boost::json::value_to(boost::json::parse(sv)); } -auto co_main(config cfg) -> net::awaitable +auto co_main(config cfg) -> asio::awaitable { - auto ex = co_await net::this_coro::executor; + auto ex = co_await asio::this_coro::executor; auto conn = std::make_shared(ex); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + conn->async_run(cfg, {}, asio::consign(asio::detached, conn)); // user object that will be stored in Redis in json format. 
user const u{"Joao", "58", "Brazil"}; @@ -64,7 +64,7 @@ auto co_main(config cfg) -> net::awaitable response resp; - co_await conn->async_exec(req, resp, net::deferred); + co_await conn->async_exec(req, resp, asio::deferred); conn->cancel(); // Prints the first ping diff --git a/examples/cpp20_protobuf.cpp b/examples/cpp20_protobuf.cpp index 75eb8fd2..f8ab5494 100644 --- a/examples/cpp20_protobuf.cpp +++ b/examples/cpp20_protobuf.cpp @@ -19,7 +19,7 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) -namespace net = boost::asio; +namespace asio = boost::asio; using boost::redis::request; using boost::redis::response; using boost::redis::operation; @@ -58,11 +58,11 @@ void boost_redis_from_bulk(person& u, std::string_view sv, boost::system::error_ using tutorial::boost_redis_to_bulk; using tutorial::boost_redis_from_bulk; -net::awaitable co_main(config cfg) +asio::awaitable co_main(config cfg) { - auto ex = co_await net::this_coro::executor; + auto ex = co_await asio::this_coro::executor; auto conn = std::make_shared(ex); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + conn->async_run(cfg, {}, asio::consign(asio::detached, conn)); person p; p.set_name("Louis"); @@ -76,7 +76,7 @@ net::awaitable co_main(config cfg) response resp; // Sends the request and receives the response. - co_await conn->async_exec(req, resp, net::deferred); + co_await conn->async_exec(req, resp, asio::deferred); conn->cancel(); std::cout diff --git a/examples/cpp20_resolve_with_sentinel.cpp b/examples/cpp20_resolve_with_sentinel.cpp index 8401cd1e..cca77ded 100644 --- a/examples/cpp20_resolve_with_sentinel.cpp +++ b/examples/cpp20_resolve_with_sentinel.cpp @@ -12,8 +12,8 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) -namespace net = boost::asio; -using endpoints = net::ip::tcp::resolver::results_type; +namespace asio = boost::asio; +using endpoints = asio::ip::tcp::resolver::results_type; using boost::redis::request; using boost::redis::response; using boost::redis::ignore_t; @@ -22,18 +22,18 @@ using boost::redis::address; using boost::redis::connection; auto redir(boost::system::error_code& ec) - { return net::redirect_error(net::use_awaitable, ec); } + { return asio::redirect_error(asio::use_awaitable, ec); } // For more info see // - https://redis.io/docs/manual/sentinel. // - https://redis.io/docs/reference/sentinel-clients. -auto resolve_master_address(std::vector
const& addresses) -> net::awaitable<address>
+auto resolve_master_address(std::vector<address> const& addresses) -> asio::awaitable<address>
{ request req; req.push("SENTINEL", "get-master-addr-by-name", "mymaster"); req.push("QUIT"); - auto conn = std::make_shared(co_await net::this_coro::executor); + auto conn = std::make_shared(co_await asio::this_coro::executor); response>, ignore_t> resp; for (auto addr : addresses) { @@ -43,7 +43,7 @@ auto resolve_master_address(std::vector<address>
const& addresses) -> net::await // TODO: async_run and async_exec should be launched in // parallel here so we can wait for async_run completion // before eventually calling it again. - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + conn->async_run(cfg, {}, asio::consign(asio::detached, conn)); co_await conn->async_exec(req, resp, redir(ec)); conn->cancel(); conn->reset_stream(); @@ -54,7 +54,7 @@ auto resolve_master_address(std::vector<address>
const& addresses) -> net::await co_return address{}; } -auto co_main(config cfg) -> net::awaitable +auto co_main(config cfg) -> asio::awaitable { // A list of sentinel addresses from which only one is responsive. // This simulates sentinels that are down. diff --git a/examples/cpp20_subscriber.cpp b/examples/cpp20_subscriber.cpp index 30dcb308..ac1cc884 100644 --- a/examples/cpp20_subscriber.cpp +++ b/examples/cpp20_subscriber.cpp @@ -18,7 +18,7 @@ #if defined(BOOST_ASIO_HAS_CO_AWAIT) -namespace net = boost::asio; +namespace asio = boost::asio; using namespace std::chrono_literals; using boost::redis::request; using boost::redis::generic_response; @@ -27,7 +27,7 @@ using boost::redis::config; using boost::redis::ignore; using boost::system::error_code; using boost::redis::connection; -using signal_set = net::deferred_t::as_default_on_t; +using signal_set = asio::deferred_t::as_default_on_t; /* This example will subscribe and read pushes indefinitely. * @@ -47,7 +47,7 @@ using signal_set = net::deferred_t::as_default_on_t; // Receives server pushes. auto -receiver(std::shared_ptr conn) -> net::awaitable +receiver(std::shared_ptr conn) -> asio::awaitable { request req; req.push("SUBSCRIBE", "channel"); @@ -59,11 +59,11 @@ receiver(std::shared_ptr conn) -> net::awaitable while (conn->will_reconnect()) { // Reconnect to channels. - co_await conn->async_exec(req, ignore, net::deferred); + co_await conn->async_exec(req, ignore, asio::deferred); // Loop reading Redis pushs messages. for (error_code ec;;) { - co_await conn->async_receive(net::redirect_error(net::use_awaitable, ec)); + co_await conn->async_receive(asio::redirect_error(asio::use_awaitable, ec)); if (ec) break; // Connection lost, break so we can reconnect to channels. std::cout @@ -76,12 +76,12 @@ receiver(std::shared_ptr conn) -> net::awaitable } } -auto co_main(config cfg) -> net::awaitable +auto co_main(config cfg) -> asio::awaitable { - auto ex = co_await net::this_coro::executor; + auto ex = co_await asio::this_coro::executor; auto conn = std::make_shared(ex); - net::co_spawn(ex, receiver(conn), net::detached); - conn->async_run(cfg, {}, net::consign(net::detached, conn)); + asio::co_spawn(ex, receiver(conn), asio::detached); + conn->async_run(cfg, {}, asio::consign(asio::detached, conn)); signal_set sig_set(ex, SIGINT, SIGTERM); co_await sig_set.async_wait(); diff --git a/examples/main.cpp b/examples/main.cpp index f2d79213..0bef3ccc 100644 --- a/examples/main.cpp +++ b/examples/main.cpp @@ -11,13 +11,13 @@ #include #include -namespace net = boost::asio; +namespace asio = boost::asio; using boost::redis::config; using boost::redis::logger; #if defined(BOOST_ASIO_HAS_CO_AWAIT) -extern net::awaitable co_main(config); +extern asio::awaitable co_main(config); auto main(int argc, char * argv[]) -> int { @@ -29,8 +29,8 @@ auto main(int argc, char * argv[]) -> int cfg.addr.port = argv[2]; } - net::io_context ioc; - net::co_spawn(ioc, co_main(cfg), [](std::exception_ptr p) { + asio::io_context ioc; + asio::co_spawn(ioc, co_main(cfg), [](std::exception_ptr p) { if (p) std::rethrow_exception(p); }); diff --git a/include/boost/redis/detail/connection_base.hpp b/include/boost/redis/detail/connection_base.hpp index 78ee229e..1c4ff578 100644 --- a/include/boost/redis/detail/connection_base.hpp +++ b/include/boost/redis/detail/connection_base.hpp @@ -9,7 +9,6 @@ #include #include -#include #include #include #include @@ -44,6 +43,66 @@ namespace boost::redis::detail { +template +std::string_view buffer_view(DynamicBuffer buf) noexcept +{ + 
char const* start = static_cast(buf.data(0, buf.size()).data()); + return std::string_view{start, std::size(buf)}; +} + +template +class append_some_op { +private: + AsyncReadStream& stream_; + DynamicBuffer buf_; + std::size_t size_ = 0; + std::size_t tmp_ = 0; + asio::coroutine coro_{}; + +public: + append_some_op(AsyncReadStream& stream, DynamicBuffer buf, std::size_t size) + : stream_ {stream} + , buf_ {std::move(buf)} + , size_{size} + { } + + template + void operator()( Self& self + , system::error_code ec = {} + , std::size_t n = 0) + { + BOOST_ASIO_CORO_REENTER (coro_) + { + tmp_ = buf_.size(); + buf_.grow(size_); + + BOOST_ASIO_CORO_YIELD + stream_.async_read_some(buf_.data(tmp_, size_), std::move(self)); + if (ec) { + self.complete(ec, 0); + return; + } + + buf_.shrink(buf_.size() - tmp_ - n); + self.complete({}, n); + } + } +}; + +template +auto +async_append_some( + AsyncReadStream& stream, + DynamicBuffer buffer, + std::size_t size, + CompletionToken&& token) +{ + return asio::async_compose + < CompletionToken + , void(system::error_code, std::size_t) + >(append_some_op {stream, buffer, size}, token, stream); +} + template struct exec_op { using req_info_type = typename Conn::req_info; @@ -128,9 +187,7 @@ struct run_op { { BOOST_ASIO_CORO_REENTER (coro) { - conn->write_buffer_.clear(); - conn->read_buffer_.clear(); - conn->parser_.reset(); + conn->reset(); BOOST_ASIO_CORO_YIELD asio::experimental::make_parallel_group( @@ -331,8 +388,6 @@ class connection_base { using clock_traits_type = asio::wait_traits; using timer_type = asio::basic_waitable_timer; - using receiver_adapter_type = std::function const&, system::error_code&)>; - using this_type = connection_base; /// Constructs from an executor. @@ -438,6 +493,7 @@ class connection_base { using receive_channel_type = asio::experimental::channel; using runner_type = runner; using adapter_type = std::function const&, system::error_code&)>; + using receiver_adapter_type = std::function const&, system::error_code&)>; auto use_ssl() const noexcept { return runner_.get_config().use_ssl;} @@ -726,7 +782,6 @@ class connection_base { void close() { if (stream_->next_layer().is_open()) { - // TODO: Communicate the error to the caller. system::error_code ec; stream_->next_layer().close(ec); } @@ -845,6 +900,14 @@ class connection_base { return on_finish_parsing(parse_result::resp); } + void reset() + { + write_buffer_.clear(); + read_buffer_.clear(); + parser_.reset(); + on_push_ = false; + } + asio::ssl::context ctx_; std::unique_ptr stream_; diff --git a/include/boost/redis/detail/read.hpp b/include/boost/redis/detail/read.hpp deleted file mode 100644 index 9a74a498..00000000 --- a/include/boost/redis/detail/read.hpp +++ /dev/null @@ -1,291 +0,0 @@ -/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com) - * - * Distributed under the Boost Software License, Version 1.0. 
(See - * accompanying file LICENSE.txt) - */ - -#ifndef BOOST_REDIS_READ_HPP -#define BOOST_REDIS_READ_HPP - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -namespace boost::redis::detail { - -template -std::string_view buffer_view(DynamicBuffer buf) noexcept -{ - char const* start = static_cast(buf.data(0, buf.size()).data()); - return std::string_view{start, std::size(buf)}; -} - -template -class append_some_op { -private: - AsyncReadStream& stream_; - DynamicBuffer buf_; - std::size_t size_ = 0; - std::size_t tmp_ = 0; - asio::coroutine coro_{}; - -public: - append_some_op(AsyncReadStream& stream, DynamicBuffer buf, std::size_t size) - : stream_ {stream} - , buf_ {std::move(buf)} - , size_{size} - { } - - template - void operator()( Self& self - , system::error_code ec = {} - , std::size_t n = 0) - { - BOOST_ASIO_CORO_REENTER (coro_) - { - tmp_ = buf_.size(); - buf_.grow(size_); - - BOOST_ASIO_CORO_YIELD - stream_.async_read_some(buf_.data(tmp_, size_), std::move(self)); - if (ec) { - self.complete(ec, 0); - return; - } - - buf_.shrink(buf_.size() - tmp_ - n); - self.complete({}, n); - } - } -}; - -template -auto -async_append_some( - AsyncReadStream& stream, - DynamicBuffer buffer, - std::size_t size, - CompletionToken&& token) -{ - return asio::async_compose - < CompletionToken - , void(system::error_code, std::size_t) - >(append_some_op {stream, buffer, size}, token, stream); -} - -template < - class AsyncReadStream, - class DynamicBuffer, - class ResponseAdapter> -class parse_op { -private: - AsyncReadStream& stream_; - DynamicBuffer buf_; - resp3::parser parser_; - ResponseAdapter adapter_; - bool needs_rescheduling_ = true; - system::error_code ec_; - asio::coroutine coro_{}; - - static std::size_t const growth = 1024; - -public: - parse_op(AsyncReadStream& stream, DynamicBuffer buf, ResponseAdapter adapter) - : stream_ {stream} - , buf_ {std::move(buf)} - , adapter_ {std::move(adapter)} - { } - - template - void operator()( Self& self - , system::error_code ec = {} - , std::size_t = 0) - { - BOOST_ASIO_CORO_REENTER (coro_) - { - while (!resp3::parse(parser_, buffer_view(buf_), adapter_, ec)) { - needs_rescheduling_ = false; - BOOST_ASIO_CORO_YIELD - async_append_some( - stream_, buf_, parser_.get_suggested_buffer_growth(growth), - std::move(self)); - if (ec) { - self.complete(ec, 0); - return; - } - } - - ec_ = ec; - if (needs_rescheduling_) { - BOOST_ASIO_CORO_YIELD - asio::post(std::move(self)); - } - - self.complete(ec_, parser_.get_consumed()); - } - } -}; - -/** \brief Reads a complete response to a command sychronously. - * - * This function reads a complete response to a command or a - * server push synchronously. For example - * - * @code - * int resp; - * std::string buffer; - * resp3::read(socket, dynamic_buffer(buffer), adapt(resp)); - * @endcode - * - * For a complete example see examples/intro_sync.cpp. This function - * is implemented in terms of one or more calls to @c - * asio::read_until and @c asio::read functions, and is known as a @a - * composed @a operation. Furthermore, the implementation may read - * additional bytes from the stream that lie past the end of the - * message being read. These additional bytes are stored in the - * dynamic buffer, which must be preserved for subsequent reads. - * - * \param stream The stream from which to read e.g. a tcp socket. - * \param buf Dynamic buffer (version 2). - * \param adapter The response adapter. 
- * \param ec If an error occurs, it will be assigned to this paramter. - * \returns The number of bytes that have been consumed from the dynamic buffer. - * - * \remark This function calls buf.consume() in each chunk of data - * after it has been passed to the adapter. Users must not consume - * the bytes after it returns. - */ -template < - class SyncReadStream, - class DynamicBuffer, - class ResponseAdapter - > -auto -read( - SyncReadStream& stream, - DynamicBuffer buf, - ResponseAdapter adapter, - system::error_code& ec) -> std::size_t -{ - static std::size_t const growth = 1024; - - resp3::parser parser; - while (!parser.done()) { - auto const res = parser.consume(detail::buffer_view(buf), ec); - if (ec) - return 0UL; - - if (!res.has_value()) { - auto const size_before = buf.size(); - buf.grow(parser.get_suggested_buffer_growth(growth)); - auto const n = - stream.read_some( - buf.data(size_before, parser.get_suggested_buffer_growth(growth)), - ec); - if (ec) - return 0UL; - - buf.shrink(buf.size() - size_before - n); - continue; - } - - adapter(res.value(), ec); - if (ec) - return 0UL; - } - - return parser.get_consumed(); -} - -/** \brief Reads a complete response to a command sychronously. - * - * Same as the error_code overload but throws on error. - */ -template< - class SyncReadStream, - class DynamicBuffer, - class ResponseAdapter = adapter::ignore> -auto -read( - SyncReadStream& stream, - DynamicBuffer buf, - ResponseAdapter adapter = ResponseAdapter{}) -{ - system::error_code ec; - auto const n = redis::detail::read(stream, buf, adapter, ec); - - if (ec) - BOOST_THROW_EXCEPTION(system::system_error{ec}); - - return n; -} - -/** \brief Reads a complete response to a Redis command asynchronously. - * - * This function reads a complete response to a command or a - * server push asynchronously. For example - * - * @code - * std::string buffer; - * std::set resp; - * co_await resp3::async_read(socket, dynamic_buffer(buffer), adapt(resp)); - * @endcode - * - * For a complete example see examples/transaction.cpp. This function - * is implemented in terms of one or more calls to @c - * asio::async_read_until and @c asio::async_read functions, and is - * known as a @a composed @a operation. Furthermore, the - * implementation may read additional bytes from the stream that lie - * past the end of the message being read. These additional bytes are - * stored in the dynamic buffer, which must be preserved for - * subsequent reads. - * - * \param stream The stream from which to read e.g. a tcp socket. - * \param buffer Dynamic buffer (version 2). - * \param adapter The response adapter. - * \param token The completion token. - * - * The completion handler will receive as a parameter the total - * number of bytes transferred from the stream and must have the - * following signature - * - * @code - * void(system::error_code, std::size_t); - * @endcode - * - * \remark This function calls buf.consume() in each chunk of data - * after it has been passed to the adapter. Users must not consume - * the bytes after it returns. 
- */ -template < - class AsyncReadStream, - class DynamicBuffer, - class ResponseAdapter = adapter::ignore, - class CompletionToken = asio::default_completion_token_t - > -auto async_read( - AsyncReadStream& stream, - DynamicBuffer buffer, - ResponseAdapter adapter = ResponseAdapter{}, - CompletionToken&& token = - asio::default_completion_token_t{}) -{ - return asio::async_compose - < CompletionToken - , void(system::error_code, std::size_t) - >(parse_op {stream, buffer, adapter}, - token, - stream); -} - -} // boost::redis::detail - -#endif // BOOST_REDIS_READ_HPP diff --git a/tests/test_low_level.cpp b/tests/test_low_level.cpp index 0fb5b2e3..44419386 100644 --- a/tests/test_low_level.cpp +++ b/tests/test_low_level.cpp @@ -7,16 +7,11 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include + #define BOOST_TEST_MODULE low level #include + #include #include #include @@ -30,16 +25,17 @@ auto operator==(boost::redis::ignore_t, boost::redis::ignore_t) noexcept {return auto operator!=(boost::redis::ignore_t, boost::redis::ignore_t) noexcept {return false;} } -namespace net = boost::asio; namespace redis = boost::redis; namespace resp3 = boost::redis::resp3; +using boost::system::error_code; using boost::redis::request; using boost::redis::response; using boost::redis::ignore; using boost::redis::ignore_t; using boost::redis::adapter::result; +using boost::redis::resp3::parser; +using boost::redis::resp3::parse; -using test_stream = boost::beast::test::stream; using boost::redis::adapter::adapt2; using node_type = result; using vec_node_type = result>; @@ -82,95 +78,51 @@ template struct expect { std::string in; Result expected; - boost::system::error_code ec{}; + error_code ec{}; resp3::type error_type = resp3::type::invalid; }; template -auto make_expected(std::string in, Result expected, boost::system::error_code ec = {}, resp3::type error_type = resp3::type::invalid) +auto make_expected(std::string in, Result expected, error_code ec = {}, resp3::type error_type = resp3::type::invalid) { return expect{in, expected, ec, error_type}; } template -void test_sync(net::any_io_executor ex, expect e) +void test_sync(expect e) { - std::string rbuffer; - test_stream ts {ex}; - ts.append(e.in); + parser p; Result result; - boost::system::error_code ec; - auto dbuf = net::dynamic_buffer(rbuffer); - auto const consumed = redis::detail::read(ts, dbuf, adapt2(result), ec); - if (e.ec) { + auto adapter = adapt2(result); + error_code ec; + auto const res = parse(p, e.in, adapter, ec); + + BOOST_TEST(res); // None of these tests need more data. 
+ + if (ec) { BOOST_CHECK_EQUAL(ec, e.ec); return; } - dbuf.consume(consumed); - - BOOST_TEST(!ec); - BOOST_TEST(rbuffer.empty()); - if (result.has_value()) { - auto const res = result == e.expected; - BOOST_TEST(res); + BOOST_TEST(bool(result == e.expected)); + BOOST_CHECK_EQUAL(e.in.size(), p.get_consumed()); } else { - BOOST_TEST(result.has_error()); BOOST_CHECK_EQUAL(result.error().data_type, e.error_type); } } template -class async_test: public std::enable_shared_from_this> { -private: - std::string rbuffer_; - test_stream ts_; - expect data_; - Result result_; - -public: - async_test(net::any_io_executor ex, expect e) - : ts_{ex} - , data_{e} - { - ts_.append(e.in); - } - - void run() - { - auto self = this->shared_from_this(); - auto f = [self](auto ec, auto) - { - if (self->data_.ec) { - BOOST_CHECK_EQUAL(ec, self->data_.ec); - return; - } - - BOOST_TEST(!ec); - //BOOST_TEST(self->rbuffer_.empty()); - - if (self->result_.has_value()) { - auto const res = self->result_ == self->data_.expected; - BOOST_TEST(res); - } else { - BOOST_TEST(self->result_.has_error()); - BOOST_CHECK_EQUAL(self->result_.error().data_type, self->data_.error_type); - } - }; - - redis::detail::async_read( - ts_, - net::dynamic_buffer(rbuffer_), - adapt2(result_), - f); - } -}; - -template -void test_async(net::any_io_executor ex, expect e) +void test_sync2(expect e) { - std::make_shared>(ex, e)->run(); + parser p; + Result result; + auto adapter = adapt2(result); + error_code ec; + auto const res = parse(p, e.in, adapter, ec); + + BOOST_TEST(res); // None of these tests need more data. + BOOST_CHECK_EQUAL(ec, e.ec); } auto make_blob() @@ -397,173 +349,136 @@ vec_node_type const attr_e1b #define S18d "$0\r\n\r\n" #define NUMBER_TEST_CONDITIONS(test) \ - test(ex, make_expected(S01a, result>{}, boost::redis::error::unexpected_bool_value)); \ - test(ex, make_expected(S01b, result{{false}})); \ - test(ex, make_expected(S01b, node_type{{resp3::type::boolean, 1UL, 0UL, {"f"}}})); \ - test(ex, make_expected(S01c, result{{true}})); \ - test(ex, make_expected(S01c, node_type{{resp3::type::boolean, 1UL, 0UL, {"t"}}})); \ - test(ex, make_expected(S01c, op_bool_ok)); \ - test(ex, make_expected(S01c, result>{}, boost::redis::error::expects_resp3_map)); \ - test(ex, make_expected(S01c, result>{}, boost::redis::error::expects_resp3_set)); \ - test(ex, make_expected(S01c, result>{}, boost::redis::error::expects_resp3_map)); \ - test(ex, make_expected(S01c, result>{}, boost::redis::error::expects_resp3_set)); \ - test(ex, make_expected(S02a, streamed_string_e2)); \ - test(ex, make_expected(S03a, result{}, boost::redis::error::expects_resp3_simple_type));\ - test(ex, make_expected(S03a, result>{}, boost::redis::error::expects_resp3_simple_type));; \ - test(ex, make_expected(S02b, result{}, boost::redis::error::not_a_number)); \ - test(ex, make_expected(S02b, result{std::string{"Hello word"}})); \ - test(ex, make_expected(S02b, streamed_string_e1)); \ - test(ex, make_expected(S02c, result{}, boost::redis::error::not_a_number)); \ - test(ex, make_expected(S05a, node_type{{resp3::type::number, 1UL, 0UL, {"-3"}}})); \ - test(ex, make_expected(S05b, result{11})); \ - test(ex, make_expected(S05b, op_int_ok)); \ - test(ex, make_expected(S05b, result>{}, boost::redis::error::expects_resp3_aggregate)); \ - test(ex, make_expected(S05b, result>{}, boost::redis::error::expects_resp3_map)); \ - test(ex, make_expected(S05b, result>{}, boost::redis::error::expects_resp3_set)); \ - test(ex, make_expected(S05b, result>{}, 
boost::redis::error::expects_resp3_map)); \ - test(ex, make_expected(S05b, result>{}, boost::redis::error::expects_resp3_set)); \ - test(ex, make_expected(s05c, array_type2{}, boost::redis::error::expects_resp3_aggregate));\ - test(ex, make_expected(s05c, node_type{{resp3::type::number, 1UL, 0UL, {"3"}}}));\ - test(ex, make_expected(S06a, op_type_01{})); \ - test(ex, make_expected(S06a, op_type_02{}));\ - test(ex, make_expected(S06a, op_type_03{}));\ - test(ex, make_expected(S06a, op_type_04{}));\ - test(ex, make_expected(S06a, op_type_05{}));\ - test(ex, make_expected(S06a, op_type_06{}));\ - test(ex, make_expected(S06a, op_type_07{}));\ - test(ex, make_expected(S06a, op_type_08{}));\ - test(ex, make_expected(S06a, op_type_09{}));\ - test(ex, make_expected(S07a, push_e1a)); \ - test(ex, make_expected(S07b, push_e1b)); \ - test(ex, make_expected(S04b, map_type{}, boost::redis::error::expects_resp3_map));\ - test(ex, make_expected(S03b, map_e1f));\ - test(ex, make_expected(S03b, map_e1g));\ - test(ex, make_expected(S03b, map_e1k));\ - test(ex, make_expected(S03b, map_expected_1a));\ - test(ex, make_expected(S03b, map_expected_1b));\ - test(ex, make_expected(S03b, map_expected_1c));\ - test(ex, make_expected(S03b, map_expected_1d));\ - test(ex, make_expected(S03b, map_expected_1e));\ - test(ex, make_expected(S08a, attr_e1a)); \ - test(ex, make_expected(S08b, attr_e1b)); \ - test(ex, make_expected(S04e, array_e1a));\ - test(ex, make_expected(S04e, array_e1b));\ - test(ex, make_expected(S04e, array_e1c));\ - test(ex, make_expected(S04e, array_e1f));\ - test(ex, make_expected(S04e, array_e1g));\ - test(ex, make_expected(S04e, array_e1h));\ - test(ex, make_expected(S04e, array_type2{}, boost::redis::error::incompatible_size));\ - test(ex, make_expected(S04e, tuple_int_2{}, boost::redis::error::incompatible_size));\ - test(ex, make_expected(S04f, array_type2{}, boost::redis::error::nested_aggregate_not_supported));\ - test(ex, make_expected(S04g, vec_node_type{}, boost::redis::error::exceeeds_max_nested_depth));\ - test(ex, make_expected(S04h, array_e1d));\ - test(ex, make_expected(S04h, array_e1e));\ - test(ex, make_expected(S04i, set_type{}, boost::redis::error::expects_resp3_set)); \ - test(ex, make_expected(S09a, set_e1c)); \ - test(ex, make_expected(S09a, set_e1d)); \ - test(ex, make_expected(S09a, set_e1f)); \ - test(ex, make_expected(S09a, set_e1g)); \ - test(ex, make_expected(S09a, set_expected1a)); \ - test(ex, make_expected(S09a, set_expected_1e)); \ - test(ex, make_expected(S09a, set_type{{"apple", "one", "orange", "three", "two"}})); \ - test(ex, make_expected(S09b, vec_node_type{{{resp3::type::set, 0UL, 0UL, {}}}})); \ - test(ex, make_expected(S03c, map_type{}));\ - test(ex, make_expected(S11a, node_type{{resp3::type::doublean, 1UL, 0UL, {"1.23"}}}));\ - test(ex, make_expected(S11b, node_type{{resp3::type::doublean, 1UL, 0UL, {"inf"}}}));\ - test(ex, make_expected(S11c, node_type{{resp3::type::doublean, 1UL, 0UL, {"-inf"}}}));\ - test(ex, make_expected(S11d, result{{1.23}}));\ - test(ex, make_expected(S11e, result{{0}}, boost::redis::error::not_a_double));\ - test(ex, make_expected(S13a, node_type{{resp3::type::verbatim_string, 1UL, 0UL, {"txt:Some string"}}}));\ - test(ex, make_expected(S13b, node_type{{resp3::type::verbatim_string, 1UL, 0UL, {}}}));\ - test(ex, make_expected(S14a, node_type{{resp3::type::big_number, 1UL, 0UL, {"3492890328409238509324850943850943825024385"}}}));\ - test(ex, make_expected(S14b, result{}, boost::redis::error::empty_field));\ - test(ex, 
make_expected(S15a, result>{{"OK"}}));\ - test(ex, make_expected(S15a, result{{"OK"}}));\ - test(ex, make_expected(S15b, result>{""}));\ - test(ex, make_expected(S15b, result{{""}}));\ - test(ex, make_expected(S16a, result{}, boost::redis::error::invalid_data_type));\ - test(ex, make_expected(S05d, result{11}, boost::redis::error::not_a_number));\ - test(ex, make_expected(S03d, map_type{}, boost::redis::error::not_a_number));\ - test(ex, make_expected(S02d, result{}, boost::redis::error::not_a_number));\ - test(ex, make_expected(S17a, result{}, boost::redis::error::not_a_number));\ - test(ex, make_expected(S05e, result{}, boost::redis::error::empty_field));\ - test(ex, make_expected(S01d, result>{}, boost::redis::error::empty_field));\ - test(ex, make_expected(S11f, result{}, boost::redis::error::empty_field));\ - test(ex, make_expected(S17b, node_type{{resp3::type::blob_string, 1UL, 0UL, {"hh"}}}));\ - test(ex, make_expected(S18c, node_type{{resp3::type::blob_string, 1UL, 0UL, {"hhaa\aaaa\raaaaa\r\naaaaaaaaaa"}}}));\ - test(ex, make_expected(S18d, node_type{{resp3::type::blob_string, 1UL, 0UL, {}}}));\ - test(ex, make_expected(make_blob_string(blob), node_type{{resp3::type::blob_string, 1UL, 0UL, {blob}}}));\ - test(ex, make_expected(S04a, result>{{11}})); \ - test(ex, make_expected(S04d, result>>{response>{{set_e1c}}})); \ - test(ex, make_expected(S04c, result>>{response>{{map_expected_1b}}}));\ - test(ex, make_expected(S03b, map_e1l));\ - test(ex, make_expected(S06a, result{0}, {}, resp3::type::null)); \ - test(ex, make_expected(S06a, map_type{}, {}, resp3::type::null));\ - test(ex, make_expected(S06a, array_type{}, {}, resp3::type::null));\ - test(ex, make_expected(S06a, result>{}, {}, resp3::type::null));\ - test(ex, make_expected(S06a, result>{}, {}, resp3::type::null));\ - test(ex, make_expected(S10a, result{}, boost::redis::error::resp3_simple_error)); \ - test(ex, make_expected(S10a, node_type{{resp3::type::simple_error, 1UL, 0UL, {"Error"}}}, {}, resp3::type::simple_error)); \ - test(ex, make_expected(S10b, node_type{{resp3::type::simple_error, 1UL, 0UL, {""}}}, {}, resp3::type::simple_error)); \ - test(ex, make_expected(S12a, node_type{{resp3::type::blob_error, 1UL, 0UL, {"SYNTAX invalid syntax"}}}, {}, resp3::type::blob_error));\ - test(ex, make_expected(S12b, node_type{{resp3::type::blob_error, 1UL, 0UL, {}}}, {}, resp3::type::blob_error));\ - test(ex, make_expected(S12c, result{}, boost::redis::error::resp3_blob_error));\ - -BOOST_AUTO_TEST_CASE(parser) + test(make_expected(S01a, result>{}, boost::redis::error::unexpected_bool_value)); \ + test(make_expected(S01b, result{{false}})); \ + test(make_expected(S01b, node_type{{resp3::type::boolean, 1UL, 0UL, {"f"}}})); \ + test(make_expected(S01c, result{{true}})); \ + test(make_expected(S01c, node_type{{resp3::type::boolean, 1UL, 0UL, {"t"}}})); \ + test(make_expected(S01c, op_bool_ok)); \ + test(make_expected(S01c, result>{}, boost::redis::error::expects_resp3_map)); \ + test(make_expected(S01c, result>{}, boost::redis::error::expects_resp3_set)); \ + test(make_expected(S01c, result>{}, boost::redis::error::expects_resp3_map)); \ + test(make_expected(S01c, result>{}, boost::redis::error::expects_resp3_set)); \ + test(make_expected(S02a, streamed_string_e2)); \ + test(make_expected(S03a, result{}, boost::redis::error::expects_resp3_simple_type));\ + test(make_expected(S03a, result>{}, boost::redis::error::expects_resp3_simple_type));; \ + test(make_expected(S02b, result{}, boost::redis::error::not_a_number)); \ + 
test(make_expected(S02b, result{std::string{"Hello word"}})); \ + test(make_expected(S02b, streamed_string_e1)); \ + test(make_expected(S02c, result{}, boost::redis::error::not_a_number)); \ + test(make_expected(S05a, node_type{{resp3::type::number, 1UL, 0UL, {"-3"}}})); \ + test(make_expected(S05b, result{11})); \ + test(make_expected(S05b, op_int_ok)); \ + test(make_expected(S05b, result>{}, boost::redis::error::expects_resp3_aggregate)); \ + test(make_expected(S05b, result>{}, boost::redis::error::expects_resp3_map)); \ + test(make_expected(S05b, result>{}, boost::redis::error::expects_resp3_set)); \ + test(make_expected(S05b, result>{}, boost::redis::error::expects_resp3_map)); \ + test(make_expected(S05b, result>{}, boost::redis::error::expects_resp3_set)); \ + test(make_expected(s05c, array_type2{}, boost::redis::error::expects_resp3_aggregate));\ + test(make_expected(s05c, node_type{{resp3::type::number, 1UL, 0UL, {"3"}}}));\ + test(make_expected(S06a, op_type_01{})); \ + test(make_expected(S06a, op_type_02{}));\ + test(make_expected(S06a, op_type_03{}));\ + test(make_expected(S06a, op_type_04{}));\ + test(make_expected(S06a, op_type_05{}));\ + test(make_expected(S06a, op_type_06{}));\ + test(make_expected(S06a, op_type_07{}));\ + test(make_expected(S06a, op_type_08{}));\ + test(make_expected(S06a, op_type_09{}));\ + test(make_expected(S07a, push_e1a)); \ + test(make_expected(S07b, push_e1b)); \ + test(make_expected(S04b, map_type{}, boost::redis::error::expects_resp3_map));\ + test(make_expected(S03b, map_e1f));\ + test(make_expected(S03b, map_e1g));\ + test(make_expected(S03b, map_e1k));\ + test(make_expected(S03b, map_expected_1a));\ + test(make_expected(S03b, map_expected_1b));\ + test(make_expected(S03b, map_expected_1c));\ + test(make_expected(S03b, map_expected_1d));\ + test(make_expected(S03b, map_expected_1e));\ + test(make_expected(S08a, attr_e1a)); \ + test(make_expected(S08b, attr_e1b)); \ + test(make_expected(S04e, array_e1a));\ + test(make_expected(S04e, array_e1b));\ + test(make_expected(S04e, array_e1c));\ + test(make_expected(S04e, array_e1f));\ + test(make_expected(S04e, array_e1g));\ + test(make_expected(S04e, array_e1h));\ + test(make_expected(S04e, array_type2{}, boost::redis::error::incompatible_size));\ + test(make_expected(S04e, tuple_int_2{}, boost::redis::error::incompatible_size));\ + test(make_expected(S04f, array_type2{}, boost::redis::error::nested_aggregate_not_supported));\ + test(make_expected(S04g, vec_node_type{}, boost::redis::error::exceeeds_max_nested_depth));\ + test(make_expected(S04h, array_e1d));\ + test(make_expected(S04h, array_e1e));\ + test(make_expected(S04i, set_type{}, boost::redis::error::expects_resp3_set)); \ + test(make_expected(S09a, set_e1c)); \ + test(make_expected(S09a, set_e1d)); \ + test(make_expected(S09a, set_e1f)); \ + test(make_expected(S09a, set_e1g)); \ + test(make_expected(S09a, set_expected1a)); \ + test(make_expected(S09a, set_expected_1e)); \ + test(make_expected(S09a, set_type{{"apple", "one", "orange", "three", "two"}})); \ + test(make_expected(S09b, vec_node_type{{{resp3::type::set, 0UL, 0UL, {}}}})); \ + test(make_expected(S03c, map_type{}));\ + test(make_expected(S11a, node_type{{resp3::type::doublean, 1UL, 0UL, {"1.23"}}}));\ + test(make_expected(S11b, node_type{{resp3::type::doublean, 1UL, 0UL, {"inf"}}}));\ + test(make_expected(S11c, node_type{{resp3::type::doublean, 1UL, 0UL, {"-inf"}}}));\ + test(make_expected(S11d, result{{1.23}}));\ + test(make_expected(S11e, result{{0}}, 
boost::redis::error::not_a_double));\ + test(make_expected(S13a, node_type{{resp3::type::verbatim_string, 1UL, 0UL, {"txt:Some string"}}}));\ + test(make_expected(S13b, node_type{{resp3::type::verbatim_string, 1UL, 0UL, {}}}));\ + test(make_expected(S14a, node_type{{resp3::type::big_number, 1UL, 0UL, {"3492890328409238509324850943850943825024385"}}}));\ + test(make_expected(S14b, result{}, boost::redis::error::empty_field));\ + test(make_expected(S15a, result>{{"OK"}}));\ + test(make_expected(S15a, result{{"OK"}}));\ + test(make_expected(S15b, result>{""}));\ + test(make_expected(S15b, result{{""}}));\ + test(make_expected(S16a, result{}, boost::redis::error::invalid_data_type));\ + test(make_expected(S05d, result{11}, boost::redis::error::not_a_number));\ + test(make_expected(S03d, map_type{}, boost::redis::error::not_a_number));\ + test(make_expected(S02d, result{}, boost::redis::error::not_a_number));\ + test(make_expected(S17a, result{}, boost::redis::error::not_a_number));\ + test(make_expected(S05e, result{}, boost::redis::error::empty_field));\ + test(make_expected(S01d, result>{}, boost::redis::error::empty_field));\ + test(make_expected(S11f, result{}, boost::redis::error::empty_field));\ + test(make_expected(S17b, node_type{{resp3::type::blob_string, 1UL, 0UL, {"hh"}}}));\ + test(make_expected(S18c, node_type{{resp3::type::blob_string, 1UL, 0UL, {"hhaa\aaaa\raaaaa\r\naaaaaaaaaa"}}}));\ + test(make_expected(S18d, node_type{{resp3::type::blob_string, 1UL, 0UL, {}}}));\ + test(make_expected(make_blob_string(blob), node_type{{resp3::type::blob_string, 1UL, 0UL, {blob}}}));\ + test(make_expected(S04a, result>{{11}})); \ + test(make_expected(S04d, result>>{response>{{set_e1c}}})); \ + test(make_expected(S04c, result>>{response>{{map_expected_1b}}}));\ + test(make_expected(S03b, map_e1l));\ + test(make_expected(S06a, result{0}, {}, resp3::type::null)); \ + test(make_expected(S06a, map_type{}, {}, resp3::type::null));\ + test(make_expected(S06a, array_type{}, {}, resp3::type::null));\ + test(make_expected(S06a, result>{}, {}, resp3::type::null));\ + test(make_expected(S06a, result>{}, {}, resp3::type::null));\ + test(make_expected(S10a, result{}, boost::redis::error::resp3_simple_error)); \ + test(make_expected(S10a, node_type{{resp3::type::simple_error, 1UL, 0UL, {"Error"}}}, {}, resp3::type::simple_error)); \ + test(make_expected(S10b, node_type{{resp3::type::simple_error, 1UL, 0UL, {""}}}, {}, resp3::type::simple_error)); \ + test(make_expected(S12a, node_type{{resp3::type::blob_error, 1UL, 0UL, {"SYNTAX invalid syntax"}}}, {}, resp3::type::blob_error));\ + test(make_expected(S12b, node_type{{resp3::type::blob_error, 1UL, 0UL, {}}}, {}, resp3::type::blob_error));\ + test(make_expected(S12c, result{}, boost::redis::error::resp3_blob_error));\ + +BOOST_AUTO_TEST_CASE(sansio) { - net::io_context ioc; - - auto ex = ioc.get_executor(); - -#define TEST test_sync - NUMBER_TEST_CONDITIONS(TEST) -#undef TEST - -#define TEST test_async - NUMBER_TEST_CONDITIONS(TEST) -#undef TEST - - ioc.run(); + NUMBER_TEST_CONDITIONS(test_sync) } BOOST_AUTO_TEST_CASE(ignore_adapter_simple_error) { - net::io_context ioc; - std::string rbuffer; - - boost::system::error_code ec; - - test_stream ts {ioc}; - ts.append(S10a); - redis::detail::read(ts, net::dynamic_buffer(rbuffer), adapt2(ignore), ec); - BOOST_CHECK_EQUAL(ec, boost::redis::error::resp3_simple_error); - BOOST_TEST(!rbuffer.empty()); + test_sync2(make_expected(S10a, ignore, boost::redis::error::resp3_simple_error)); } 
BOOST_AUTO_TEST_CASE(ignore_adapter_blob_error) { - net::io_context ioc; - std::string rbuffer; - boost::system::error_code ec; - - test_stream ts {ioc}; - ts.append(S12a); - redis::detail::read(ts, net::dynamic_buffer(rbuffer), adapt2(ignore), ec); - BOOST_CHECK_EQUAL(ec, boost::redis::error::resp3_blob_error); - BOOST_TEST(!rbuffer.empty()); + test_sync2(make_expected(S12a, ignore, boost::redis::error::resp3_blob_error)); } BOOST_AUTO_TEST_CASE(ignore_adapter_no_error) { - net::io_context ioc; - std::string rbuffer; - boost::system::error_code ec; - - test_stream ts {ioc}; - ts.append(S05b); - auto const consumed = redis::detail::read(ts, net::dynamic_buffer(rbuffer), adapt2(ignore), ec); - BOOST_TEST(!ec); - BOOST_CHECK_EQUAL(rbuffer.size(), consumed); + test_sync2(make_expected(S05b, ignore)); } //----------------------------------------------------------------------------------- @@ -660,7 +575,7 @@ BOOST_AUTO_TEST_CASE(adapter) using boost::redis::adapter::boost_redis_adapt; using resp3::type; - boost::system::error_code ec; + error_code ec; response resp; @@ -674,4 +589,3 @@ BOOST_AUTO_TEST_CASE(adapter) BOOST_CHECK_EQUAL(std::get<1>(resp).value(), 42); BOOST_TEST(!ec); } - diff --git a/tests/test_low_level_async.cpp b/tests/test_low_level_async.cpp deleted file mode 100644 index b8b47d14..00000000 --- a/tests/test_low_level_async.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com) - * - * Distributed under the Boost Software License, Version 1.0. (See - * accompanying file LICENSE.txt) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#define BOOST_TEST_MODULE conn-tls -#include -#if defined(BOOST_ASIO_HAS_CO_AWAIT) - -namespace net = boost::asio; -namespace redis = boost::redis; -using resolver = net::use_awaitable_t<>::as_default_on_t; -using tcp_socket = net::use_awaitable_t<>::as_default_on_t; -using boost::redis::adapter::adapt2; -using net::ip::tcp; -using boost::redis::request; -using boost::redis::adapter::result; -using redis::config; - -auto co_main(config cfg) -> net::awaitable -{ - auto ex = co_await net::this_coro::executor; - - resolver resv{ex}; - auto const addrs = co_await resv.async_resolve(cfg.addr.host, cfg.addr.port); - tcp_socket socket{ex}; - co_await net::async_connect(socket, addrs); - - // Creates the request and writes to the socket. - request req; - req.push("HELLO", 3); - req.push("PING", "Hello world"); - req.push("QUIT"); - co_await redis::detail::async_write(socket, req); - - // Responses - std::string buffer; - result resp; - - std::size_t consumed = 0; - // Reads the responses to all commands in the request. 
- auto dbuf = net::dynamic_buffer(buffer); - consumed = co_await redis::detail::async_read(socket, dbuf); - dbuf.consume(consumed); - consumed = co_await redis::detail::async_read(socket, dbuf, adapt2(resp)); - dbuf.consume(consumed); - consumed = co_await redis::detail::async_read(socket, dbuf); - dbuf.consume(consumed); - - std::cout << "Ping: " << resp.value() << std::endl; -} - -BOOST_AUTO_TEST_CASE(low_level_async) -{ - net::io_context ioc; - net::co_spawn(ioc, co_main({}), net::detached); - ioc.run(); -} - -#else // defined(BOOST_ASIO_HAS_CO_AWAIT) - -BOOST_AUTO_TEST_CASE(low_level_async) -{ -} - -#endif // defined(BOOST_ASIO_HAS_CO_AWAIT) diff --git a/tests/test_low_level_sync.cpp b/tests/test_low_level_sync.cpp deleted file mode 100644 index 2349fbee..00000000 --- a/tests/test_low_level_sync.cpp +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com) - * - * Distributed under the Boost Software License, Version 1.0. (See - * accompanying file LICENSE.txt) - */ - -#include -#include -#include -#include -#include -#define BOOST_TEST_MODULE conn-quit -#include -#include -#include - -namespace net = boost::asio; -namespace redis = boost::redis; -using boost::redis::adapter::adapt2; -using boost::redis::request; -using boost::redis::adapter::result; - -BOOST_AUTO_TEST_CASE(low_level_sync) -{ - try { - std::string const host = "127.0.0.1"; - std::string const port = "6379"; - - net::io_context ioc; - net::ip::tcp::resolver resv{ioc}; - auto const res = resv.resolve(host, port); - net::ip::tcp::socket socket{ioc}; - net::connect(socket, res); - - // Creates the request and writes to the socket. - request req; - req.push("HELLO", 3); - req.push("PING", "Hello world"); - req.push("QUIT"); - redis::detail::write(socket, req); - - std::string buffer; - result resp; - - std::size_t consumed = 0; - // Reads the responses to all commands in the request. - auto dbuf = net::dynamic_buffer(buffer); - consumed = redis::detail::read(socket, dbuf); - dbuf.consume(consumed); - consumed = redis::detail::read(socket, dbuf, adapt2(resp)); - dbuf.consume(consumed); - consumed = redis::detail::read(socket, dbuf); - dbuf.consume(consumed); - - std::cout << "Ping: " << resp.value() << std::endl; - - } catch (std::exception const& e) { - std::cerr << e.what() << std::endl; - exit(EXIT_FAILURE); - } -} From 4547e1ac0792f22be45cd4a2a89b6ebf2f15b57b Mon Sep 17 00:00:00 2001 From: Marcelo Zimbres Date: Mon, 4 Sep 2023 14:00:12 +0200 Subject: [PATCH 06/51] First steps with using adapters to process a generic_response. 
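The templated operator() overloads added here are what make it possible to feed an adapter with nodes that are already stored in a generic_response, instead of only with nodes coming straight from the parser. Below is a minimal sketch of that usage, mirroring the adapter_as test added at the end of this patch; the helper name replay_into_set, the element type std::set<std::string> and the header paths are illustrative assumptions rather than anything this commit prescribes.

    #include <boost/redis/adapter/adapt.hpp>
    #include <boost/redis/response.hpp>
    #include <boost/system/error_code.hpp>

    #include <set>
    #include <string>

    // Replays the nodes buffered in a generic_response through a typed
    // adapter, extracting them into a std::set<std::string>.
    auto replay_into_set(boost::redis::generic_response const& resp)
    {
       boost::redis::adapter::result<std::set<std::string>> set;
       auto adapter = boost::redis::adapter::adapt2(set);

       if (resp.has_value()) {
          for (auto const& node : resp.value()) {
             boost::system::error_code ec;
             adapter(node, ec); // Feeds one resp3 node at a time.
          }
       }

       return set;
    }
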
--- .../boost/redis/adapter/detail/adapters.hpp | 36 ++++++++++++------- .../redis/adapter/detail/response_traits.hpp | 12 ++++--- .../redis/adapter/detail/result_traits.hpp | 6 ++-- tests/test_low_level.cpp | 13 +++++++ 4 files changed, 49 insertions(+), 18 deletions(-) diff --git a/include/boost/redis/adapter/detail/adapters.hpp b/include/boost/redis/adapter/detail/adapters.hpp index 43bf6866..82875ec4 100644 --- a/include/boost/redis/adapter/detail/adapters.hpp +++ b/include/boost/redis/adapter/detail/adapters.hpp @@ -92,7 +92,8 @@ class general_aggregate { public: explicit general_aggregate(Result* c = nullptr): result_(c) {} - void operator()(resp3::basic_node const& nd, system::error_code&) + template + void operator()(resp3::basic_node const& nd, system::error_code&) { BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer"); switch (nd.data_type) { @@ -114,7 +115,8 @@ class general_simple { public: explicit general_simple(Node* t = nullptr) : result_(t) {} - void operator()(resp3::basic_node const& nd, system::error_code&) + template + void operator()(resp3::basic_node const& nd, system::error_code&) { BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer"); switch (nd.data_type) { @@ -136,10 +138,11 @@ class simple_impl { public: void on_value_available(Result&) {} + template void operator()( Result& result, - resp3::basic_node const& n, + resp3::basic_node const& n, system::error_code& ec) { if (is_aggregate(n.data_type)) { @@ -160,10 +163,11 @@ class set_impl { void on_value_available(Result& result) { hint_ = std::end(result); } + template void operator()( Result& result, - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { @@ -195,10 +199,11 @@ class map_impl { void on_value_available(Result& result) { current_ = std::end(result); } + template void operator()( Result& result, - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { @@ -233,10 +238,11 @@ class vector_impl { public: void on_value_available(Result& ) { } + template void operator()( Result& result, - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { @@ -257,10 +263,11 @@ class array_impl { public: void on_value_available(Result& ) { } + template void operator()( Result& result, - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { @@ -292,10 +299,11 @@ struct list_impl { void on_value_available(Result& ) { } + template void operator()( Result& result, - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { if (!is_aggregate(nd.data_type)) { @@ -365,7 +373,8 @@ class wrapper> { response_type* result_; typename impl_map::type impl_; - bool set_if_resp3_error(resp3::basic_node const& nd) noexcept + template + bool set_if_resp3_error(resp3::basic_node const& nd) noexcept { switch (nd.data_type) { case resp3::type::null: @@ -387,9 +396,10 @@ class wrapper> { } } + template void operator()( - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer"); @@ -414,7 +424,8 @@ class wrapper>> { response_type* result_; typename impl_map::type impl_{}; - bool set_if_resp3_error(resp3::basic_node const& nd) noexcept + template + bool set_if_resp3_error(resp3::basic_node const& nd) noexcept { switch (nd.data_type) { case resp3::type::blob_error: @@ 
-429,9 +440,10 @@ class wrapper>> { public: explicit wrapper(response_type* o = nullptr) : result_(o) {} + template void operator()( - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer"); diff --git a/include/boost/redis/adapter/detail/response_traits.hpp b/include/boost/redis/adapter/detail/response_traits.hpp index 919ed255..78bd1e28 100644 --- a/include/boost/redis/adapter/detail/response_traits.hpp +++ b/include/boost/redis/adapter/detail/response_traits.hpp @@ -23,8 +23,9 @@ namespace boost::redis::adapter::detail class ignore_adapter { public: + template void - operator()(std::size_t, resp3::basic_node const& nd, system::error_code& ec) + operator()(std::size_t, resp3::basic_node const& nd, system::error_code& ec) { switch (nd.data_type) { case resp3::type::simple_error: ec = redis::error::resp3_simple_error; break; @@ -59,10 +60,11 @@ class static_adapter { auto get_supported_response_size() const noexcept { return size;} + template void operator()( std::size_t i, - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { using std::visit; @@ -88,10 +90,11 @@ class vector_adapter { get_supported_response_size() const noexcept { return static_cast(-1);} + template void operator()( std::size_t, - resp3::basic_node const& nd, + resp3::basic_node const& nd, system::error_code& ec) { adapter_(nd, ec); @@ -142,7 +145,8 @@ class wrapper { public: explicit wrapper(Adapter adapter) : adapter_{adapter} {} - void operator()(resp3::basic_node const& nd, system::error_code& ec) + template + void operator()(resp3::basic_node const& nd, system::error_code& ec) { return adapter_(0, nd, ec); } [[nodiscard]] diff --git a/include/boost/redis/adapter/detail/result_traits.hpp b/include/boost/redis/adapter/detail/result_traits.hpp index 09c3b520..22cdebd3 100644 --- a/include/boost/redis/adapter/detail/result_traits.hpp +++ b/include/boost/redis/adapter/detail/result_traits.hpp @@ -116,7 +116,8 @@ class static_aggregate_adapter> { } } - void count(resp3::basic_node const& nd) + template + void count(resp3::basic_node const& nd) { if (nd.depth == 1) { if (is_aggregate(nd.data_type)) @@ -131,7 +132,8 @@ class static_aggregate_adapter> { ++i_; } - void operator()(resp3::basic_node const& nd, system::error_code& ec) + template + void operator()(resp3::basic_node const& nd, system::error_code& ec) { using std::visit; diff --git a/tests/test_low_level.cpp b/tests/test_low_level.cpp index 44419386..6bfe8d0f 100644 --- a/tests/test_low_level.cpp +++ b/tests/test_low_level.cpp @@ -589,3 +589,16 @@ BOOST_AUTO_TEST_CASE(adapter) BOOST_CHECK_EQUAL(std::get<1>(resp).value(), 42); BOOST_TEST(!ec); } + +// TODO: This was an experiment, I will resume implementing this +// later. +BOOST_AUTO_TEST_CASE(adapter_as) +{ + result> set; + auto adapter = adapt2(set); + + for (auto const& e: set_expected1a.value()) { + error_code ec; + adapter(e, ec); + } +} From 2a4936a9e1520c8ef037ecdebc856457788e051c Mon Sep 17 00:00:00 2001 From: Marcelo Zimbres Date: Thu, 7 Sep 2023 00:08:26 +0200 Subject: [PATCH 07/51] Implements batch reads for server pushes. 
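Server pushes are now buffered in the receive channel, so more than one of them can be waiting by the time the application is resumed. The loop below sketches the consumption pattern this enables; it condenses the cpp20_subscriber.cpp changes further down in this patch, and the receiver signature plus the exact header list are assumptions made only to keep the example self-contained.

    #include <boost/asio/awaitable.hpp>
    #include <boost/asio/redirect_error.hpp>
    #include <boost/asio/use_awaitable.hpp>
    #include <boost/redis/connection.hpp>
    #include <boost/redis/error.hpp>
    #include <boost/redis/response.hpp>

    #include <memory>

    namespace net = boost::asio;
    using boost::redis::connection;
    using boost::redis::error;
    using boost::redis::generic_response;

    auto receiver(std::shared_ptr<connection> conn) -> net::awaitable<void>
    {
       generic_response resp;
       conn->set_receive_response(resp);

       for (boost::system::error_code ec;;) {
          // Drain pushes that are already buffered, without suspending.
          conn->receive(ec);
          if (ec == error::sync_receive_push_failed) {
             // Nothing buffered: suspend until the next push arrives.
             ec = {};
             co_await conn->async_receive(
                net::redirect_error(net::use_awaitable, ec));
          }

          if (ec)
             co_return; // Connection lost.

          // resp may now hold more than one push, so consume only the
          // front one instead of clearing the whole response.
          // ... inspect resp.value() here ...
          boost::redis::consume_one(resp);
       }
    }
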
--- README.md | 26 ++++-- examples/cpp20_subscriber.cpp | 16 +++- .../boost/redis/adapter/detail/adapters.hpp | 41 ++------- .../redis/adapter/detail/response_traits.hpp | 15 +-- include/boost/redis/connection.hpp | 23 +++++ .../boost/redis/detail/connection_base.hpp | 28 +++++- include/boost/redis/error.hpp | 6 ++ include/boost/redis/impl/error.ipp | 3 + include/boost/redis/impl/response.ipp | 48 ++++++++++ include/boost/redis/response.hpp | 47 +++++++++- include/boost/redis/src.hpp | 1 + tests/common.hpp | 2 +- tests/test_conn_check_health.cpp | 4 +- tests/test_conn_echo_stress.cpp | 48 ++++------ tests/test_conn_push.cpp | 42 ++++++--- tests/test_low_level.cpp | 91 ++++++++++++++++--- 16 files changed, 323 insertions(+), 118 deletions(-) create mode 100644 include/boost/redis/impl/response.ipp diff --git a/README.md b/README.md index e6b70566..6a69e0db 100644 --- a/README.md +++ b/README.md @@ -674,22 +674,34 @@ https://lists.boost.org/Archives/boost/2023/01/253944.php. ## Changelog -### develop (incorporates changes to conform the boost review and more) +### develop * Deprecates the `async_receive` overload that takes a response. Users - should now first call `set_receive_response` to avoid contantly seting - the same response. + should now first call `set_receive_response` to avoid constantly and + unnecessarily setting the same response. * Uses `std::function` to type erase the response adapter. This change should not influence users in any way but allowed important - simplification in the connections internals. This resulted in big - performance improvement where one of my benchmark programs passed - from 190k/s to 473k/s. + simplification in the connection's internals. This resulted in a + massive performance improvement. * The connection has a new member `get_usage()` that returns the - connection usage information, such as number of bytes writen, + connection usage information, such as the number of bytes written, received etc. +* There are massive performance improvements in the consumption of + server pushes, which are now communicated with an `asio::channel` and + can therefore be buffered, which avoids blocking the socket read-loop. + Batch reads are also supported by means of `channel.try_send` and + buffered messages can be consumed synchronously with + `connection::receive`. The function `boost::redis::consume_one` has + been added to simplify processing multiple server pushes contained + in the same `generic_response`. *IMPORTANT*: These changes may + result in more than one push in the response when + `connection::async_receive` resumes. The user must therefore be + careful when calling `resp.clear()`: either ensure that all messages + have been processed or just use `consume_one`.
+ ### v1.4.2 (incorporates changes to conform the boost review and more) * Adds `boost::redis::config::database_index` to make it possible to diff --git a/examples/cpp20_subscriber.cpp b/examples/cpp20_subscriber.cpp index ac1cc884..c112d75c 100644 --- a/examples/cpp20_subscriber.cpp +++ b/examples/cpp20_subscriber.cpp @@ -22,9 +22,11 @@ namespace asio = boost::asio; using namespace std::chrono_literals; using boost::redis::request; using boost::redis::generic_response; +using boost::redis::consume_one; using boost::redis::logger; using boost::redis::config; using boost::redis::ignore; +using boost::redis::error; using boost::system::error_code; using boost::redis::connection; using signal_set = asio::deferred_t::as_default_on_t; @@ -58,20 +60,28 @@ receiver(std::shared_ptr conn) -> asio::awaitable // Loop while reconnection is enabled while (conn->will_reconnect()) { - // Reconnect to channels. + // Reconnect to the channels. co_await conn->async_exec(req, ignore, asio::deferred); // Loop reading Redis pushs messages. for (error_code ec;;) { - co_await conn->async_receive(asio::redirect_error(asio::use_awaitable, ec)); + // First tries to read any buffered pushes. + conn->receive(ec); + if (ec == error::sync_receive_push_failed) { + ec = {}; + co_await conn->async_receive(asio::redirect_error(asio::use_awaitable, ec)); + } + if (ec) break; // Connection lost, break so we can reconnect to channels. + std::cout << resp.value().at(1).value << " " << resp.value().at(2).value << " " << resp.value().at(3).value << std::endl; - resp.value().clear(); + + consume_one(resp); } } } diff --git a/include/boost/redis/adapter/detail/adapters.hpp b/include/boost/redis/adapter/detail/adapters.hpp index 82875ec4..a1f91076 100644 --- a/include/boost/redis/adapter/detail/adapters.hpp +++ b/include/boost/redis/adapter/detail/adapters.hpp @@ -139,11 +139,7 @@ class simple_impl { void on_value_available(Result&) {} template - void - operator()( - Result& result, - resp3::basic_node const& n, - system::error_code& ec) + void operator()(Result& result, resp3::basic_node const& n, system::error_code& ec) { if (is_aggregate(n.data_type)) { ec = redis::error::expects_resp3_simple_type; @@ -164,11 +160,7 @@ class set_impl { { hint_ = std::end(result); } template - void - operator()( - Result& result, - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(Result& result, resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { if (nd.data_type != resp3::type::set) @@ -200,11 +192,7 @@ class map_impl { { current_ = std::end(result); } template - void - operator()( - Result& result, - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(Result& result, resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { if (element_multiplicity(nd.data_type) != 2) @@ -239,11 +227,7 @@ class vector_impl { void on_value_available(Result& ) { } template - void - operator()( - Result& result, - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(Result& result, resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { auto const m = element_multiplicity(nd.data_type); @@ -264,11 +248,7 @@ class array_impl { void on_value_available(Result& ) { } template - void - operator()( - Result& result, - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(Result& result, resp3::basic_node const& nd, system::error_code& ec) { if (is_aggregate(nd.data_type)) { if (i_ != -1) 
{ @@ -300,11 +280,7 @@ struct list_impl { void on_value_available(Result& ) { } template - void - operator()( - Result& result, - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(Result& result, resp3::basic_node const& nd, system::error_code& ec) { if (!is_aggregate(nd.data_type)) { BOOST_ASSERT(nd.aggregate_size == 1); @@ -397,10 +373,7 @@ class wrapper> { } template - void - operator()( - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(resp3::basic_node const& nd, system::error_code& ec) { BOOST_ASSERT_MSG(!!result_, "Unexpected null pointer"); diff --git a/include/boost/redis/adapter/detail/response_traits.hpp b/include/boost/redis/adapter/detail/response_traits.hpp index 78bd1e28..3ba5bfec 100644 --- a/include/boost/redis/adapter/detail/response_traits.hpp +++ b/include/boost/redis/adapter/detail/response_traits.hpp @@ -24,8 +24,7 @@ namespace boost::redis::adapter::detail class ignore_adapter { public: template - void - operator()(std::size_t, resp3::basic_node const& nd, system::error_code& ec) + void operator()(std::size_t, resp3::basic_node const& nd, system::error_code& ec) { switch (nd.data_type) { case resp3::type::simple_error: ec = redis::error::resp3_simple_error; break; @@ -61,11 +60,7 @@ class static_adapter { { return size;} template - void - operator()( - std::size_t i, - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(std::size_t i, resp3::basic_node const& nd, system::error_code& ec) { using std::visit; // I am usure whether this should be an error or an assertion. @@ -91,11 +86,7 @@ class vector_adapter { { return static_cast(-1);} template - void - operator()( - std::size_t, - resp3::basic_node const& nd, - system::error_code& ec) + void operator()(std::size_t, resp3::basic_node const& nd, system::error_code& ec) { adapter_(nd, ec); } diff --git a/include/boost/redis/connection.hpp b/include/boost/redis/connection.hpp index c1cb7dea..0b0c6267 100644 --- a/include/boost/redis/connection.hpp +++ b/include/boost/redis/connection.hpp @@ -188,6 +188,23 @@ class basic_connection { auto async_receive(CompletionToken token = CompletionToken{}) { return impl_.async_receive(std::move(token)); } + + /** @brief Receives server pushes synchronously without blocking. + * + * Receives a server push synchronously by calling `try_receive` on + * the underlying channel. If the operation fails because + * `try_receive` returns `false`, `ec` will be set to + * `boost::redis::error::sync_receive_push_failed`. + * + * @param ec Contains the error if any occurred. + * + * @returns The number of bytes read from the socket. + */ + std::size_t receive(system::error_code& ec) + { + return impl_.receive(ec); + } + template < class Response = ignore_t, class CompletionToken = asio::default_completion_token_t @@ -367,6 +384,12 @@ class connection { auto async_receive(CompletionToken token) { return impl_.async_receive(std::move(token)); } + /// Calls `boost::redis::basic_connection::receive`. + std::size_t receive(system::error_code& ec) + { + return impl_.receive(ec); + } + /// Calls `boost::redis::basic_connection::async_exec`. 
template auto async_exec(request const& req, Response& resp, CompletionToken token) diff --git a/include/boost/redis/detail/connection_base.hpp b/include/boost/redis/detail/connection_base.hpp index 1c4ff578..6afea4cc 100644 --- a/include/boost/redis/detail/connection_base.hpp +++ b/include/boost/redis/detail/connection_base.hpp @@ -348,8 +348,10 @@ struct reader_op { } if (res_.first == parse_result::push) { - BOOST_ASIO_CORO_YIELD - conn_->receive_channel_.async_send(ec, res_.second, std::move(self)); + if (!conn_->receive_channel_.try_send(ec, res_.second)) { + BOOST_ASIO_CORO_YIELD + conn_->receive_channel_.async_send(ec, res_.second, std::move(self)); + } if (ec) { logger_.trace("reader-op: error. Exiting ..."); @@ -398,7 +400,7 @@ class connection_base { : ctx_{method} , stream_{std::make_unique(ex, ctx_)} , writer_timer_{ex} - , receive_channel_{ex} + , receive_channel_{ex, 256} , runner_{ex, {}} , dbuf_{read_buffer_, max_read_size} { @@ -470,6 +472,26 @@ class connection_base { auto async_receive(CompletionToken token) { return receive_channel_.async_receive(std::move(token)); } + std::size_t receive(system::error_code& ec) + { + std::size_t size = 0; + + auto f = [&](system::error_code const& ec2, std::size_t n) + { + ec = ec2; + size = n; + }; + + auto const res = receive_channel_.try_receive(f); + if (ec) + return 0; + + if (!res) + ec = error::sync_receive_push_failed; + + return size; + } + template auto async_run(config const& cfg, Logger l, CompletionToken token) { diff --git a/include/boost/redis/error.hpp b/include/boost/redis/error.hpp index 7424aea7..85b152d2 100644 --- a/include/boost/redis/error.hpp +++ b/include/boost/redis/error.hpp @@ -75,6 +75,12 @@ enum class error /// SSL handshake timeout ssl_handshake_timeout, + + /// Can't receive push synchronously without blocking + sync_receive_push_failed, + + /// Incompatible node depth. + incompatible_node_depth, }; /** \internal diff --git a/include/boost/redis/impl/error.ipp b/include/boost/redis/impl/error.ipp index 9f5c06eb..6a5c8cb8 100644 --- a/include/boost/redis/impl/error.ipp +++ b/include/boost/redis/impl/error.ipp @@ -41,6 +41,9 @@ struct error_category_impl : system::error_category { case error::resolve_timeout: return "Resolve timeout."; case error::connect_timeout: return "Connect timeout."; case error::pong_timeout: return "Pong timeout."; + case error::ssl_handshake_timeout: return "SSL handshake timeout."; + case error::sync_receive_push_failed: return "Can't receive server push synchronously without blocking."; + case error::incompatible_node_depth: return "Incompatible node depth."; default: BOOST_ASSERT(false); return "Boost.Redis error."; } } diff --git a/include/boost/redis/impl/response.ipp b/include/boost/redis/impl/response.ipp new file mode 100644 index 00000000..c2306e1e --- /dev/null +++ b/include/boost/redis/impl/response.ipp @@ -0,0 +1,48 @@ +/* Copyright (c) 2018-2022 Marcelo Zimbres Silva (mzimbres@gmail.com) + * + * Distributed under the Boost Software License, Version 1.0. (See + * accompanying file LICENSE.txt) + */ + +#include +#include +#include + +namespace boost::redis +{ + +void consume_one(generic_response& r, system::error_code& ec) +{ + if (r.has_error()) + return; // Nothing to consume. + + if (std::empty(r.value())) + return; // Nothing to consume. + + auto const depth = r.value().front().depth; + + // To simplify we will refuse to consume any data-type that is not + // a root node. 
I think there is no use for that and it is complex + since it requires updating parent nodes. + if (depth != 0) { + ec = error::incompatible_node_depth; + return; + } + + auto f = [depth](auto const& e) + { return e.depth == depth; }; + + auto match = std::find_if(std::next(std::cbegin(r.value())), std::cend(r.value()), f); + + r.value().erase(std::cbegin(r.value()), match); +} + +void consume_one(generic_response& r) +{ + system::error_code ec; + consume_one(r, ec); + if (ec) + throw system::system_error(ec); +} + +} // boost::redis diff --git a/include/boost/redis/response.hpp b/include/boost/redis/response.hpp index 5f6c5c37..b3f76ad5 100644 --- a/include/boost/redis/response.hpp +++ b/include/boost/redis/response.hpp @@ -9,12 +9,14 @@ #include #include +#include #include #include #include -namespace boost::redis { +namespace boost::redis +{ /** @brief Response with compile-time size. * @ingroup high-level-api @@ -32,6 +34,47 @@ using response = std::tuple...>; */ using generic_response = adapter::result>; -} // boost::redis::resp3 +/** @brief Consume one response from a generic response + * + * This function rotates the elements so that the start of the next + * response becomes the new front element. For example, the output of + * the following code + * + * @code + * request req; + * req.push("PING", "one"); + * req.push("PING", "two"); + * req.push("PING", "three"); + * + * generic_response resp; + * co_await conn->async_exec(req, resp, asio::deferred); + * + * std::cout << "PING: " << resp.value().front().value << std::endl; + * consume_one(resp); + * std::cout << "PING: " << resp.value().front().value << std::endl; + * consume_one(resp); + * std::cout << "PING: " << resp.value().front().value << std::endl; + * @code + * + * is + * + * @code + * PING: one + * PING: two + * PING: three + * @code + * + * Given that this function rotates elements, it won't be very + * efficient for responses with a large number of elements. It was + * introduced mainly to deal with buffered server pushes as shown in + * the cpp20_subscriber.cpp example. In the future queue-like + * responses might be introduced to consume in O(1) operations. + */ +void consume_one(generic_response& r, system::error_code& ec); + +/// Throwing overload of `consume_one`.
+void consume_one(generic_response& r); + +} // boost::redis #endif // BOOST_REDIS_RESPONSE_HPP diff --git a/include/boost/redis/src.hpp b/include/boost/redis/src.hpp index 3a06c3e0..7075bf13 100644 --- a/include/boost/redis/src.hpp +++ b/include/boost/redis/src.hpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/tests/common.hpp b/tests/common.hpp index b5bc7dab..bbe3e3f2 100644 --- a/tests/common.hpp +++ b/tests/common.hpp @@ -21,5 +21,5 @@ run( boost::redis::config cfg = {}, boost::system::error_code ec = boost::asio::error::operation_aborted, boost::redis::operation op = boost::redis::operation::receive, - boost::redis::logger::level l = boost::redis::logger::level::info); + boost::redis::logger::level l = boost::redis::logger::level::disabled); diff --git a/tests/test_conn_check_health.cpp b/tests/test_conn_check_health.cpp index 91ece76a..3038ae0d 100644 --- a/tests/test_conn_check_health.cpp +++ b/tests/test_conn_check_health.cpp @@ -5,6 +5,7 @@ */ #include +#include #include #define BOOST_TEST_MODULE check-health #include @@ -21,6 +22,7 @@ using boost::redis::ignore; using boost::redis::operation; using boost::redis::generic_response; using boost::redis::logger; +using boost::redis::consume_one; using redis::config; // TODO: Test cancel(health_check) @@ -39,7 +41,6 @@ struct push_callback { { BOOST_ASIO_CORO_REENTER (coro) for (;;) { - resp2->value().clear(); BOOST_ASIO_CORO_YIELD conn2->async_receive(*this); if (ec) { @@ -50,6 +51,7 @@ struct push_callback { BOOST_TEST(resp2->has_value()); BOOST_TEST(!resp2->value().empty()); std::clog << "Event> " << resp2->value().front().value << std::endl; + consume_one(*resp2); ++i; diff --git a/tests/test_conn_echo_stress.cpp b/tests/test_conn_echo_stress.cpp index 0cbf3a4c..95115584 100644 --- a/tests/test_conn_echo_stress.cpp +++ b/tests/test_conn_echo_stress.cpp @@ -27,6 +27,7 @@ using boost::redis::logger; using boost::redis::config; using boost::redis::connection; using boost::redis::usage; +using boost::redis::error; std::ostream& operator<<(std::ostream& os, usage const& u) { @@ -45,8 +46,16 @@ auto push_consumer(std::shared_ptr conn, int expected) -> net::await { int c = 0; for (error_code ec;;) { - co_await conn->async_receive(redirect_error(net::use_awaitable, ec)); + conn->receive(ec); + if (ec == error::sync_receive_push_failed) { + ec = {}; + co_await conn->async_receive(redirect_error(net::use_awaitable, ec)); + } else if (!ec) { + //std::cout << "Skipping suspension." << std::endl; + } + if (ec) { + BOOST_TEST(false); std::cout << "push_consumer error: " << ec.message() << std::endl; co_return; } @@ -61,30 +70,10 @@ auto echo_session( std::shared_ptr conn, std::shared_ptr pubs, - std::string id, int n) -> net::awaitable { - auto ex = co_await net::this_coro::executor; - - request req; - response resp; - - for (auto i = 0; i < n; ++i) { - auto const msg = id + "/" + std::to_string(i); - //std::cout << msg << std::endl; - req.push("HELLO", 3); // Just to mess around. - req.push("PING", msg); - req.push("PING", "lsls"); // TODO: Change to HELLO after fixing issue 105. 
- boost::system::error_code ec; - co_await conn->async_exec(req, resp, redir(ec)); - - BOOST_TEST(!ec); - BOOST_REQUIRE_EQUAL(msg, std::get<1>(resp).value()); - req.clear(); - std::get<1>(resp).value().clear(); - + for (auto i = 0; i < n; ++i) co_await conn->async_exec(*pubs, ignore, net::deferred); - } } auto async_echo_stress(std::shared_ptr conn) -> net::awaitable @@ -103,19 +92,20 @@ auto async_echo_stress(std::shared_ptr conn) -> net::awaitable // Number of coroutines that will send pings sharing the same // connection to redis. - int const sessions = 500; + int const sessions = 1000; // The number of pings that will be sent by each session. - int const msgs = 1000; + int const msgs = 500; // The number of publishes that will be sent by each session with // each message. - int const n_pubs = 10; + int const n_pubs = 100; // This is the total number of pushes we will receive. int total_pushes = sessions * msgs * n_pubs + 1; auto pubs = std::make_shared(); + pubs->push("PING"); for (int i = 0; i < n_pubs; ++i) pubs->push("PUBLISH", "channel", "payload"); @@ -124,7 +114,7 @@ auto async_echo_stress(std::shared_ptr conn) -> net::awaitable net::co_spawn(ex, push_consumer(conn, total_pushes), net::detached); for (int i = 0; i < sessions; ++i) - net::co_spawn(ex, echo_session(conn, pubs, std::to_string(i), msgs), net::detached); + net::co_spawn(ex, echo_session(conn, pubs, msgs), net::detached); } BOOST_AUTO_TEST_CASE(echo_stress) @@ -134,8 +124,10 @@ BOOST_AUTO_TEST_CASE(echo_stress) net::co_spawn(ioc, async_echo_stress(conn), net::detached); ioc.run(); - std::cout << "-------------------\n" - << conn->get_usage() << std::endl; + std::cout + << "-------------------\n" + << conn->get_usage() + << std::endl; } #else diff --git a/tests/test_conn_push.cpp b/tests/test_conn_push.cpp index ab2572ce..40eff4ea 100644 --- a/tests/test_conn_push.cpp +++ b/tests/test_conn_push.cpp @@ -26,6 +26,7 @@ using boost::redis::request; using boost::redis::response; using boost::redis::ignore; using boost::redis::ignore_t; +using boost::system::error_code; using redis::config; using boost::redis::logger; using namespace std::chrono_literals; @@ -49,7 +50,7 @@ BOOST_AUTO_TEST_CASE(receives_push_waiting_resps) auto c3 =[](auto ec, auto...) { - BOOST_TEST(!!ec); + std::cout << "c3: " << ec.message() << std::endl; }; auto c2 =[&, conn](auto ec, auto...) @@ -73,8 +74,7 @@ BOOST_AUTO_TEST_CASE(receives_push_waiting_resps) std::cout << "async_receive" << std::endl; BOOST_TEST(!ec); push_received = true; - conn->cancel(operation::run); - conn->cancel(operation::reconnection); + conn->cancel(); }); ioc.run(); @@ -87,29 +87,45 @@ BOOST_AUTO_TEST_CASE(push_received1) net::io_context ioc; auto conn = std::make_shared(ioc); + // Trick: Uses SUBSCRIBE because this command has no response or + // better said, its response is a server push, which is what we + // want to test. We send two because we want to test both + // async_receive and receive. 
request req; - //req.push("HELLO", 3); - req.push("SUBSCRIBE", "channel"); + req.push("SUBSCRIBE", "channel1"); + req.push("SUBSCRIBE", "channel2"); conn->async_exec(req, ignore, [conn](auto ec, auto){ std::cout << "async_exec" << std::endl; BOOST_TEST(!ec); }); - run(conn); - - bool push_received = false; + bool push_async_received = false; conn->async_receive([&, conn](auto ec, auto){ - std::cout << "async_receive" << std::endl; + std::cout << "(1) async_receive" << std::endl; + BOOST_TEST(!ec); - push_received = true; - conn->cancel(operation::run); - conn->cancel(operation::reconnection); + push_async_received = true; + + // Receives the second push synchronously. + error_code ec2; + std::size_t res = 0; + res = conn->receive(ec2); + BOOST_TEST(!ec2); + BOOST_TEST(res != std::size_t(0)); + + // Tries to receive a third push synchronously. + ec2 = {}; + res = conn->receive(ec2); + BOOST_CHECK_EQUAL(ec2, boost::redis::make_error_code(boost::redis::error::sync_receive_push_failed)); + + conn->cancel(); }); + run(conn); ioc.run(); - BOOST_TEST(push_received); + BOOST_TEST(push_async_received); } BOOST_AUTO_TEST_CASE(push_filtered_out) diff --git a/tests/test_low_level.cpp b/tests/test_low_level.cpp index 6bfe8d0f..dadbe454 100644 --- a/tests/test_low_level.cpp +++ b/tests/test_low_level.cpp @@ -30,15 +30,17 @@ namespace resp3 = boost::redis::resp3; using boost::system::error_code; using boost::redis::request; using boost::redis::response; +using boost::redis::generic_response; using boost::redis::ignore; using boost::redis::ignore_t; using boost::redis::adapter::result; using boost::redis::resp3::parser; using boost::redis::resp3::parse; +using boost::redis::consume_one; +using boost::redis::error; using boost::redis::adapter::adapt2; using node_type = result; -using vec_node_type = result>; using vec_type = result>; using op_vec_type = result>>; @@ -154,7 +156,7 @@ result> op_bool_ok = true; // TODO: Test a streamed string that is not finished with a string of // size 0 but other command comes in. 
-vec_node_type streamed_string_e1 +generic_response streamed_string_e1 {{ {boost::redis::resp3::type::streamed_string, 0, 1, ""} , {boost::redis::resp3::type::streamed_string_part, 1, 1, "Hell"} , {boost::redis::resp3::type::streamed_string_part, 1, 1, "o wor"} @@ -162,10 +164,10 @@ vec_node_type streamed_string_e1 , {boost::redis::resp3::type::streamed_string_part, 1, 1, ""} }}; -vec_node_type streamed_string_e2 +generic_response streamed_string_e2 {{{resp3::type::streamed_string, 0UL, 1UL, {}}, {resp3::type::streamed_string_part, 1UL, 1UL, {}} }}; -vec_node_type const push_e1a +generic_response const push_e1a {{ {resp3::type::push, 4UL, 0UL, {}} , {resp3::type::simple_string, 1UL, 1UL, "pubsub"} , {resp3::type::simple_string, 1UL, 1UL, "message"} @@ -173,10 +175,10 @@ vec_node_type const push_e1a , {resp3::type::simple_string, 1UL, 1UL, "some message"} }}; -vec_node_type const push_e1b +generic_response const push_e1b {{{resp3::type::push, 0UL, 0UL, {}}}}; -vec_node_type const set_expected1a +generic_response const set_expected1a {{{resp3::type::set, 6UL, 0UL, {}} , {resp3::type::simple_string, 1UL, 1UL, {"orange"}} , {resp3::type::simple_string, 1UL, 1UL, {"apple"}} @@ -192,7 +194,7 @@ muset_type const set_e1g{{"apple", "one", "orange", "orange", "three", "two"}}; vec_type const set_e1d = {{"orange", "apple", "one", "two", "three", "orange"}}; op_vec_type const set_expected_1e = set_e1d; -vec_node_type const array_e1a +generic_response const array_e1a {{ {resp3::type::array, 3UL, 0UL, {}} , {resp3::type::blob_string, 1UL, 1UL, {"11"}} , {resp3::type::blob_string, 1UL, 1UL, {"22"}} @@ -202,12 +204,12 @@ vec_node_type const array_e1a result> const array_e1b{{11, 22, 3}}; result> const array_e1c{{"11", "22", "3"}}; result> const array_e1d{}; -vec_node_type const array_e1e{{{resp3::type::array, 0UL, 0UL, {}}}}; +generic_response const array_e1e{{{resp3::type::array, 0UL, 0UL, {}}}}; array_type const array_e1f{{11, 22, 3}}; result> const array_e1g{{11, 22, 3}}; result> const array_e1h{{11, 22, 3}}; -vec_node_type const map_expected_1a +generic_response const map_expected_1a {{ {resp3::type::map, 4UL, 0UL, {}} , {resp3::type::blob_string, 1UL, 1UL, {"key1"}} , {resp3::type::blob_string, 1UL, 1UL, {"value1"}} @@ -263,7 +265,7 @@ tuple8_type const map_e1f , std::string{"key3"}, std::string{"value3"} }; -vec_node_type const attr_e1a +generic_response const attr_e1a {{ {resp3::type::attribute, 1UL, 0UL, {}} , {resp3::type::simple_string, 1UL, 1UL, "key-popularity"} , {resp3::type::map, 2UL, 1UL, {}} @@ -273,7 +275,7 @@ vec_node_type const attr_e1a , {resp3::type::doublean, 1UL, 2UL, "0.0012"} } }; -vec_node_type const attr_e1b +generic_response const attr_e1b {{{resp3::type::attribute, 0UL, 0UL, {}} }}; #define S01a "#11\r\n" @@ -407,7 +409,7 @@ vec_node_type const attr_e1b test(make_expected(S04e, array_type2{}, boost::redis::error::incompatible_size));\ test(make_expected(S04e, tuple_int_2{}, boost::redis::error::incompatible_size));\ test(make_expected(S04f, array_type2{}, boost::redis::error::nested_aggregate_not_supported));\ - test(make_expected(S04g, vec_node_type{}, boost::redis::error::exceeeds_max_nested_depth));\ + test(make_expected(S04g, generic_response{}, boost::redis::error::exceeeds_max_nested_depth));\ test(make_expected(S04h, array_e1d));\ test(make_expected(S04h, array_e1e));\ test(make_expected(S04i, set_type{}, boost::redis::error::expects_resp3_set)); \ @@ -418,7 +420,7 @@ vec_node_type const attr_e1b test(make_expected(S09a, set_expected1a)); \ test(make_expected(S09a, 
set_expected_1e)); \ test(make_expected(S09a, set_type{{"apple", "one", "orange", "three", "two"}})); \ - test(make_expected(S09b, vec_node_type{{{resp3::type::set, 0UL, 0UL, {}}}})); \ + test(make_expected(S09b, generic_response{{{resp3::type::set, 0UL, 0UL, {}}}})); \ test(make_expected(S03c, map_type{}));\ test(make_expected(S11a, node_type{{resp3::type::doublean, 1UL, 0UL, {"1.23"}}}));\ test(make_expected(S11b, node_type{{resp3::type::doublean, 1UL, 0UL, {"inf"}}}));\ @@ -496,7 +498,7 @@ void check_error(char const* name, boost::redis::error ev) static_cast::type>(ev))); } -BOOST_AUTO_TEST_CASE(error) +BOOST_AUTO_TEST_CASE(cover_error) { check_error("boost.redis", boost::redis::error::invalid_data_type); check_error("boost.redis", boost::redis::error::not_a_number); @@ -514,6 +516,12 @@ BOOST_AUTO_TEST_CASE(error) check_error("boost.redis", boost::redis::error::not_a_double); check_error("boost.redis", boost::redis::error::resp3_null); check_error("boost.redis", boost::redis::error::not_connected); + check_error("boost.redis", boost::redis::error::resolve_timeout); + check_error("boost.redis", boost::redis::error::connect_timeout); + check_error("boost.redis", boost::redis::error::pong_timeout); + check_error("boost.redis", boost::redis::error::ssl_handshake_timeout); + check_error("boost.redis", boost::redis::error::sync_receive_push_failed); + check_error("boost.redis", boost::redis::error::incompatible_node_depth); } std::string get_type_as_str(boost::redis::resp3::type t) @@ -602,3 +610,58 @@ BOOST_AUTO_TEST_CASE(adapter_as) adapter(e, ec); } } + +BOOST_AUTO_TEST_CASE(cancel_one_1) +{ + auto resp = push_e1a; + BOOST_TEST(resp.has_value()); + + consume_one(resp); + BOOST_TEST(resp.value().empty()); +} + +BOOST_AUTO_TEST_CASE(cancel_one_empty) +{ + generic_response resp; + BOOST_TEST(resp.has_value()); + + consume_one(resp); + BOOST_TEST(resp.value().empty()); +} + +BOOST_AUTO_TEST_CASE(cancel_one_has_error) +{ + generic_response resp = boost::redis::adapter::error{resp3::type::simple_string, {}}; + BOOST_TEST(resp.has_error()); + + consume_one(resp); + BOOST_TEST(resp.has_error()); +} + +BOOST_AUTO_TEST_CASE(cancel_one_has_does_not_consume_past_the_end) +{ + auto resp = push_e1a; + BOOST_TEST(resp.has_value()); + resp.value().insert( + std::cend(resp.value()), + std::cbegin(push_e1a.value()), + std::cend(push_e1a.value())); + + consume_one(resp); + + BOOST_CHECK_EQUAL(resp.value().size(), push_e1a.value().size()); +} + +BOOST_AUTO_TEST_CASE(cancel_one_incompatible_depth) +{ + auto resp = streamed_string_e1; + BOOST_TEST(resp.has_value()); + + error_code ec; + consume_one(resp, ec); + + error_code expected = error::incompatible_node_depth; + BOOST_CHECK_EQUAL(ec, expected); + + BOOST_CHECK_EQUAL(resp.value().size(), push_e1a.value().size()); +} From d5031c3f6983245048cdff0bf25855ef3ccce2a1 Mon Sep 17 00:00:00 2001 From: Ruben Perez Date: Mon, 2 Oct 2023 17:17:44 +0200 Subject: [PATCH 08/51] libraries.json --- meta/libraries.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 meta/libraries.json diff --git a/meta/libraries.json b/meta/libraries.json new file mode 100644 index 00000000..242df684 --- /dev/null +++ b/meta/libraries.json @@ -0,0 +1,16 @@ +{ + "key": "redis", + "name": "Redis", + "authors": [ + "Marcelo Zimbres" + ], + "description": "Redis async client library built on top of Boost.Asio.", + "category": [ + "Concurrent", + "IO" + ], + "maintainers": [ + "Marcelo Zimbres " + ], + "cxxstd": "17" +} \ No newline at end of file From 
be20c0d48c6cf77d6c622cb8af3dc20aadd3bf72 Mon Sep 17 00:00:00 2001 From: Ruben Perez Date: Tue, 3 Oct 2023 16:51:05 +0200 Subject: [PATCH 09/51] Docs via b2 --- doc/Jamfile | 67 ++++++++++++++++++++++++++++++ include/boost/redis/connection.hpp | 2 - include/boost/redis/logger.hpp | 4 +- include/boost/redis/response.hpp | 4 +- 4 files changed, 71 insertions(+), 6 deletions(-) create mode 100644 doc/Jamfile diff --git a/doc/Jamfile b/doc/Jamfile new file mode 100644 index 00000000..f18ccdae --- /dev/null +++ b/doc/Jamfile @@ -0,0 +1,67 @@ +project redis/doc ; + +import doxygen ; +import path ; +import sequence ; + +# All paths must be absolute to work well with the Doxygen rules. +path-constant this_dir : . ; +path-constant include_dir : ../include ; +path-constant examples_dir : ../examples ; +path-constant readme : ../README.md ; +path-constant layout_file : DoxygenLayout.xml ; + +local stylesheet_files = [ path.glob $(this_dir) : *.css ] ; +local includes = [ path.glob-tree $(include_dir) : *.hpp *.cpp ] ; +local examples = [ path.glob-tree $(examples_dir) : *.hpp *.cpp ] ; + +# If passed directly, several HTML_EXTRA_STYLESHEET tags are generated, +# which is not correct. +local stylesheet_arg = [ sequence.join $(stylesheet_files) : " " ] ; + +doxygen autodoc.html + : + $(includes) $(examples) $(readme) + : + PROJECT_NAME=boost_redis + PROJECT_NUMBER="1.4.2" + PROJECT_BRIEF="A redis client library" + BUILTIN_STL_SUPPORT=YES + INLINE_SIMPLE_STRUCTS=YES + HIDE_UNDOC_MEMBERS=YES + HIDE_UNDOC_CLASSES=YES + SHOW_HEADERFILE=YES + SORT_BRIEF_DOCS=YES + SORT_MEMBERS_CTORS_1ST=YES + SHOW_FILES=NO + SHOW_NAMESPACES=NO + LAYOUT_FILE=$(layout_file) + WARN_IF_INCOMPLETE_DOC=YES + FILE_PATTERNS="*.hpp *.cpp" + EXCLUDE_SYMBOLS=std + USE_MDFILE_AS_MAINPAGE=$(readme) + SOURCE_BROWSER=YES + "HTML_EXTRA_STYLESHEET=$(stylesheet_arg)" + HTML_TIMESTAMP=YES + GENERATE_TREEVIEW=YES + FULL_SIDEBAR=NO + ENUM_VALUES_PER_LINE=0 + OBFUSCATE_EMAILS=YES + USE_MATHJAX=YES + MATHJAX_VERSION=MathJax_2 + MATHJAX_RELPATH="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/" + GENERATE_LATEX=NO + MACRO_EXPANSION=YES + HAVE_DOT=NO + CLASS_GRAPH=NO + DIRECTORY_GRAPH=NO + ; + +# These are used to inform the build system of the +# means to build the integrated and stand-alone docs. + +alias boostdoc ; +explicit boostdoc ; + +alias boostrelease : autodoc.html ; +explicit boostrelease ; diff --git a/include/boost/redis/connection.hpp b/include/boost/redis/connection.hpp index 0b0c6267..7e68475e 100644 --- a/include/boost/redis/connection.hpp +++ b/include/boost/redis/connection.hpp @@ -171,7 +171,6 @@ class basic_connection { * To cancel an ongoing receive operation apps should call * `connection::cancel(operation::receive)`. * - * @param response Response object. * @param token Completion token. * * For an example see cpp20_subscriber.cpp. The completion token must @@ -264,7 +263,6 @@ class basic_connection { * @li operation::all: Cancels all operations listed above. * * @param op: The operation to be cancelled. - * @returns The number of operations that have been canceled. */ void cancel(operation op = operation::all) { diff --git a/include/boost/redis/logger.hpp b/include/boost/redis/logger.hpp index b7c1e09c..41d8e18a 100644 --- a/include/boost/redis/logger.hpp +++ b/include/boost/redis/logger.hpp @@ -117,8 +117,8 @@ class logger { /** @brief Called when the run operation completes. * @ingroup high-level-api * - * @param read_ec Error code returned by the read operation. 
- * @param write_ec Error code returned by the write operation. + * @param reader_ec Error code returned by the read operation. + * @param writer_ec Error code returned by the write operation. */ void on_run(system::error_code const& reader_ec, system::error_code const& writer_ec); diff --git a/include/boost/redis/response.hpp b/include/boost/redis/response.hpp index b3f76ad5..326012db 100644 --- a/include/boost/redis/response.hpp +++ b/include/boost/redis/response.hpp @@ -54,7 +54,7 @@ using generic_response = adapter::result>; * std::cout << "PING: " << resp.value().front().value << std::endl; * consume_one(resp); * std::cout << "PING: " << resp.value().front().value << std::endl; - * @code + * @endcode * * is * @@ -62,7 +62,7 @@ using generic_response = adapter::result>; * PING: one * PING: two * PING: three - * @code + * @endcode * * Given that this function rotates elements, it won't be very * efficient for responses with a large number of elements. It was From ecfe51c7ae80ddeba229dc746b4cf104088087af Mon Sep 17 00:00:00 2001 From: Ruben Perez Date: Tue, 3 Oct 2023 17:27:31 +0200 Subject: [PATCH 10/51] Doc fixes --- doc/Jamfile | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/Jamfile b/doc/Jamfile index f18ccdae..69432c43 100644 --- a/doc/Jamfile +++ b/doc/Jamfile @@ -6,6 +6,7 @@ import sequence ; # All paths must be absolute to work well with the Doxygen rules. path-constant this_dir : . ; +path-constant redis_root_dir : .. ; path-constant include_dir : ../include ; path-constant examples_dir : ../examples ; path-constant readme : ../README.md ; @@ -17,7 +18,7 @@ local examples = [ path.glob-tree $(examples_dir) : *.hpp *.cpp ] ; # If passed directly, several HTML_EXTRA_STYLESHEET tags are generated, # which is not correct. -local stylesheet_arg = [ sequence.join $(stylesheet_files) : " " ] ; +local stylesheet_arg = [ sequence.join "\"$(stylesheet_files)\"" : " " ] ; doxygen autodoc.html : @@ -26,6 +27,8 @@ doxygen autodoc.html PROJECT_NAME=boost_redis PROJECT_NUMBER="1.4.2" PROJECT_BRIEF="A redis client library" + "STRIP_FROM_PATH=\"$(redis_root_dir)\"" + "STRIP_FROM_INC_PATH=\"$(include_dir)\"" BUILTIN_STL_SUPPORT=YES INLINE_SIMPLE_STRUCTS=YES HIDE_UNDOC_MEMBERS=YES @@ -35,11 +38,11 @@ doxygen autodoc.html SORT_MEMBERS_CTORS_1ST=YES SHOW_FILES=NO SHOW_NAMESPACES=NO - LAYOUT_FILE=$(layout_file) + "LAYOUT_FILE=\"$(layout_file)\"" WARN_IF_INCOMPLETE_DOC=YES FILE_PATTERNS="*.hpp *.cpp" EXCLUDE_SYMBOLS=std - USE_MDFILE_AS_MAINPAGE=$(readme) + "USE_MDFILE_AS_MAINPAGE=\"$(readme)\"" SOURCE_BROWSER=YES "HTML_EXTRA_STYLESHEET=$(stylesheet_arg)" HTML_TIMESTAMP=YES @@ -50,7 +53,6 @@ doxygen autodoc.html USE_MATHJAX=YES MATHJAX_VERSION=MathJax_2 MATHJAX_RELPATH="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/" - GENERATE_LATEX=NO MACRO_EXPANSION=YES HAVE_DOT=NO CLASS_GRAPH=NO From 53ef947cf3d6fef6fba6ef38ce4e0798136f7672 Mon Sep 17 00:00:00 2001 From: Ruben Perez Date: Tue, 3 Oct 2023 18:59:21 +0200 Subject: [PATCH 11/51] Doc install and redirection --- doc/Jamfile | 22 ++++++++++++++++++++-- index.html | 24 ++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 index.html diff --git a/doc/Jamfile b/doc/Jamfile index 69432c43..de957e6f 100644 --- a/doc/Jamfile +++ b/doc/Jamfile @@ -6,6 +6,7 @@ import sequence ; # All paths must be absolute to work well with the Doxygen rules. path-constant this_dir : . ; +path-constant target_dir : html ; path-constant redis_root_dir : .. 
; path-constant include_dir : ../include ; path-constant examples_dir : ../examples ; @@ -20,7 +21,8 @@ local examples = [ path.glob-tree $(examples_dir) : *.hpp *.cpp ] ; # which is not correct. local stylesheet_arg = [ sequence.join "\"$(stylesheet_files)\"" : " " ] ; -doxygen autodoc.html +# The doxygen rule requires the target name to end in .html to generate HTML files +doxygen doc.html : $(includes) $(examples) $(readme) : @@ -59,11 +61,27 @@ doxygen autodoc.html DIRECTORY_GRAPH=NO ; +explicit doc.html ; + +# The doxygen rule only informs b2 about the main HTML file, and not about +# all the doc directory that gets generated. Using the install rule copies +# only a single file, which is incorrect. This is a workaround to copy +# the generated docs to the doc/html directory, where they should be. +make copyhtml.tag : doc.html : @copy_html_dir ; +explicit copyhtml.tag ; +actions copy_html_dir +{ + rm -rf $(target_dir) + mkdir -p $(target_dir) + cp -r $(<:D)/html/doc/* $(target_dir)/ + echo "Stamped" > "$(<)" +} + # These are used to inform the build system of the # means to build the integrated and stand-alone docs. alias boostdoc ; explicit boostdoc ; -alias boostrelease : autodoc.html ; +alias boostrelease : copyhtml.tag ; explicit boostrelease ; diff --git a/index.html b/index.html new file mode 100644 index 00000000..a75e6454 --- /dev/null +++ b/index.html @@ -0,0 +1,24 @@ + + + + Boost.Redis + + + + + Automatic redirection failed, please go to + ./doc/html/index.html +
+
+ Boost.Redis
+
+ Copyright (C) 2023 Marcelo Zimbres
+
+ Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file LICENSE_1_0.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt)
+
+
+ + + \ No newline at end of file From bc08a8d41145685247724fc1652ee895195dc979 Mon Sep 17 00:00:00 2001 From: Ruben Perez Date: Tue, 3 Oct 2023 21:04:43 +0200 Subject: [PATCH 12/51] Trigger CI --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3fe46a04..e84bbc4e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,6 @@ name: CI + on: [push, pull_request] jobs: From b66d067af8ec755b3f4db67dfc28a0254754deb4 Mon Sep 17 00:00:00 2001 From: Ruben Perez Date: Tue, 3 Oct 2023 23:08:59 +0200 Subject: [PATCH 13/51] tests => test --- CMakeLists.txt | 4 ++-- {tests => test}/common.cpp | 0 {tests => test}/common.hpp | 0 {tests => test}/test_conn_check_health.cpp | 0 {tests => test}/test_conn_echo_stress.cpp | 0 {tests => test}/test_conn_exec.cpp | 0 {tests => test}/test_conn_exec_cancel.cpp | 0 {tests => test}/test_conn_exec_cancel2.cpp | 0 {tests => test}/test_conn_exec_error.cpp | 0 {tests => test}/test_conn_exec_retry.cpp | 0 {tests => test}/test_conn_push.cpp | 0 {tests => test}/test_conn_quit.cpp | 0 {tests => test}/test_conn_reconnect.cpp | 0 {tests => test}/test_conn_run_cancel.cpp | 0 {tests => test}/test_conn_tls.cpp | 0 {tests => test}/test_issue_50.cpp | 0 {tests => test}/test_low_level.cpp | 0 {tests => test}/test_low_level_sync_sans_io.cpp | 0 {tests => test}/test_request.cpp | 0 {tests => test}/test_run.cpp | 0 20 files changed, 2 insertions(+), 2 deletions(-) rename {tests => test}/common.cpp (100%) rename {tests => test}/common.hpp (100%) rename {tests => test}/test_conn_check_health.cpp (100%) rename {tests => test}/test_conn_echo_stress.cpp (100%) rename {tests => test}/test_conn_exec.cpp (100%) rename {tests => test}/test_conn_exec_cancel.cpp (100%) rename {tests => test}/test_conn_exec_cancel2.cpp (100%) rename {tests => test}/test_conn_exec_error.cpp (100%) rename {tests => test}/test_conn_exec_retry.cpp (100%) rename {tests => test}/test_conn_push.cpp (100%) rename {tests => test}/test_conn_quit.cpp (100%) rename {tests => test}/test_conn_reconnect.cpp (100%) rename {tests => test}/test_conn_run_cancel.cpp (100%) rename {tests => test}/test_conn_tls.cpp (100%) rename {tests => test}/test_issue_50.cpp (100%) rename {tests => test}/test_low_level.cpp (100%) rename {tests => test}/test_low_level_sync_sans_io.cpp (100%) rename {tests => test}/test_request.cpp (100%) rename {tests => test}/test_run.cpp (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index fb9655db..dbb406f2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -143,12 +143,12 @@ endif() if (BOOST_REDIS_TESTS) enable_testing() - add_library(tests_common STATIC tests/common.cpp) + add_library(tests_common STATIC test/common.cpp) target_compile_features(tests_common PRIVATE cxx_std_17) target_link_libraries(tests_common PRIVATE boost_redis_project_options) macro(make_test TEST_NAME STANDARD) - add_executable(${TEST_NAME} tests/${TEST_NAME}.cpp) + add_executable(${TEST_NAME} test/${TEST_NAME}.cpp) target_link_libraries(${TEST_NAME} PRIVATE boost_redis_src tests_common) target_link_libraries(${TEST_NAME} PRIVATE boost_redis_project_options) target_compile_features(${TEST_NAME} PRIVATE cxx_std_${STANDARD}) diff --git a/tests/common.cpp b/test/common.cpp similarity index 100% rename from tests/common.cpp rename to test/common.cpp diff --git a/tests/common.hpp b/test/common.hpp similarity index 100% rename from tests/common.hpp rename to test/common.hpp diff --git a/tests/test_conn_check_health.cpp 
b/test/test_conn_check_health.cpp similarity index 100% rename from tests/test_conn_check_health.cpp rename to test/test_conn_check_health.cpp diff --git a/tests/test_conn_echo_stress.cpp b/test/test_conn_echo_stress.cpp similarity index 100% rename from tests/test_conn_echo_stress.cpp rename to test/test_conn_echo_stress.cpp diff --git a/tests/test_conn_exec.cpp b/test/test_conn_exec.cpp similarity index 100% rename from tests/test_conn_exec.cpp rename to test/test_conn_exec.cpp diff --git a/tests/test_conn_exec_cancel.cpp b/test/test_conn_exec_cancel.cpp similarity index 100% rename from tests/test_conn_exec_cancel.cpp rename to test/test_conn_exec_cancel.cpp diff --git a/tests/test_conn_exec_cancel2.cpp b/test/test_conn_exec_cancel2.cpp similarity index 100% rename from tests/test_conn_exec_cancel2.cpp rename to test/test_conn_exec_cancel2.cpp diff --git a/tests/test_conn_exec_error.cpp b/test/test_conn_exec_error.cpp similarity index 100% rename from tests/test_conn_exec_error.cpp rename to test/test_conn_exec_error.cpp diff --git a/tests/test_conn_exec_retry.cpp b/test/test_conn_exec_retry.cpp similarity index 100% rename from tests/test_conn_exec_retry.cpp rename to test/test_conn_exec_retry.cpp diff --git a/tests/test_conn_push.cpp b/test/test_conn_push.cpp similarity index 100% rename from tests/test_conn_push.cpp rename to test/test_conn_push.cpp diff --git a/tests/test_conn_quit.cpp b/test/test_conn_quit.cpp similarity index 100% rename from tests/test_conn_quit.cpp rename to test/test_conn_quit.cpp diff --git a/tests/test_conn_reconnect.cpp b/test/test_conn_reconnect.cpp similarity index 100% rename from tests/test_conn_reconnect.cpp rename to test/test_conn_reconnect.cpp diff --git a/tests/test_conn_run_cancel.cpp b/test/test_conn_run_cancel.cpp similarity index 100% rename from tests/test_conn_run_cancel.cpp rename to test/test_conn_run_cancel.cpp diff --git a/tests/test_conn_tls.cpp b/test/test_conn_tls.cpp similarity index 100% rename from tests/test_conn_tls.cpp rename to test/test_conn_tls.cpp diff --git a/tests/test_issue_50.cpp b/test/test_issue_50.cpp similarity index 100% rename from tests/test_issue_50.cpp rename to test/test_issue_50.cpp diff --git a/tests/test_low_level.cpp b/test/test_low_level.cpp similarity index 100% rename from tests/test_low_level.cpp rename to test/test_low_level.cpp diff --git a/tests/test_low_level_sync_sans_io.cpp b/test/test_low_level_sync_sans_io.cpp similarity index 100% rename from tests/test_low_level_sync_sans_io.cpp rename to test/test_low_level_sync_sans_io.cpp diff --git a/tests/test_request.cpp b/test/test_request.cpp similarity index 100% rename from tests/test_request.cpp rename to test/test_request.cpp diff --git a/tests/test_run.cpp b/test/test_run.cpp similarity index 100% rename from tests/test_run.cpp rename to test/test_run.cpp From 1c96a6070971f0178afe9d0f3060899f114d0aab Mon Sep 17 00:00:00 2001 From: Ruben Perez Date: Tue, 3 Oct 2023 23:09:34 +0200 Subject: [PATCH 14/51] ci.py first version --- tools/ci.py | 340 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 340 insertions(+) create mode 100644 tools/ci.py diff --git a/tools/ci.py b/tools/ci.py new file mode 100644 index 00000000..8c5a1032 --- /dev/null +++ b/tools/ci.py @@ -0,0 +1,340 @@ +#!/usr/bin/python3 + +from pathlib import Path +from typing import List, Union +import subprocess +import os +import stat +from shutil import rmtree, copytree, ignore_patterns +import argparse + + +_is_windows = os.name == 'nt' +_boost_root = 
Path(os.path.expanduser('~')).joinpath('boost-root') +_b2_command = str(_boost_root.joinpath('b2')) + + +def _run(args: List[str]) -> None: + print('+ ', args, flush=True) + subprocess.run(args, check=True) + + +def _mkdir_and_cd(path: Path) -> None: + os.makedirs(str(path), exist_ok=True) + os.chdir(str(path)) + + +def _cmake_bool(value: bool) -> str: + return 'ON' if value else 'OFF' + + +def _remove_readonly(func, path, _): + os.chmod(path, stat.S_IWRITE) + func(path) + + +def _build_prefix_path(*paths: Union[str, Path]) -> str: + return ';'.join(str(p) for p in paths) + + +def _str2bool(v: Union[bool, str]) -> bool: + if isinstance(v, bool): + return v + elif v == '1': + return True + elif v == '0': + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def _deduce_boost_branch() -> str: + # Are we in GitHub Actions? + if os.environ.get('GITHUB_ACTIONS') is not None: + ci = 'GitHub Actions' + ref = os.environ.get('GITHUB_BASE_REF', '') or os.environ.get('GITHUB_REF', '') + res = 'master' if ref == 'master' or ref.endswith('/master') else 'develop' + elif os.environ.get('DRONE') is not None: + ref = os.environ.get('DRONE_BRANCH', '') + ci = 'Drone' + res = 'master' if ref == 'master' else 'develop' + else: + ci = 'Unknown' + ref = '' + res = 'develop' + + print('+ Found CI {}, ref={}, deduced branch {}'.format(ci, ref, res)) + + return res + + +def _install_boost( + source_dir: Path +) -> None: + assert source_dir.is_absolute() + assert not _boost_root.exists() + lib_dir = _boost_root.joinpath('libs', 'redis') + branch = _deduce_boost_branch() + + # Clone Boost + _run(['git', 'clone', '-b', branch, '--depth', '1', 'https://github.com/boostorg/boost.git', str(_boost_root)]) + os.chdir(str(_boost_root)) + + # Put our library inside boost root + if lib_dir.exists(): + rmtree(str(lib_dir), onerror=_remove_readonly) + copytree( + str(source_dir), + str(lib_dir), + ignore=ignore_patterns('__build*__', '.git'), + dirs_exist_ok=True + ) + + # Install Boost dependencies + _run(["git", "config", "submodule.fetchJobs", "8"]) + _run(["git", "submodule", "update", "-q", "--init", "tools/boostdep"]) + _run(["python", "tools/boostdep/depinst/depinst.py", "--include", "examples", "redis"]) + + # Bootstrap + if _is_windows: + _run(['cmd', '/q', '/c', 'bootstrap.bat']) + else: + _run(['bash', 'bootstrap.sh']) + _run([_b2_command, 'headers']) + + +def _build_b2_distro( + install_prefix: Path +): + os.chdir(str(_boost_root)) + _run([ + _b2_command, + '--prefix={}'.format(install_prefix), + '--with-system', + '-d0', + 'install' + ]) + + +def _run_cmake_superproject_tests( + install_prefix: Path, + generator: str, + build_type: str, + cxxstd: str, + build_shared_libs: bool = False +): + _mkdir_and_cd(_boost_root.joinpath('__build_cmake_test__')) + _run([ + 'cmake', + '-G', + generator, + '-DCMAKE_BUILD_TYPE={}'.format(build_type), + '-DCMAKE_CXX_STANDARD={}'.format(cxxstd), + '-DBOOST_INCLUDE_LIBRARIES=redis', + '-DBUILD_SHARED_LIBS={}'.format(_cmake_bool(build_shared_libs)), + '-DCMAKE_INSTALL_PREFIX={}'.format(install_prefix), + '-DBUILD_TESTING=ON', + '-DBoost_VERBOSE=ON', + '-DCMAKE_INSTALL_MESSAGE=NEVER', + '..' 
+ ]) + _run(['cmake', '--build', '.', '--target', 'tests', '--config', build_type]) + _run(['ctest', '--output-on-failure', '--build-config', build_type]) + + +def _install_cmake_distro(build_type: str): + _run(['cmake', '--build', '.', '--target', 'install', '--config', build_type]) + + +def _run_cmake_standalone_tests( + b2_distro: Path, + generator: str, + build_type: str, + cxxstd: str, + build_shared_libs: bool = False +): + _mkdir_and_cd(_boost_root.joinpath('libs', 'redis', '__build_standalone__')) + _run([ + 'cmake', + '-DCMAKE_PREFIX_PATH={}'.format(_build_prefix_path(b2_distro)), + '-DCMAKE_BUILD_TYPE={}'.format(build_type), + '-DBUILD_SHARED_LIBS={}'.format(_cmake_bool(build_shared_libs)), + '-DCMAKE_CXX_STANDARD={}'.format(cxxstd), + '-G', + generator, + '..' + ]) + _run(['cmake', '--build', '.']) + _run(['ctest', '--output-on-failure', '--build-config', build_type]) + + +def _run_cmake_add_subdirectory_tests( + generator: str, + build_type: str, + build_shared_libs: bool = False +): + test_folder = _boost_root.joinpath('libs', 'redis', 'test', 'cmake_test', '__build_cmake_subdir_test__') + _mkdir_and_cd(test_folder) + _run([ + 'cmake', + '-G', + generator, + '-DBOOST_CI_INSTALL_TEST=OFF', + '-DCMAKE_BUILD_TYPE={}'.format(build_type), + '-DBUILD_SHARED_LIBS={}'.format(_cmake_bool(build_shared_libs)), + '..' + ]) + _run(['cmake', '--build', '.', '--config', build_type]) + _run(['ctest', '--output-on-failure', '--build-config', build_type]) + + +def _run_cmake_find_package_tests( + cmake_distro: Path, + generator: str, + build_type: str, + build_shared_libs: bool = False +): + _mkdir_and_cd(_boost_root.joinpath('libs', 'redis', 'test', 'cmake_test', '__build_cmake_install_test__')) + _run([ + 'cmake', + '-G', + generator, + '-DBOOST_CI_INSTALL_TEST=ON', + '-DCMAKE_BUILD_TYPE={}'.format(build_type), + '-DBUILD_SHARED_LIBS={}'.format(_cmake_bool(build_shared_libs)), + '-DCMAKE_PREFIX_PATH={}'.format(_build_prefix_path(cmake_distro)), + '..' + ]) + _run(['cmake', '--build', '.', '--config', build_type]) + _run(['ctest', '--output-on-failure', '--build-config', build_type]) + + +def _run_cmake_b2_find_package_tests( + b2_distro: Path, + generator: str, + build_type: str, + build_shared_libs: bool = False +): + _mkdir_and_cd(_boost_root.joinpath('libs', 'redis', 'test', 'cmake_b2_test', '__build_cmake_b2_test__')) + _run([ + 'cmake', + '-G', + generator, + '-DCMAKE_PREFIX_PATH={}'.format(_build_prefix_path(b2_distro)), + '-DCMAKE_BUILD_TYPE={}'.format(build_type), + '-DBUILD_SHARED_LIBS={}'.format(_cmake_bool(build_shared_libs)), + '-DBUILD_TESTING=ON', + '..' 
+ ]) + _run(['cmake', '--build', '.', '--config', build_type]) + _run(['ctest', '--output-on-failure', '--build-config', build_type]) + + +def _run_b2_tests( + toolset: str, + cxxstd: str, + variant: str, + stdlib: str = 'native', + address_model: str = '64', + address_sanitizer: bool = False, + undefined_sanitizer: bool = False, +): + os.chdir(str(_boost_root)) + _run([ + _b2_command, + '--abbreviate-paths', + 'toolset={}'.format(toolset), + 'cxxstd={}'.format(cxxstd), + 'address-model={}'.format(address_model), + 'variant={}'.format(variant), + 'stdlib={}'.format(stdlib), + ] + (['address-sanitizer=norecover'] if address_sanitizer else []) # can only be disabled by omitting the arg + + (['undefined-sanitizer=norecover'] if undefined_sanitizer else []) # can only be disabled by omitting the arg + + [ + 'warnings-as-errors=on', + '-j4', + 'libs/redis/test', + 'libs/redis/example' + ]) + + # Get Boost + # Generate "pre-built" b2 distro + # Build the library, run the tests, and install, from the superproject + # Library tests, using the b2 Boost distribution generated before (this tests our normal dev workflow) + # Subdir tests, using add_subdirectory() (lib can be consumed using add_subdirectory) + # Subdir tests, using find_package with the library installed in the previous step + # (library can be consumed using find_package on a distro built by cmake) + + # Subdir tests, using find_package with the b2 distribution + # (library can be consumed using find_package on a distro built by b2) + + + +def main(): + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers() + + subp = subparsers.add_parser('install-boost') + subp.add_argument('--source-dir', type=Path, required=True) + subp.set_defaults(func=_install_boost) + + subp = subparsers.add_parser('build-b2-distro') + subp.add_argument('--install-prefix', type=Path, required=True) + subp.set_defaults(func=_build_b2_distro) + + subp = subparsers.add_parser('run-cmake-superproject-tests') + subp.add_argument('--install-prefix', type=Path, required=True) + subp.add_argument('--generator', default='Unix Makefiles') + subp.add_argument('--build-type', default='Debug') + subp.add_argument('--cxxstd', default='20') + subp.add_argument('--build-shared-libs', type=_str2bool, default=False) + subp.set_defaults(func=_run_cmake_superproject_tests) + + subp = subparsers.add_parser('install-cmake-distro') + subp.add_argument('--build-type', default='Debug') + subp.set_defaults(func=_install_cmake_distro) + + subp = subparsers.add_parser('run-cmake-standalone-tests') + subp.add_argument('--b2-distro', type=Path, required=True) + subp.add_argument('--generator', default='Unix Makefiles') + subp.add_argument('--build-type', default='Debug') + subp.add_argument('--cxxstd', default='20') + subp.add_argument('--build-shared-libs', type=_str2bool, default=False) + subp.set_defaults(func=_run_cmake_standalone_tests) + + subp = subparsers.add_parser('run-cmake-add-subdirectory-tests') + subp.add_argument('--generator', default='Unix Makefiles') + subp.add_argument('--build-type', default='Debug') + subp.add_argument('--build-shared-libs', type=_str2bool, default=False) + subp.set_defaults(func=_run_cmake_add_subdirectory_tests) + + subp = subparsers.add_parser('run-cmake-find-package-tests') + subp.add_argument('--cmake-distro', type=Path, required=True) + subp.add_argument('--generator', default='Unix Makefiles') + subp.add_argument('--build-type', default='Debug') + subp.add_argument('--build-shared-libs', type=_str2bool, default=False) + 
subp.set_defaults(func=_run_cmake_find_package_tests)
+
+    subp = subparsers.add_parser('run-cmake-b2-find-package-tests')
+    subp.add_argument('--b2-distro', type=Path, required=True)
+    subp.add_argument('--generator', default='Unix Makefiles')
+    subp.add_argument('--build-type', default='Debug')
+    subp.add_argument('--build-shared-libs', type=_str2bool, default=False)
+    subp.set_defaults(func=_run_cmake_b2_find_package_tests)
+
+    subp = subparsers.add_parser('run-b2-tests')
+    subp.add_argument('--toolset', required=True)
+    subp.add_argument('--cxxstd', default='20')
+    subp.add_argument('--variant', default='debug,release')
+    subp.add_argument('--stdlib', default='native')
+    subp.add_argument('--address-model', default='64')
+    subp.add_argument('--address-sanitizer', type=_str2bool, default=False)
+    subp.add_argument('--undefined-sanitizer', type=_str2bool, default=False)
+    subp.set_defaults(func=_run_b2_tests)
+
+    args = parser.parse_args()
+    args.func(**{k: v for k, v in vars(args).items() if k != 'func'})
+
+
+if __name__ == '__main__':
+    main()

From cb9fdba0a4fe6806b49d141e9e0126e42db3dd97 Mon Sep 17 00:00:00 2001
From: Ruben Perez
Date: Wed, 4 Oct 2023 11:28:55 +0200
Subject: [PATCH 15/51] New cmakes

---
 CMakeLists.txt                     |  298 +--
 CMakePresets.json                  |   20 +-
 benchmarks/CMakeLists.txt          |   20 +
 cmake/BoostRedisConfig.cmake.in    |    4 -
 doc/Doxyfile.in                    | 2690 ----------------------------
 examples/CMakeLists.txt            |   49 +
 test/CMakeLists.txt                |   72 +
 {examples => test}/boost_redis.cpp |    0
 tools/ci.py                        |   29 +-
 9 files changed, 220 insertions(+), 2962 deletions(-)
 create mode 100644 benchmarks/CMakeLists.txt
 delete mode 100644 cmake/BoostRedisConfig.cmake.in
 delete mode 100644 doc/Doxyfile.in
 create mode 100644 examples/CMakeLists.txt
 create mode 100644 test/CMakeLists.txt
 rename {examples => test}/boost_redis.cpp (100%)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index dbb406f2..af097451 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,262 +1,78 @@
-cmake_minimum_required(VERSION 3.14)
-
-#set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CMAKE_COMMAND} -E time")
+cmake_minimum_required(VERSION 3.8...3.20)
 
 # determine whether it's main/root project
 # or being built under another project.
 if (NOT DEFINED BOOST_REDIS_MAIN_PROJECT)
-  set(BOOST_REDIS_MAIN_PROJECT OFF)
-  if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
-    set(BOOST_REDIS_MAIN_PROJECT ON)
-  endif()
+    set(BOOST_REDIS_MAIN_PROJECT OFF)
+    if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
+        set(BOOST_REDIS_MAIN_PROJECT ON)
+    endif()
 endif()
 
-project(
-    boost_redis
-    VERSION 1.4.2
-    DESCRIPTION "A redis client library"
-    HOMEPAGE_URL "https://boostorg.github.io/redis/"
-    LANGUAGES CXX
-)
-
-option(BOOST_REDIS_INSTALL "Generate install targets." ${BOOST_REDIS_MAIN_PROJECT})
-option(BOOST_REDIS_TESTS "Build tests." ${BOOST_REDIS_MAIN_PROJECT})
-option(BOOST_REDIS_EXAMPLES "Build examples." ${BOOST_REDIS_MAIN_PROJECT})
-option(BOOST_REDIS_BENCHMARKS "Build benchmarks." ${BOOST_REDIS_MAIN_PROJECT})
-option(BOOST_REDIS_DOC "Generate documentations."
${BOOST_REDIS_MAIN_PROJECT}) +project(boost_redis VERSION "${BOOST_SUPERPROJECT_VERSION}" LANGUAGES CXX) +# Library add_library(boost_redis INTERFACE) add_library(Boost::redis ALIAS boost_redis) -target_include_directories(boost_redis INTERFACE - $ - $ -) - -target_link_libraries( - boost_redis - INTERFACE - Boost::asio - Boost::assert - Boost::config - Boost::core - Boost::mp11 - Boost::system - Boost::utility -) - +target_include_directories(boost_redis INTERFACE include) target_compile_features(boost_redis INTERFACE cxx_std_17) -# Asio bases C++ feature detection on __cplusplus. Make MSVC -# define it correctly -if (MSVC) - target_compile_options(boost_redis INTERFACE /Zc:__cplusplus) -endif() - -find_package(Boost 1.80 REQUIRED) - -include_directories(${Boost_INCLUDE_DIRS}) - -find_package(OpenSSL REQUIRED) - -include_directories(include) - -# Common -#======================================================================= - -add_library(boost_redis_project_options INTERFACE) -target_link_libraries(boost_redis_project_options INTERFACE OpenSSL::Crypto OpenSSL::SSL) -if (MSVC) - target_compile_options(boost_redis_project_options INTERFACE /bigobj) - target_compile_definitions(boost_redis_project_options INTERFACE _WIN32_WINNT=0x0601) -endif() - -add_library(boost_redis_src STATIC examples/boost_redis.cpp) -target_compile_features(boost_redis_src PRIVATE cxx_std_17) -target_link_libraries(boost_redis_src PRIVATE boost_redis_project_options) - -# Executables -#======================================================================= - -if (BOOST_REDIS_BENCHMARKS) - add_library(benchmarks_options INTERFACE) - target_link_libraries(benchmarks_options INTERFACE boost_redis_src) - target_link_libraries(benchmarks_options INTERFACE boost_redis_project_options) - target_compile_features(benchmarks_options INTERFACE cxx_std_20) - - add_executable(echo_server_client benchmarks/cpp/asio/echo_server_client.cpp) - target_link_libraries(echo_server_client PRIVATE benchmarks_options) - - add_executable(echo_server_direct benchmarks/cpp/asio/echo_server_direct.cpp) - target_link_libraries(echo_server_direct PRIVATE benchmarks_options) -endif() - -if (BOOST_REDIS_EXAMPLES) - add_library(examples_main STATIC examples/main.cpp) - target_compile_features(examples_main PRIVATE cxx_std_20) - target_link_libraries(examples_main PRIVATE boost_redis_project_options) - - macro(make_example EXAMPLE_NAME STANDARD) - add_executable(${EXAMPLE_NAME} examples/${EXAMPLE_NAME}.cpp) - target_link_libraries(${EXAMPLE_NAME} PRIVATE boost_redis_src) - target_link_libraries(${EXAMPLE_NAME} PRIVATE boost_redis_project_options) - target_compile_features(${EXAMPLE_NAME} PRIVATE cxx_std_${STANDARD}) - if (${STANDARD} STREQUAL "20") - target_link_libraries(${EXAMPLE_NAME} PRIVATE examples_main) - endif() - endmacro() - - macro(make_testable_example EXAMPLE_NAME STANDARD) - make_example(${EXAMPLE_NAME} ${STANDARD}) - add_test(${EXAMPLE_NAME} ${EXAMPLE_NAME}) - endmacro() - - make_testable_example(cpp17_intro 17) - make_testable_example(cpp17_intro_sync 17) - - make_testable_example(cpp20_intro 20) - make_testable_example(cpp20_containers 20) - make_testable_example(cpp20_json 20) - make_testable_example(cpp20_intro_tls 20) - - make_example(cpp20_subscriber 20) - make_example(cpp20_streams 20) - make_example(cpp20_echo_server 20) - make_example(cpp20_resolve_with_sentinel 20) - - # We test the protobuf example only on gcc. 
- if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - find_package(Protobuf) - if (Protobuf_FOUND) - protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS examples/person.proto) - make_testable_example(cpp20_protobuf 20) - target_sources(cpp20_protobuf PUBLIC ${PROTO_SRCS} ${PROTO_HDRS}) - target_link_libraries(cpp20_protobuf PRIVATE ${Protobuf_LIBRARIES}) - target_include_directories(cpp20_protobuf PUBLIC ${Protobuf_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR}) - endif() +# Dependencies +if (BOOST_REDIS_MAIN_PROJECT) + # If we're the root project, error if a dependency is not found + find_package(Boost 1.83 REQUIRED COMPONENTS headers) + find_package(Threads REQUIRED) + find_package(OpenSSL REQUIRED) + target_link_libraries(boost_redis + INTERFACE + Boost::headers + Threads::Threads + OpenSSL::Crypto + OpenSSL::SSL + ) +else() + # If we're in the superproject or called from add_subdirectory, + # Boost dependencies should be already available. + # If other dependencies are not found, we bail out + find_package(Threads) + if(NOT Threads_FOUND) + message(STATUS "Boost.Redis has been disabled, because the required package Threads hasn't been found") + return() endif() - - if (NOT MSVC) - make_example(cpp20_chat_room 20) + find_package(OpenSSL) + if(NOT OpenSSL_FOUND) + message(STATUS "Boost.Redis has been disabled, because the required package OpenSSL hasn't been found") + return() endif() -endif() - -if (BOOST_REDIS_TESTS) - enable_testing() - - add_library(tests_common STATIC test/common.cpp) - target_compile_features(tests_common PRIVATE cxx_std_17) - target_link_libraries(tests_common PRIVATE boost_redis_project_options) - - macro(make_test TEST_NAME STANDARD) - add_executable(${TEST_NAME} test/${TEST_NAME}.cpp) - target_link_libraries(${TEST_NAME} PRIVATE boost_redis_src tests_common) - target_link_libraries(${TEST_NAME} PRIVATE boost_redis_project_options) - target_compile_features(${TEST_NAME} PRIVATE cxx_std_${STANDARD}) - add_test(${TEST_NAME} ${TEST_NAME}) - endmacro() - - make_test(test_conn_quit 17) - make_test(test_conn_tls 17) - make_test(test_low_level 17) - make_test(test_conn_exec_retry 17) - make_test(test_conn_exec_error 17) - make_test(test_request 17) - make_test(test_run 17) - make_test(test_low_level_sync_sans_io 17) - make_test(test_conn_check_health 17) - - make_test(test_conn_exec 20) - make_test(test_conn_push 20) - make_test(test_conn_reconnect 20) - make_test(test_conn_exec_cancel 20) - make_test(test_conn_exec_cancel2 20) - make_test(test_conn_echo_stress 20) - make_test(test_conn_run_cancel 20) - make_test(test_issue_50 20) -endif() - -# Install -#======================================================================= -if (BOOST_REDIS_INSTALL) - install(TARGETS boost_redis - EXPORT boost_redis - PUBLIC_HEADER DESTINATION include COMPONENT Development + # This is generated by boostdep + target_link_libraries(boost_redis + INTERFACE + Boost::asio + Boost::assert + Boost::core + Boost::mp11 + Boost::system + Boost::throw_exception + Threads::Threads + OpenSSL::Crypto + OpenSSL::SSL ) - - include(CMakePackageConfigHelpers) - - configure_package_config_file( - "${PROJECT_SOURCE_DIR}/cmake/BoostRedisConfig.cmake.in" - "${PROJECT_BINARY_DIR}/BoostRedisConfig.cmake" - INSTALL_DESTINATION lib/cmake/boost/redis - ) - - install(EXPORT boost_redis DESTINATION lib/cmake/boost/redis) - install(FILES "${PROJECT_BINARY_DIR}/BoostRedisConfigVersion.cmake" - "${PROJECT_BINARY_DIR}/BoostRedisConfig.cmake" - DESTINATION lib/cmake/boost/redis) - - install(DIRECTORY ${PROJECT_SOURCE_DIR}/include/ 
DESTINATION include) - - include(CMakePackageConfigHelpers) - write_basic_package_version_file( - "${PROJECT_BINARY_DIR}/BoostRedisConfigVersion.cmake" - COMPATIBILITY AnyNewerVersion - ) - - include(CPack) endif() -# Doxygen -#======================================================================= - -if (BOOST_REDIS_DOC) - set(DOXYGEN_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/doc") - configure_file(doc/Doxyfile.in doc/Doxyfile @ONLY) - - add_custom_target( - doc - COMMAND doxygen "${PROJECT_BINARY_DIR}/doc/Doxyfile" - COMMENT "Building documentation using Doxygen" - WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" - VERBATIM - ) +# Enable testing. If we're being called from the superproject, this has already been done +if (BOOST_REDIS_MAIN_PROJECT) + include(CTest) endif() -# Coverage -#======================================================================= +# Most tests require a running Redis server, so we only run them if we're the main project +if(BOOST_REDIS_MAIN_PROJECT AND BUILD_TESTING) + # Tests and common utilities + add_subdirectory(test) -set( - COVERAGE_TRACE_COMMAND - lcov --capture - -output-file "${PROJECT_BINARY_DIR}/coverage.info" - --directory "${PROJECT_BINARY_DIR}" - --include "${PROJECT_SOURCE_DIR}/include/*" -) - -set( - COVERAGE_HTML_COMMAND - genhtml --legend -f -q - "${PROJECT_BINARY_DIR}/coverage.info" - --prefix "${PROJECT_SOURCE_DIR}" - --output-directory "${PROJECT_BINARY_DIR}/coverage_html" -) - -add_custom_target( - coverage - COMMAND ${COVERAGE_TRACE_COMMAND} - COMMAND ${COVERAGE_HTML_COMMAND} - COMMENT "Generating coverage report" - VERBATIM -) - -# TODO -#======================================================================= - -#.PHONY: bench -#bench: -# pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex -# pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex -# pdftoppm {input.pdf} {output.file} -png + # Benchmarks. 
Build them with tests to prevent code rotting + add_subdirectory(benchmarks) + # Examples + add_subdirectory(examples) +endif() diff --git a/CMakePresets.json b/CMakePresets.json index bac4c390..34b32648 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -12,7 +12,7 @@ "warnings": { "dev": true, "deprecated": true, - "uninitialized": true, + "uninitialized": false, "unusedCli": true, "systemVars": false }, @@ -52,8 +52,7 @@ "CMAKE_CXX_COMPILER": "g++-11", "CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address", "CMAKE_CXX_STANDARD_REQUIRED": "ON", - "PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11", - "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/g++-11/doc/" + "PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11" } }, { @@ -69,8 +68,7 @@ "CMAKE_CXX_COMPILER": "g++-11", "CMAKE_SHARED_LINKER_FLAGS": "", "CMAKE_CXX_STANDARD_REQUIRED": "ON", - "PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11-release", - "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/g++-11-release/doc/" + "PROJECT_BINARY_DIR": "${sourceDir}/build/g++-11-release" } }, { @@ -86,8 +84,7 @@ "CMAKE_CXX_COMPILER": "clang++-13", "CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address", "CMAKE_CXX_STANDARD_REQUIRED": "ON", - "PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-13", - "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/clang++-13/doc/" + "PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-13" } }, { @@ -103,8 +100,7 @@ "CMAKE_CXX_COMPILER": "clang++-14", "CMAKE_SHARED_LINKER_FLAGS": "-fsanitize=address", "CMAKE_CXX_STANDARD_REQUIRED": "ON", - "PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-14", - "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/clang++-14/doc/" + "PROJECT_BINARY_DIR": "${sourceDir}/build/clang++-14" } }, { @@ -121,8 +117,7 @@ "CMAKE_CXX_COMPILER": "clang++-14", "CMAKE_SHARED_LINKER_FLAGS": "", "CMAKE_CXX_STANDARD_REQUIRED": "ON", - "PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp17", - "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/libc++-14-cpp17/doc/" + "PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp17" } }, { @@ -139,8 +134,7 @@ "CMAKE_CXX_COMPILER": "clang++-14", "CMAKE_SHARED_LINKER_FLAGS": "", "CMAKE_CXX_STANDARD_REQUIRED": "ON", - "PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp20", - "DOXYGEN_OUTPUT_DIRECTORY": "${sourceDir}/build/libc++-14-cpp20/doc/" + "PROJECT_BINARY_DIR": "${sourceDir}/build/libc++-14-cpp20" } }, { diff --git a/benchmarks/CMakeLists.txt b/benchmarks/CMakeLists.txt new file mode 100644 index 00000000..e40a7dae --- /dev/null +++ b/benchmarks/CMakeLists.txt @@ -0,0 +1,20 @@ + +add_library(benchmarks_options INTERFACE) +target_link_libraries(benchmarks_options INTERFACE boost_redis_src) +target_link_libraries(benchmarks_options INTERFACE boost_redis_project_options) +target_compile_features(benchmarks_options INTERFACE cxx_std_20) + +add_executable(echo_server_client cpp/asio/echo_server_client.cpp) +target_link_libraries(echo_server_client PRIVATE benchmarks_options) + +add_executable(echo_server_direct cpp/asio/echo_server_direct.cpp) +target_link_libraries(echo_server_direct PRIVATE benchmarks_options) + +# TODO +#======================================================================= + +#.PHONY: bench +#bench: +# pdflatex --jobname=echo-f0 benchmarks/benchmarks.tex +# pdflatex --jobname=echo-f1 benchmarks/benchmarks.tex +# pdftoppm {input.pdf} {output.file} -png \ No newline at end of file diff --git a/cmake/BoostRedisConfig.cmake.in b/cmake/BoostRedisConfig.cmake.in deleted file mode 100644 index c620b40c..00000000 --- a/cmake/BoostRedisConfig.cmake.in +++ 
/dev/null @@ -1,4 +0,0 @@ -@PACKAGE_INIT@ - -include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@.cmake") -check_required_components("@PROJECT_NAME@") diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in deleted file mode 100644 index bce07317..00000000 --- a/doc/Doxyfile.in +++ /dev/null @@ -1,2690 +0,0 @@ -# Doxyfile 1.9.4 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). -# -# Note: -# -# Use doxygen to compare the used configuration file with the template -# configuration file: -# doxygen -x [configFile] -# Use doxygen to compare the used configuration file with the template -# configuration file without replacing the environment variables: -# doxygen -x_noenv [configFile] - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the configuration -# file that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# https://www.gnu.org/software/libiconv/ for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = @PROJECT_NAME@ - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = @PROJECT_VERSION@ - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = @PROJECT_DESCRIPTION@ - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = @DOXYGEN_OUTPUT_DIRECTORY@ - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096 -# sub-directories (in 2 levels) under the output directory of each output format -# and will distribute the generated files over these directories. 
Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to -# control the number of sub-directories. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# Controls the number of sub-directories that will be created when -# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every -# level increment doubles the number of directories, resulting in 4096 -# directories at level 8 which is the default and also the maximum value. The -# sub-directories are organized in 2 levels, the first level always has a fixed -# numer of 16 directories. -# Minimum value: 0, maximum value: 8, default value: 8. -# This tag requires that the tag CREATE_SUBDIRS is set to YES. - -CREATE_SUBDIRS_LEVEL = 8 - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian, -# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English -# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek, -# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with -# English messages), Korean, Korean-en (Korean with English messages), Latvian, -# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, -# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, -# Swedish, Turkish, Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. 
- -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = @PROJECT_SOURCE_DIR@/include \ - @PROJECT_SOURCE_DIR@ - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = . - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line -# such as -# /*************** -# as being the beginning of a Javadoc-style comment "banner". If set to NO, the -# Javadoc-style will behave just like regular comments and it will not be -# interpreted by doxygen. -# The default value is: NO. - -JAVADOC_BANNER = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. 
- -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# By default Python docstrings are displayed as preformatted text and doxygen's -# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the -# doxygen's special commands can be used and the contents of the docstring -# documentation blocks is shown as doxygen documentation. -# The default value is: YES. - -PYTHON_DOCSTRING = YES - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:^^" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". Note that you cannot put \n's in the value part of an alias -# to insert newlines (in the resulting output). You can put ^^ in the value part -# of an alias to insert a newline as if a physical newline was in the original -# file. When you need a literal { or } or , in the value part of an alias you -# have to escape them by means of a backslash (\), this can lead to conflicts -# with the commands \{ and \} for these it is advised to use the version @{ and -# @} or use a double escape (\\{ and \\}) - -ALIASES = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. 
-# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice -# sources only. Doxygen will then generate output that is more tailored for that -# language. For instance, namespaces will be presented as modules, types will be -# separated into more groups, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_SLICE = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, -# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, -# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: -# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser -# tries to guess whether the code is fixed or free formatted code, this is the -# default for Fortran type files). For instance to make doxygen treat .inc files -# as Fortran files (default is PHP), and .f files as C (default is Fortran), -# use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. When specifying no_extension you should add -# * to the FILE_PATTERNS. -# -# Note see also the list of default file extension mappings. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See https://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up -# to that level are automatically included in the table of contents, even if -# they do not have an id attribute. -# Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 5. -# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. - -TOC_INCLUDE_HEADINGS = 5 - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = YES - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. 
-# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# If one adds a struct or class to a group and this option is enabled, then also -# any nested class or struct is added to the same group. By default this option -# is disabled and one has to add nested compounds explicitly via \ingroup. -# The default value is: NO. - -GROUP_NESTED_COMPOUNDS = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = YES - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. 
This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use -# during processing. When set to 0 doxygen will based this on the number of -# cores available in the system. You can set it explicitly to a value larger -# than 0 to get more control over the balance between CPU load and processing -# speed. At this moment only the input processing can be done using multiple -# threads. Since this is still an experimental feature the default is set to 1, -# which effectively disables parallel processing. Please report any issues you -# encounter. Generating dot graphs in parallel is controlled by the -# DOT_NUM_THREADS setting. -# Minimum value: 0, maximum value: 32, default value: 1. - -NUM_PROC_THREADS = 1 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual -# methods of a class will be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIV_VIRTUAL = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. 
- -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If this flag is set to YES, the name of an unnamed parameter in a declaration -# will be determined by the corresponding definition. By default unnamed -# parameters remain unnamed in the output. -# The default value is: YES. - -RESOLVE_UNNAMED_PARAMS = YES - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = YES - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = YES - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# declarations. If set to NO, these declarations will be included in the -# documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# With the correct setting of option CASE_SENSE_NAMES doxygen will better be -# able to match the capabilities of the underlying filesystem. In case the -# filesystem is case sensitive (i.e. it supports files in the same directory -# whose names only differ in casing), the option must be set to YES to properly -# deal with such files in case they appear in the input. For filesystems that -# are not case sensitive the option should be set to NO to properly deal with -# output files written for symbols that only differ in casing, such as for two -# classes, one named CLASS and the other named Class, and to also support -# references to files without having to specify the exact matching casing. On -# Windows (including Cygwin) and MacOS, users should typically set this option -# to NO, whereas on Linux or other Unix flavors it should typically be set to -# YES. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. 
If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class -# will show which file needs to be included to use the class. -# The default value is: YES. - -SHOW_HEADERFILE = YES - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = YES - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = YES - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. 
By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = NO - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = NO - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. 
You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. See also section "Changing the -# layout of pages" for information. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = doc/DoxygenLayout.xml - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as documenting some parameters in -# a documented function twice, or documenting parameters that don't exist or -# using markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete -# function parameter documentation. If set to NO, doxygen will accept that some -# parameters have no documentation without warning. -# The default value is: YES. - -WARN_IF_INCOMPLETE_DOC = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong parameter -# documentation, but not about the absence of documentation. If EXTRACT_ALL is -# set to YES then this flag will automatically be disabled. See also -# WARN_IF_INCOMPLETE_DOC -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS -# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but -# at the end of the doxygen process doxygen will return with a non-zero status. -# Possible values are: NO, YES and FAIL_ON_WARNINGS. -# The default value is: NO. 
- -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# See also: WARN_LINE_FORMAT -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# In the $text part of the WARN_FORMAT command it is possible that a reference -# to a more specific place is given. To make it easier to jump to this place -# (outside of doxygen) the user can define a custom "cut" / "paste" string. -# Example: -# WARN_LINE_FORMAT = "'vi $file +$line'" -# See also: WARN_FORMAT -# The default value is: at line $line of file $file. - -WARN_LINE_FORMAT = "at line $line of file $file" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). In case the file specified cannot be opened for writing the -# warning and error messages are written to standard error. When as file - is -# specified the warning and error messages are written to standard output -# (stdout). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = include examples README.md - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: -# https://www.gnu.org/software/libiconv/) for the list of possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. -# -# Note the list of default checked file patterns might differ from the list of -# default file extension mappings. -# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, -# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C -# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, -# *.vhdl, *.ucf, *.qsf and *.ice. - -FILE_PATTERNS = *.hpp \ - *.cpp - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. 
- -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# ANamespace::AClass, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = std - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = *.cpp - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# -# -# where is the value of the INPUT_FILTER tag, and is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). 
See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = README.md - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# entity all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. 
-# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see https://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the -# clang parser (see: -# http://clang.llvm.org/) for more accurate parsing at the cost of reduced -# performance. This can be particularly helpful with template rich C++ code for -# which doxygen's built-in parser lacks the necessary type information. -# Note: The availability of this option depends on whether or not doxygen was -# generated with the -Duse_libclang=ON option for CMake. -# The default value is: NO. - -CLANG_ASSISTED_PARSING = NO - -# If the CLANG_ASSISTED_PARSING tag is set to YES and the CLANG_ADD_INC_PATHS -# tag is set to YES then doxygen will add the directory of each input to the -# include path. -# The default value is: YES. -# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. - -CLANG_ADD_INC_PATHS = YES - -# If clang assisted parsing is enabled you can provide the compiler with command -# line options that you would normally use when invoking the compiler. Note that -# the include paths will already be set by doxygen for the files and directories -# specified with INPUT and INCLUDE_PATH. -# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. - -CLANG_OPTIONS = - -# If clang assisted parsing is enabled you can provide the clang parser with the -# path to the directory containing a file called compile_commands.json. This -# file is the compilation database (see: -# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the -# options used when the source files were built. This is equivalent to -# specifying the -p option to a clang tool, such as clang-check. These options -# will then be passed to the parser. Any options specified with CLANG_OPTIONS -# will be added as well. -# Note: The availability of this option depends on whether or not doxygen was -# generated with the -Duse_libclang=ON option for CMake. 
- -CLANG_DATABASE_PATH = - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = . - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. 
-# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = doc/doxygen-awesome.css doc/doxygen-awesome-sidebar-only.css - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a color-wheel, see -# https://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use gray-scales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML -# documentation will contain a main index with vertical navigation menus that -# are dynamically created via JavaScript. If disabled, the navigation index will -# consists of multiple levels of tabs that are statically embedded in every HTML -# page. Disable this option to support browsers that do not have JavaScript, -# like the Qt help browser. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_MENUS = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: -# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To -# create a documentation set, doxygen will generate a Makefile in the HTML -# output directory. Running make will produce the docset in that directory and -# running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy -# genXcode/_index.html for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag determines the URL of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDURL = - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. 
- -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# on Windows. In the beginning of 2021 Microsoft took the original page, with -# a.o. the download links, offline the HTML help workshop was already many years -# in maintenance mode). You can download the HTML help workshop from the web -# archives at Installation executable (see: -# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo -# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe). -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the main .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location (absolute path -# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to -# run qhelpgenerator on the generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. 
When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine tune the look of the index (see "Fine-tuning the output"). As an -# example, the default style sheet generated by doxygen has an example that -# shows how to put an image at the root of the tree instead of the PROJECT_NAME. -# Since the tree basically has the same information as the tab index, you could -# consider setting DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = YES - -# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the -# FULL_SIDEBAR option determines if the side bar is limited to only the treeview -# area (value NO) or if it should extend to the full height of the window (value -# YES). Setting this to YES gives a layout similar to -# https://docs.readthedocs.io with more room for contents, but less room for the -# project logo, title, and description. If either GENERATE_TREEVIEW or -# DISABLE_INDEX is set to NO, this option has no effect. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FULL_SIDEBAR = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 0 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email -# addresses. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -OBFUSCATE_EMAILS = YES - -# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg -# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see -# https://inkscape.org) to generate formulas as SVG images instead of PNGs for -# the HTML output. These images will generally look nicer at scaled resolutions. -# Possible values are: png (the default) and svg (looks nicer but requires the -# pdf2svg or inkscape tool). -# The default value is: png. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FORMULA_FORMAT = png - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANSPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands -# to create new LaTeX commands to be used in formulas as building blocks. See -# the section "Including formulas" for details. - -FORMULA_MACROFILE = - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# https://www.mathjax.org) which uses client side JavaScript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = YES - -# With MATHJAX_VERSION it is possible to specify the MathJax version to be used. -# Note that the different versions of MathJax have different requirements with -# regards to the different settings, so it is possible that also other MathJax -# settings have to be changed when switching between the different MathJax -# versions. -# Possible values are: MathJax_2 and MathJax_3. -# The default value is: MathJax_2. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_VERSION = MathJax_2 - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. For more details about the output format see MathJax -# version 2 (see: -# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3 -# (see: -# http://docs.mathjax.org/en/latest/web/components/output.html). -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility. 
This is the name for Mathjax version 2, for MathJax version 3 -# this will be translated into chtml), NativeMML (i.e. MathML. Only supported -# for NathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This -# is the name for Mathjax version 3, for MathJax version 2 this will be -# translated into HTML-CSS) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from https://www.mathjax.org before deployment. The default value is: -# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 -# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/ - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# for MathJax version 2 (see -# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# For example for MathJax version 3 (see -# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): -# MATHJAX_EXTENSIONS = ams -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: -# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use + S -# (what the is depends on the OS and browser, but it is typically -# , /