diff --git a/libs/core/synchronization/include/hpx/synchronization/condition_variable.hpp b/libs/core/synchronization/include/hpx/synchronization/condition_variable.hpp index d29c9ab67779..420b2da18efc 100644 --- a/libs/core/synchronization/include/hpx/synchronization/condition_variable.hpp +++ b/libs/core/synchronization/include/hpx/synchronization/condition_variable.hpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -158,7 +157,7 @@ namespace hpx { /// /// \returns \a notify_one returns \a void. /// - void notify_one(error_code& ec = throws) + void notify_one(error_code& ec = throws) const { std::unique_lock l(data_->mtx_); data_->cond_.notify_one(HPX_MOVE(l), ec); @@ -173,7 +172,7 @@ namespace hpx { /// /// \returns \a notify_all returns \a void. /// - void notify_all(error_code& ec = throws) + void notify_all(error_code& ec = throws) const { std::unique_lock l(data_->mtx_); data_->cond_.notify_all(HPX_MOVE(l), ec); @@ -214,8 +213,7 @@ namespace hpx { auto const data = data_; // keep data alive - util::ignore_all_while_checking const ignore_lock; - HPX_UNUSED(ignore_lock); + [[maybe_unused]] util::ignore_all_while_checking const ignore_lock; std::unique_lock l(data->mtx_); unlock_guard> unlock(lock); @@ -321,8 +319,7 @@ namespace hpx { auto const data = data_; // keep data alive - util::ignore_all_while_checking const ignore_lock; - HPX_UNUSED(ignore_lock); + [[maybe_unused]] util::ignore_all_while_checking const ignore_lock; std::unique_lock l(data->mtx_); unlock_guard> unlock(lock); @@ -620,7 +617,7 @@ namespace hpx { /// /// \returns \a notify_one returns \a void. /// - void notify_one(error_code& ec = throws) + void notify_one(error_code& ec = throws) const { std::unique_lock l(data_->mtx_); data_->cond_.notify_one(HPX_MOVE(l), ec); @@ -651,7 +648,7 @@ namespace hpx { /// /// \returns \a notify_all returns \a void. 
/// - void notify_all(error_code& ec = throws) + void notify_all(error_code& ec = throws) const { std::unique_lock l(data_->mtx_); data_->cond_.notify_all(HPX_MOVE(l), ec); @@ -702,8 +699,7 @@ namespace hpx { auto const data = data_; // keep data alive - util::ignore_all_while_checking const ignore_lock; - HPX_UNUSED(ignore_lock); + [[maybe_unused]] util::ignore_all_while_checking const ignore_lock; std::unique_lock l(data->mtx_); unlock_guard unlock(lock); @@ -819,8 +815,7 @@ namespace hpx { auto const data = data_; // keep data alive - util::ignore_all_while_checking const ignore_lock; - HPX_UNUSED(ignore_lock); + [[maybe_unused]] util::ignore_all_while_checking const ignore_lock; std::unique_lock l(data->mtx_); unlock_guard unlock(lock); @@ -1069,14 +1064,14 @@ namespace hpx { while (!pred()) { - util::ignore_all_while_checking const ignore_lock; - HPX_UNUSED(ignore_lock); + [[maybe_unused]] util::ignore_all_while_checking const + ignore_lock; std::unique_lock l(data->mtx_); if (stoken.stop_requested()) { // pred() has already evaluated to false since we last - // a acquired lock + // acquired the lock return false; } @@ -1164,8 +1159,8 @@ namespace hpx { { bool should_stop; { - util::ignore_all_while_checking const ignore_lock; - HPX_UNUSED(ignore_lock); + [[maybe_unused]] util::ignore_all_while_checking const + ignore_lock; std::unique_lock l(data->mtx_); if (stoken.stop_requested()) diff --git a/libs/core/synchronization/include/hpx/synchronization/detail/condition_variable.hpp b/libs/core/synchronization/include/hpx/synchronization/detail/condition_variable.hpp index d52159055602..169ac7e32439 100644 --- a/libs/core/synchronization/include/hpx/synchronization/detail/condition_variable.hpp +++ b/libs/core/synchronization/include/hpx/synchronization/detail/condition_variable.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2007-2022 Hartmut Kaiser +// Copyright (c) 2007-2023 Hartmut Kaiser // Copyright (c) 2013-2015 Agustin Berge // // SPDX-License-Identifier: BSL-1.0 @@
-55,17 +55,31 @@ namespace hpx::lcos::local::detail { // Return false if no more threads are waiting (returns true if queue is // non-empty). - HPX_CORE_EXPORT bool notify_one(std::unique_lock lock, - threads::thread_priority priority, error_code& ec = throws); + HPX_CORE_EXPORT bool notify_one(std::unique_lock& lock, + threads::thread_priority priority, bool unlock, + error_code& ec = throws); HPX_CORE_EXPORT void notify_all(std::unique_lock lock, threads::thread_priority priority, error_code& ec = throws); + bool notify_one(std::unique_lock lock, + threads::thread_priority priority, error_code& ec = throws) + { + return notify_one(lock, priority, true, ec); + } + bool notify_one( std::unique_lock lock, error_code& ec = throws) { return notify_one( - HPX_MOVE(lock), threads::thread_priority::default_, ec); + lock, threads::thread_priority::default_, true, ec); + } + + bool notify_one_no_unlock( + std::unique_lock& lock, error_code& ec = throws) + { + return notify_one( + lock, threads::thread_priority::default_, false, ec); } void notify_all( diff --git a/libs/core/synchronization/include/hpx/synchronization/shared_mutex.hpp b/libs/core/synchronization/include/hpx/synchronization/shared_mutex.hpp index 24beb12e22db..1b3d5105ac93 100644 --- a/libs/core/synchronization/include/hpx/synchronization/shared_mutex.hpp +++ b/libs/core/synchronization/include/hpx/synchronization/shared_mutex.hpp @@ -11,226 +11,570 @@ #pragma once #include -#include -#include - +#include +#include +#include +#include +#include + +#include +#include #include -namespace hpx { +namespace hpx::detail { - namespace detail { + /////////////////////////////////////////////////////////////////////////// + template + struct shared_mutex_data + { + using mutex_type = Mutex; - template - class shared_mutex + HPX_HOST_DEVICE_CONSTEXPR shared_mutex_data() noexcept + : count_(1) { - private: - using mutex_type = Mutex; + } + + struct state_data + { + std::uint32_t shared_count; + std::uint8_t tag; // ABA 
protection + bool exclusive; + bool upgrade; + bool exclusive_waiting_blocked; + }; - struct state_data + struct shared_state + { + union { - unsigned shared_count; - bool exclusive; - bool upgrade; - bool exclusive_waiting_blocked; + std::uint64_t value = 0; + state_data data; }; - state_data state; - mutex_type state_change; - hpx::condition_variable shared_cond; - hpx::condition_variable exclusive_cond; - hpx::condition_variable upgrade_cond; + shared_state() = default; + }; - void release_waiters() - { - exclusive_cond.notify_one(); - shared_cond.notify_all(); - } + util::cache_aligned_data_derived> state; - public: - shared_mutex() - : state{0u, false, false, false} - { - } + using condition_variable = lcos::local::detail::condition_variable; - void lock_shared() - { - std::unique_lock lk(state_change); + util::cache_aligned_data_derived state_change; + util::cache_aligned_data_derived shared_cond; + util::cache_aligned_data_derived exclusive_cond; + util::cache_aligned_data_derived upgrade_cond; + + void release_waiters(std::unique_lock& lk) + { + exclusive_cond.notify_one_no_unlock(lk); + shared_cond.notify_all(HPX_MOVE(lk)); + } + + bool set_state(shared_state& s1, shared_state& s) noexcept + { + ++s.data.tag; + return s1.value == state.load(std::memory_order_relaxed).value && + state.compare_exchange_strong(s1, s, std::memory_order_release); + } + + bool set_state(shared_state& s1, shared_state& s, + std::unique_lock& lk) noexcept + { + if (s1.value != state.load(std::memory_order_relaxed).value) + return false; + + ++s.data.tag; - while (state.exclusive || state.exclusive_waiting_blocked) + lk = std::unique_lock(state_change); + if (state.compare_exchange_strong(s1, s, std::memory_order_release)) + return true; + + lk.unlock(); + return false; + } + + void lock_shared() + { + while (true) + { + auto s = state.load(std::memory_order_acquire); + while (s.data.exclusive || s.data.exclusive_waiting_blocked) { - shared_cond.wait(lk); + { + std::unique_lock 
lk(state_change); + shared_cond.wait(lk); + } + + s = state.load(std::memory_order_acquire); } - ++state.shared_count; + auto s1 = s; + + ++s.data.shared_count; + if (set_state(s1, s)) + { + break; + } } + } - bool try_lock_shared() + bool try_lock_shared() + { + while (true) { - std::unique_lock lk(state_change); - - if (state.exclusive || state.exclusive_waiting_blocked) + auto s = state.load(std::memory_order_acquire); + if (s.data.exclusive || s.data.exclusive_waiting_blocked) + { return false; + } - else + auto s1 = s; + + ++s.data.shared_count; + if (set_state(s1, s)) { - ++state.shared_count; - return true; + break; } } + return true; + } - void unlock_shared() + void unlock_shared() + { + while (true) { - std::unique_lock lk(state_change); + auto s = state.load(std::memory_order_acquire); + auto s1 = s; - if (/*bool const last_reader = */ !--state.shared_count) + if (--s.data.shared_count == 0) { - if (state.upgrade) + if (s.data.upgrade) { - state.upgrade = false; - state.exclusive = true; - - upgrade_cond.notify_one(); + s.data.upgrade = false; + s.data.exclusive = true; + + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + HPX_ASSERT_OWNS_LOCK(lk); + upgrade_cond.notify_one_no_unlock(lk); + release_waiters(lk); + break; + } } else { - state.exclusive_waiting_blocked = false; + s.data.exclusive_waiting_blocked = false; + + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + HPX_ASSERT_OWNS_LOCK(lk); + release_waiters(lk); + break; + } } - - release_waiters(); + } + else if (set_state(s1, s)) + { + break; } } + } - void lock() + void lock() + { + while (true) { - std::unique_lock lk(state_change); - - while (state.shared_count || state.exclusive) + auto s = state.load(std::memory_order_acquire); + while (s.data.shared_count != 0 || s.data.exclusive) { - state.exclusive_waiting_blocked = true; - exclusive_cond.wait(lk); + auto s1 = s; + + s.data.exclusive_waiting_blocked = true; + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + 
HPX_ASSERT_OWNS_LOCK(lk); + exclusive_cond.wait(lk); + } + + s = state.load(std::memory_order_acquire); } - state.exclusive = true; + auto s1 = s; + + s.data.exclusive = true; + if (set_state(s1, s)) + { + break; + } } + } - bool try_lock() + bool try_lock() + { + while (true) { - std::unique_lock lk(state_change); - - if (state.shared_count || state.exclusive) + auto s = state.load(std::memory_order_acquire); + if (s.data.shared_count || s.data.exclusive) + { return false; + } - else + auto s1 = s; + + s.data.exclusive = true; + if (set_state(s1, s)) { - state.exclusive = true; - return true; + break; } } + return true; + } - void unlock() + void unlock() + { + while (true) { - std::unique_lock lk(state_change); - state.exclusive = false; - state.exclusive_waiting_blocked = false; - release_waiters(); + auto s = state.load(std::memory_order_acquire); + auto s1 = s; + + s.data.exclusive = false; + s.data.exclusive_waiting_blocked = false; + + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + HPX_ASSERT_OWNS_LOCK(lk); + release_waiters(lk); + break; + } } + } - void lock_upgrade() + void lock_upgrade() + { + while (true) { - std::unique_lock lk(state_change); - - while (state.exclusive || state.exclusive_waiting_blocked || - state.upgrade) + auto s = state.load(std::memory_order_acquire); + while (s.data.exclusive || s.data.exclusive_waiting_blocked || + s.data.upgrade) { - shared_cond.wait(lk); + { + std::unique_lock lk(state_change); + shared_cond.wait(lk); + } + + s = state.load(std::memory_order_acquire); } - ++state.shared_count; - state.upgrade = true; + auto s1 = s; + + ++s.data.shared_count; + s.data.upgrade = true; + if (set_state(s1, s)) + { + break; + } } + } - bool try_lock_upgrade() + bool try_lock_upgrade() + { + while (true) { - std::unique_lock lk(state_change); - - if (state.exclusive || state.exclusive_waiting_blocked || - state.upgrade) + auto s = state.load(std::memory_order_acquire); + if (s.data.exclusive || 
s.data.exclusive_waiting_blocked || + s.data.upgrade) { return false; } - ++state.shared_count; - state.upgrade = true; - return true; + auto s1 = s; + + ++s.data.shared_count; + s.data.upgrade = true; + if (set_state(s1, s)) + { + break; + } } + return true; + } - void unlock_upgrade() + void unlock_upgrade() + { + while (true) { - std::unique_lock lk(state_change); - state.upgrade = false; + auto s = state.load(std::memory_order_acquire); + auto s1 = s; + + bool release = false; + s.data.upgrade = false; + if (--s.data.shared_count == 0) + { + s.data.exclusive_waiting_blocked = false; + release = true; + } - if (/*bool const last_reader = */ !--state.shared_count) + if (release) { - state.exclusive_waiting_blocked = false; - release_waiters(); + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + HPX_ASSERT_OWNS_LOCK(lk); + release_waiters(lk); + break; + } + } + else if (set_state(s1, s)) + { + break; } } + } - void unlock_upgrade_and_lock() + void unlock_upgrade_and_lock() + { + while (true) { - std::unique_lock lk(state_change); - --state.shared_count; + auto s = state.load(std::memory_order_acquire); + auto s1 = s; - while (state.shared_count) + --s.data.shared_count; + if (!set_state(s1, s)) { - upgrade_cond.wait(lk); + continue; + } + + s = state.load(std::memory_order_acquire); + while (s.data.shared_count != 0) + { + { + std::unique_lock lk(state_change); + upgrade_cond.wait(lk); + } + s = state.load(std::memory_order_acquire); } - state.upgrade = false; - state.exclusive = true; + s1 = s; + + s.data.upgrade = false; + s.data.exclusive = true; + if (set_state(s1, s)) + { + break; + } } + } - void unlock_and_lock_upgrade() + void unlock_and_lock_upgrade() + { + while (true) { - std::unique_lock lk(state_change); - state.exclusive = false; - state.upgrade = true; - ++state.shared_count; - state.exclusive_waiting_blocked = false; - release_waiters(); + auto s = state.load(std::memory_order_acquire); + auto s1 = s; + + s.data.exclusive = false; + 
s.data.exclusive_waiting_blocked = false; + s.data.upgrade = true; + ++s.data.shared_count; + + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + HPX_ASSERT_OWNS_LOCK(lk); + release_waiters(lk); + break; + } } + } - void unlock_and_lock_shared() + void unlock_and_lock_shared() + { + while (true) { - std::unique_lock lk(state_change); - state.exclusive = false; - ++state.shared_count; - state.exclusive_waiting_blocked = false; - release_waiters(); + auto s = state.load(std::memory_order_acquire); + auto s1 = s; + + s.data.exclusive = false; + s.data.exclusive_waiting_blocked = false; + ++s.data.shared_count; + + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + HPX_ASSERT_OWNS_LOCK(lk); + release_waiters(lk); + break; + } } + } - bool try_unlock_shared_and_lock() + bool try_unlock_shared_and_lock() + { + while (true) { - std::unique_lock lk(state_change); - if (!state.exclusive && !state.exclusive_waiting_blocked && - !state.upgrade && state.shared_count == 1) + auto s = state.load(std::memory_order_acquire); + if (s.data.exclusive || s.data.exclusive_waiting_blocked || + s.data.upgrade || s.data.shared_count != 1) + { + return false; + } + + auto s1 = s; + + s.data.shared_count = 0; + s.data.exclusive = true; + if (set_state(s1, s)) { - state.shared_count = 0; - state.exclusive = true; - return true; + break; } - return false; } + return true; + } - void unlock_upgrade_and_lock_shared() + void unlock_upgrade_and_lock_shared() + { + while (true) { - std::unique_lock lk(state_change); - state.upgrade = false; - state.exclusive_waiting_blocked = false; - release_waiters(); + auto s = state.load(std::memory_order_acquire); + auto s1 = s; + + s.data.exclusive_waiting_blocked = false; + s.data.upgrade = false; + + std::unique_lock lk; + if (set_state(s1, s, lk)) + { + HPX_ASSERT_OWNS_LOCK(lk); + release_waiters(lk); + break; + } } - }; - } // namespace detail + } + + private: + friend void intrusive_ptr_add_ref(shared_mutex_data* p) noexcept + { + ++p->count_; 
+ } + + friend void intrusive_ptr_release(shared_mutex_data* p) noexcept + { + if (0 == --p->count_) + { + delete p; + } + } + + hpx::util::atomic_count count_; + }; + + template + class shared_mutex + { + private: + using mutex_type = Mutex; + + using data_type = hpx::intrusive_ptr>; + hpx::util::cache_aligned_data_derived data_; + + using shared_state = typename shared_mutex_data::shared_state; + + public: + shared_mutex() + : data_(new shared_mutex_data, false) + { + } + + void lock_shared() + { + auto data = data_; + data->lock_shared(); + } + + bool try_lock_shared() + { + auto data = data_; + return data->try_lock_shared(); + } + + void unlock_shared() + { + auto data = data_; + data->unlock_shared(); + } + + void lock() + { + auto data = data_; + data->lock(); + } + + bool try_lock() + { + auto data = data_; + return data->try_lock(); + } + + void unlock() + { + auto data = data_; + data->unlock(); + } + + void lock_upgrade() + { + auto data = data_; + data->lock_upgrade(); + } + + bool try_lock_upgrade() + { + auto data = data_; + return data->try_lock_upgrade(); + } + + void unlock_upgrade() + { + auto data = data_; + data->unlock_upgrade(); + } + + void unlock_upgrade_and_lock() + { + auto data = data_; + data->unlock_upgrade_and_lock(); + } + + void unlock_and_lock_upgrade() + { + auto data = data_; + data->unlock_and_lock_upgrade(); + } + + void unlock_and_lock_shared() + { + auto data = data_; + data->unlock_and_lock_shared(); + } + + bool try_unlock_shared_and_lock() + { + auto data = data_; + return data->try_unlock_shared_and_lock(); + } + + void unlock_upgrade_and_lock_shared() + { + auto data = data_; + data->unlock_upgrade_and_lock_shared(); + } + }; +} // namespace hpx::detail + +namespace hpx { /// The \a shared_mutex class is a synchronization primitive that can be /// used to protect shared data from being simultaneously accessed by diff --git a/libs/core/synchronization/src/detail/condition_variable.cpp 
b/libs/core/synchronization/src/detail/condition_variable.cpp index 7b86274f2af0..3db9fa72b0c6 100644 --- a/libs/core/synchronization/src/detail/condition_variable.cpp +++ b/libs/core/synchronization/src/detail/condition_variable.cpp @@ -108,8 +108,8 @@ namespace hpx::lcos::local::detail { // Return false if no more threads are waiting (returns true if queue // is non-empty). - bool condition_variable::notify_one(std::unique_lock lock, - threads::thread_priority /* priority */, error_code& ec) + bool condition_variable::notify_one(std::unique_lock& lock, + threads::thread_priority priority, bool unlock, error_code& ec) { // Caller failing to hold lock 'lock' before calling function #if defined(HPX_MSVC) @@ -138,7 +138,8 @@ namespace hpx::lcos::local::detail { } bool const not_empty = !queue_.empty(); - lock.unlock(); + if (unlock) + lock.unlock(); ctx.resume(); @@ -148,6 +149,9 @@ namespace hpx::lcos::local::detail { if (&ec != &throws) ec = make_success_code(); + if (unlock) + lock.unlock(); + return false; #if defined(HPX_MSVC) diff --git a/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex1.cpp b/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex1.cpp index a36f3cf9d3af..9994085ce797 100644 --- a/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex1.cpp +++ b/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex1.cpp @@ -31,24 +31,25 @@ void test_multiple_readers() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; - unsigned const number_of_threads = 10; + constexpr unsigned number_of_threads = 10; test::thread_group pool; - hpx::shared_mutex rw_mutex; - unsigned unblocked_count = 0; - unsigned simultaneous_running_count = 0; unsigned max_simultaneous_running = 0; mutex_type unblocked_count_mutex; - hpx::condition_variable unblocked_condition; mutex_type finish_mutex; std::unique_lock 
finish_lock(finish_mutex); try { + hpx::shared_mutex rw_mutex; + unsigned unblocked_count = 0; + unsigned simultaneous_running_count = 0; + hpx::condition_variable unblocked_condition; + for (unsigned i = 0; i != number_of_threads; ++i) { pool.create_thread( @@ -86,24 +87,25 @@ void test_multiple_readers() void test_only_one_writer_permitted() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; - unsigned const number_of_threads = 10; + constexpr unsigned number_of_threads = 10; test::thread_group pool; - hpx::shared_mutex rw_mutex; unsigned unblocked_count = 0; - unsigned simultaneous_running_count = 0; unsigned max_simultaneous_running = 0; mutex_type unblocked_count_mutex; - hpx::condition_variable unblocked_condition; mutex_type finish_mutex; std::unique_lock finish_lock(finish_mutex); try { + hpx::shared_mutex rw_mutex; + unsigned simultaneous_running_count = 0; + hpx::condition_variable unblocked_condition; + for (unsigned i = 0; i != number_of_threads; ++i) { pool.create_thread( @@ -135,22 +137,23 @@ void test_only_one_writer_permitted() void test_reader_blocks_writer() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; test::thread_group pool; - hpx::shared_mutex rw_mutex; unsigned unblocked_count = 0; - unsigned simultaneous_running_count = 0; unsigned max_simultaneous_running = 0; mutex_type unblocked_count_mutex; - hpx::condition_variable unblocked_condition; mutex_type finish_mutex; std::unique_lock finish_lock(finish_mutex); try { + hpx::shared_mutex rw_mutex; + unsigned simultaneous_running_count = 0; + hpx::condition_variable unblocked_condition; + pool.create_thread( test::locking_thread>(rw_mutex, unblocked_count, unblocked_count_mutex, unblocked_condition, @@ -195,25 +198,26 @@ void test_reader_blocks_writer() void 
test_unlocking_writer_unblocks_all_readers() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; test::thread_group pool; hpx::shared_mutex rw_mutex; std::unique_lock write_lock(rw_mutex); - unsigned unblocked_count = 0; - unsigned simultaneous_running_count = 0; unsigned max_simultaneous_running = 0; mutex_type unblocked_count_mutex; - hpx::condition_variable unblocked_condition; mutex_type finish_mutex; std::unique_lock finish_lock(finish_mutex); - unsigned const reader_count = 10; + constexpr unsigned reader_count = 10; try { + unsigned unblocked_count = 0; + unsigned simultaneous_running_count = 0; + hpx::condition_variable unblocked_condition; + for (unsigned i = 0; i != reader_count; ++i) { pool.create_thread( @@ -257,29 +261,30 @@ void test_unlocking_writer_unblocks_all_readers() void test_unlocking_last_reader_only_unblocks_one_writer() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; test::thread_group pool; - hpx::shared_mutex rw_mutex; unsigned unblocked_count = 0; - unsigned simultaneous_running_readers = 0; unsigned max_simultaneous_readers = 0; - unsigned simultaneous_running_writers = 0; unsigned max_simultaneous_writers = 0; mutex_type unblocked_count_mutex; - hpx::condition_variable unblocked_condition; mutex_type finish_reading_mutex; std::unique_lock finish_reading_lock(finish_reading_mutex); mutex_type finish_writing_mutex; std::unique_lock finish_writing_lock(finish_writing_mutex); - unsigned const reader_count = 10; - unsigned const writer_count = 10; + constexpr unsigned reader_count = 10; + constexpr unsigned writer_count = 10; try { + hpx::shared_mutex rw_mutex; + unsigned simultaneous_running_readers = 0; + unsigned simultaneous_running_writers = 0; + hpx::condition_variable unblocked_condition; + for (unsigned i = 0; i 
!= reader_count; ++i) { pool.create_thread( @@ -360,7 +365,7 @@ int hpx_main() int main(int argc, char* argv[]) { - // By default this test should run on all available cores + // By default, this test should run on all available cores std::vector const cfg = {"hpx.os_threads=all"}; // Initialize and run HPX diff --git a/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex2.cpp b/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex2.cpp index 9a0cf5fe6ed5..b9ee89713411 100644 --- a/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex2.cpp +++ b/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex2.cpp @@ -1,5 +1,5 @@ // (C) Copyright 2006-7 Anthony Williams -// Copyright (c) 2015-2022 Hartmut Kaiser +// Copyright (c) 2015-2023 Hartmut Kaiser // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. (See @@ -29,24 +29,25 @@ void test_only_one_upgrade_lock_permitted() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; - unsigned const number_of_threads = 2; + constexpr unsigned number_of_threads = 2; test::thread_group pool; - shared_mutex_type rw_mutex; unsigned unblocked_count = 0; - unsigned simultaneous_running_count = 0; unsigned max_simultaneous_running = 0; mutex_type unblocked_count_mutex; - hpx::condition_variable unblocked_condition; mutex_type finish_mutex; std::unique_lock finish_lock(finish_mutex); try { + shared_mutex_type rw_mutex; + unsigned simultaneous_running_count = 0; + hpx::condition_variable unblocked_condition; + for (unsigned i = 0; i != number_of_threads; ++i) { pool.create_thread( @@ -78,24 +79,25 @@ void test_only_one_upgrade_lock_permitted() void test_can_lock_upgrade_if_currently_locked_shared() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = 
hpx::mutex; test::thread_group pool; - shared_mutex_type rw_mutex; unsigned unblocked_count = 0; - unsigned simultaneous_running_count = 0; unsigned max_simultaneous_running = 0; mutex_type unblocked_count_mutex; - hpx::condition_variable unblocked_condition; mutex_type finish_mutex; std::unique_lock finish_lock(finish_mutex); - unsigned const reader_count = 10; + constexpr unsigned reader_count = 10; try { + shared_mutex_type rw_mutex; + unsigned simultaneous_running_count = 0; + hpx::condition_variable unblocked_condition; + for (unsigned i = 0; i != reader_count; ++i) { pool.create_thread( @@ -143,18 +145,18 @@ void test_can_lock_upgrade_if_currently_locked_shared() void test_can_lock_upgrade_to_unique_if_currently_locked_upgrade() { - typedef hpx::shared_mutex shared_mutex_type; + using shared_mutex_type = hpx::shared_mutex; shared_mutex_type mtx; hpx::upgrade_lock l(mtx); - hpx::upgrade_to_unique_lock ul(l); + hpx::upgrade_to_unique_lock const ul(l); HPX_TEST(ul.owns_lock()); } void test_if_other_thread_has_write_lock_try_lock_shared_returns_false() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; shared_mutex_type rw_mutex; mutex_type finish_mutex; @@ -181,8 +183,8 @@ void test_if_other_thread_has_write_lock_try_lock_shared_returns_false() void test_if_other_thread_has_write_lock_try_lock_upgrade_returns_false() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; shared_mutex_type rw_mutex; mutex_type finish_mutex; @@ -209,7 +211,7 @@ void test_if_other_thread_has_write_lock_try_lock_upgrade_returns_false() void test_if_no_thread_has_lock_try_lock_shared_returns_true() { - typedef hpx::shared_mutex shared_mutex_type; + using shared_mutex_type = hpx::shared_mutex; shared_mutex_type rw_mutex; bool const try_succeeded = 
rw_mutex.try_lock_shared(); @@ -222,7 +224,7 @@ void test_if_no_thread_has_lock_try_lock_shared_returns_true() void test_if_no_thread_has_lock_try_lock_upgrade_returns_true() { - typedef hpx::shared_mutex shared_mutex_type; + using shared_mutex_type = hpx::shared_mutex; shared_mutex_type rw_mutex; bool const try_succeeded = rw_mutex.try_lock_upgrade(); @@ -235,8 +237,8 @@ void test_if_no_thread_has_lock_try_lock_upgrade_returns_true() void test_if_other_thread_has_shared_lock_try_lock_shared_returns_true() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; shared_mutex_type rw_mutex; mutex_type finish_mutex; @@ -263,8 +265,8 @@ void test_if_other_thread_has_shared_lock_try_lock_shared_returns_true() void test_if_other_thread_has_shared_lock_try_lock_upgrade_returns_true() { - typedef hpx::shared_mutex shared_mutex_type; - typedef hpx::mutex mutex_type; + using shared_mutex_type = hpx::shared_mutex; + using mutex_type = hpx::mutex; shared_mutex_type rw_mutex; mutex_type finish_mutex; @@ -307,7 +309,7 @@ int hpx_main() int main(int argc, char* argv[]) { - // By default this test should run on all available cores + // By default, this test should run on all available cores std::vector const cfg = {"hpx.os_threads=all"}; // Initialize and run HPX diff --git a/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex_locking_thread.hpp b/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex_locking_thread.hpp index 8e593a57d5ad..f872454ab898 100644 --- a/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex_locking_thread.hpp +++ b/libs/core/synchronization/tests/unit/shared_mutex/shared_mutex_locking_thread.hpp @@ -1,5 +1,5 @@ // (C) Copyright 2008 Anthony Williams -// Copyright (c) 2015-2022 Hartmut Kaiser +// Copyright (c) 2015-2023 Hartmut Kaiser // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, 
Version 1.0. (See @@ -45,7 +45,7 @@ namespace test { { } - void operator()() + void operator()() const { // acquire lock Lock lock(rw_mutex); @@ -93,7 +93,7 @@ namespace test { { } - void operator()() + void operator()() const { std::unique_lock lk(rwm); { @@ -124,7 +124,7 @@ namespace test { { } - void operator()() + void operator()() const { std::shared_lock lk(rwm); { diff --git a/libs/core/synchronization/tests/unit/shared_mutex/thread_group.hpp b/libs/core/synchronization/tests/unit/shared_mutex/thread_group.hpp index f98a708af8e9..a95781b36eb1 100644 --- a/libs/core/synchronization/tests/unit/shared_mutex/thread_group.hpp +++ b/libs/core/synchronization/tests/unit/shared_mutex/thread_group.hpp @@ -1,5 +1,5 @@ // (C) Copyright 2007-9 Anthony Williams -// Copyright (c) 2015-2022 Hartmut Kaiser +// Copyright (c) 2015-2023 Hartmut Kaiser // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. (See @@ -27,10 +27,11 @@ #endif namespace test { + class thread_group { private: - typedef hpx::shared_mutex mutex_type; + using mutex_type = hpx::shared_mutex; public: thread_group() {} @@ -40,16 +41,16 @@ namespace test { ~thread_group() { - for (hpx::thread* t : threads) + for (hpx::thread const* t : threads) delete t; } private: - bool is_this_thread_in() + bool is_this_thread_in() const { - hpx::thread::id id = hpx::this_thread::get_id(); + hpx::thread::id const id = hpx::this_thread::get_id(); std::shared_lock guard(mtx_); - for (hpx::thread* t : threads) + for (hpx::thread const* t : threads) { if (t->get_id() == id) return true; @@ -57,14 +58,14 @@ namespace test { return false; } - bool is_thread_in(hpx::thread* thrd) + bool is_thread_in(hpx::thread const* thrd) const { if (!thrd) return false; - hpx::thread::id id = thrd->get_id(); + hpx::thread::id const id = thrd->get_id(); std::shared_lock guard(mtx_); - for (hpx::thread* t : threads) + for (hpx::thread const* t : threads) { if (t->get_id() == id) return true; @@ 
-93,7 +94,6 @@ namespace test { "thread_group::add_thread", "resource_deadlock_would_occur: trying to add a " "duplicated thread"); - return; }; std::lock_guard guard(mtx_); @@ -101,7 +101,7 @@ namespace test { } } - void remove_thread(hpx::thread* thrd) + void remove_thread(hpx::thread const* thrd) { std::lock_guard guard(mtx_); std::list::iterator const it = @@ -111,14 +111,13 @@ namespace test { threads.erase(it); } - void join_all() + void join_all() const { if (is_this_thread_in()) { HPX_THROW_EXCEPTION(hpx::error::thread_resource_error, "thread_group::join_all", "resource_deadlock_would_occur: trying joining itself"); - return; } std::shared_lock guard(mtx_); @@ -129,7 +128,7 @@ namespace test { } } - void interrupt_all() + void interrupt_all() const { std::shared_lock guard(mtx_); for (hpx::thread* t : threads)