From 2df09c1d5a44ccc7460a522d8477072384349bab Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Sun, 3 May 2026 17:12:10 +0800 Subject: [PATCH 1/6] benchmark plots: improve dimensions --- tools/bench_plot.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/bench_plot.py b/tools/bench_plot.py index c0be685..3e398ca 100755 --- a/tools/bench_plot.py +++ b/tools/bench_plot.py @@ -136,8 +136,8 @@ def plot_group_benchmarks(grouped, out_prefix, testcase_map=None): means = [p[1] for p in pairs_sorted] # Figure sizing: width scales with number of subjects, height fixed - width = max(6, 0.6 * max(1, len(subjects))) - height = 4 + width = max(8, 0.6 * max(1, len(subjects))) + height = 10 # Default plotting behavior: single axes with linear scale but allow handling # of large outliers via symlog or broken axis. Default thresholds can be @@ -238,6 +238,7 @@ def percentile(data, p): gridspec_kw={"height_ratios": heights}, figsize=(max(8, width), max(3, 2 + k * 1.5)), dpi=100, + layout='constrained', ) # Ensure axes is a list @@ -313,11 +314,11 @@ def percentile(data, p): val = means[i] ax.text(i, val + headroom, f"{val:.1f}", ha="center", va="bottom", fontsize=8) - plt.tight_layout() + # plt.tight_layout() else: # No very large outliers -> simple linear plot - fig, ax = plt.subplots(figsize=(width, height), dpi=100) + fig, ax = plt.subplots(figsize=(width, height), dpi=100, layout='constrained') bars = ax.bar(x, means, color="tab:blue", width=0.6) ax.set_xticks(x) ax.set_xticklabels(subjects, rotation=45, ha="right", fontsize=9) @@ -331,7 +332,7 @@ def percentile(data, p): h = rect.get_height() ax.text(rect.get_x() + rect.get_width() / 2.0, h + (top * 0.02), f"{val:.3f}", ha="center", va="bottom", fontsize=8) - plt.tight_layout() + # plt.tight_layout() # Output filename: append sanitized testcase name and bench name safe_testcase = testcase_name.replace(" ", "_").replace("/", "_") From 0797f33ab27737ef9e26351ee0d1023824b38fe0 Mon Sep 17 
00:00:00 2001 From: Tim Blechmann Date: Fri, 1 May 2026 20:43:07 +0800 Subject: [PATCH 2/6] use parameters for customization --- CMakeLists.txt | 93 +++-- Readme.md | 79 +++-- include/nova/sync/detail/backoff.hpp | 2 +- .../nova/sync/event/manual_reset_event.hpp | 88 ----- ...event.hpp => parking_auto_reset_event.hpp} | 72 ++-- .../sync/event/parking_manual_reset_event.hpp | 119 +++++++ .../sync/event/timed_auto_reset_event.hpp | 4 +- .../sync/event/timed_manual_reset_event.hpp | 6 +- include/nova/sync/mutex/eventfd_mutex.hpp | 49 ++- include/nova/sync/mutex/fast_mutex.hpp | 121 ------- include/nova/sync/mutex/kqueue_mutex.hpp | 56 ++- .../nova/sync/mutex/native_async_mutex.hpp | 8 +- include/nova/sync/mutex/parking_mutex.hpp | 228 +++++++++++++ include/nova/sync/mutex/policies.hpp | 87 +++++ include/nova/sync/mutex/pthread_mutex.hpp | 253 ++++++++++++++ include/nova/sync/mutex/pthread_rt_mutex.hpp | 174 ---------- .../sync/mutex/recursive_spinlock_mutex.hpp | 84 ----- .../nova/sync/mutex/shared_spinlock_mutex.hpp | 102 ------ include/nova/sync/mutex/spinlock_mutex.hpp | 319 ++++++++++++++++-- .../{fair_mutex.hpp => ticket_mutex.hpp} | 84 +++-- .../mutex/win32_critical_section_mutex.hpp | 79 ++++- .../nova/sync/semaphore/fast_semaphore.hpp | 171 ---------- .../nova/sync/semaphore/parking_semaphore.hpp | 240 +++++++++++++ source/nova/sync/futex/atomic_wait.cpp | 60 +++- source/nova/sync/mutex/eventfd_mutex.cpp | 73 ++-- source/nova/sync/mutex/kqueue_mutex.cpp | 41 +-- source/nova/sync/mutex/parking_mutex.cpp | 143 ++++++++ .../sync/mutex/recursive_spinlock_mutex.cpp | 31 -- .../nova/sync/mutex/shared_spinlock_mutex.cpp | 70 ---- source/nova/sync/mutex/spinlock_mutex.cpp | 156 ++++++++- source/nova/sync/mutex/ticket_mutex.cpp | 47 +++ test/event_benchmarks.cpp | 4 +- test/event_test.cpp | 4 +- test/mutex_benchmarks.cpp | 6 +- test/mutex_test.cpp | 47 ++- test/mutex_thread_safety_test.cpp | 58 ++-- test/mutex_types.hpp | 191 +++++------ 
test/negative/acquired_after_wrong_order.cpp | 6 +- test/negative/double_lock_non_reentrant.cpp | 6 +- test/negative/excludes_while_holding.cpp | 6 +- test/negative/guarded_by_without_lock.cpp | 6 +- test/negative/pt_guarded_by_without_lock.cpp | 6 +- test/negative/release_without_lock.cpp | 4 +- test/negative/requires_without_lock.cpp | 6 +- test/negative/shared_lock_write_guarded.cpp | 4 +- test/policy_mutex_test.cpp | 68 ++++ test/semaphore_test.cpp | 2 +- test/semaphore_types.hpp | 2 +- 48 files changed, 2251 insertions(+), 1314 deletions(-) delete mode 100644 include/nova/sync/event/manual_reset_event.hpp rename include/nova/sync/event/{auto_reset_event.hpp => parking_auto_reset_event.hpp} (57%) create mode 100644 include/nova/sync/event/parking_manual_reset_event.hpp delete mode 100644 include/nova/sync/mutex/fast_mutex.hpp create mode 100644 include/nova/sync/mutex/parking_mutex.hpp create mode 100644 include/nova/sync/mutex/policies.hpp create mode 100644 include/nova/sync/mutex/pthread_mutex.hpp delete mode 100644 include/nova/sync/mutex/pthread_rt_mutex.hpp delete mode 100644 include/nova/sync/mutex/recursive_spinlock_mutex.hpp delete mode 100644 include/nova/sync/mutex/shared_spinlock_mutex.hpp rename include/nova/sync/mutex/{fair_mutex.hpp => ticket_mutex.hpp} (51%) delete mode 100644 include/nova/sync/semaphore/fast_semaphore.hpp create mode 100644 include/nova/sync/semaphore/parking_semaphore.hpp create mode 100644 source/nova/sync/mutex/parking_mutex.cpp delete mode 100644 source/nova/sync/mutex/recursive_spinlock_mutex.cpp delete mode 100644 source/nova/sync/mutex/shared_spinlock_mutex.cpp create mode 100644 source/nova/sync/mutex/ticket_mutex.cpp create mode 100644 test/policy_mutex_test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 786bf52..82fc215 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,30 @@ cmake_minimum_required(VERSION 3.25) project(nova_sync VERSION 0.1.0 LANGUAGES CXX C) + 
+######################################################################################################################## + +function(nova_sync_install_pmr) + if (NOT COMMAND CPMAddPackage) + set(CPM_DOWNLOAD_VERSION 0.42.1) + if(NOT EXISTS "${CMAKE_BINARY_DIR}/cmake/CPM.cmake") + message(STATUS "Downloading CPM.cmake ${CPM_DOWNLOAD_VERSION}...") + file(DOWNLOAD + "https://github.com/cpm-cmake/CPM.cmake/releases/download/v${CPM_DOWNLOAD_VERSION}/CPM.cmake" + "${CMAKE_BINARY_DIR}/cmake/CPM.cmake" + STATUS _cpm_download_status + TLS_VERIFY ON + ) + list(GET _cpm_download_status 0 _cpm_download_error) + if(_cpm_download_error) + list(GET _cpm_download_status 1 _cpm_download_message) + message(FATAL_ERROR "Failed to download CPM.cmake: ${_cpm_download_message}") + endif() + endif() + include("${CMAKE_BINARY_DIR}/cmake/CPM.cmake") + endif() +endfunction() + if (NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 20) endif() @@ -11,6 +35,8 @@ if (PROJECT_IS_TOP_LEVEL AND NOT CMAKE_MSVC_DEBUG_INFORMATION_FORMAT) set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT "Embedded") endif() +######################################################################################################################## + if (PROJECT_IS_TOP_LEVEL) add_custom_target(nova_sync_project_files SOURCES .clang-tidy @@ -24,6 +50,20 @@ if (PROJECT_IS_TOP_LEVEL) ) endif() +######################################################################################################################## + +if (NOT TARGET nova::parameter) + nova_sync_install_pmr() + + CPMAddPackage( + NAME nova_parameter + GITHUB_REPOSITORY timblechmann/nova_parameter + GIT_TAG 2a27452a75aec950baf6967e88b28dfe09ab1bf9 + ) +endif() + +######################################################################################################################## + set(headers include/nova/sync/detail/async_support.hpp include/nova/sync/detail/backoff.hpp @@ -33,12 +73,14 @@ set(headers include/nova/sync/detail/syscall.hpp 
include/nova/sync/detail/timed_wait.hpp include/nova/sync/futex/atomic_wait.hpp - include/nova/sync/event/auto_reset_event.hpp + include/nova/sync/event/parking_auto_reset_event.hpp include/nova/sync/event/concepts.hpp include/nova/sync/event/detail/async_support.hpp - include/nova/sync/event/manual_reset_event.hpp + include/nova/sync/event/parking_manual_reset_event.hpp include/nova/sync/event/native_auto_reset_event.hpp include/nova/sync/event/native_manual_reset_event.hpp + include/nova/sync/event/parking_auto_reset_event.hpp + include/nova/sync/event/parking_manual_reset_event.hpp include/nova/sync/event/support/boost_asio_support.hpp include/nova/sync/event/support/libdispatch_support.hpp include/nova/sync/event/support/qt_support.hpp @@ -48,10 +90,11 @@ set(headers include/nova/sync/semaphore/detail/async_support.hpp include/nova/sync/semaphore/dispatch_semaphore.hpp include/nova/sync/semaphore/eventfd_semaphore.hpp - include/nova/sync/semaphore/fast_semaphore.hpp + include/nova/sync/semaphore/parking_semaphore.hpp include/nova/sync/semaphore/kqueue_semaphore.hpp include/nova/sync/semaphore/mach_semaphore.hpp include/nova/sync/semaphore/native_async_semaphore.hpp + include/nova/sync/semaphore/parking_semaphore.hpp include/nova/sync/semaphore/posix_semaphore.hpp include/nova/sync/semaphore/support/boost_asio_support.hpp include/nova/sync/semaphore/support/libdispatch_support.hpp @@ -63,15 +106,18 @@ set(headers include/nova/sync/mutex/concepts.hpp include/nova/sync/mutex/detail/async_support.hpp include/nova/sync/mutex/eventfd_mutex.hpp - include/nova/sync/mutex/fair_mutex.hpp - include/nova/sync/mutex/fast_mutex.hpp + include/nova/sync/mutex/ticket_mutex.hpp + include/nova/sync/mutex/parking_mutex.hpp include/nova/sync/mutex/kqueue_mutex.hpp + include/nova/sync/mutex/parking_mutex.hpp + include/nova/sync/mutex/policies.hpp + include/nova/sync/mutex/ticket_mutex.hpp include/nova/sync/thread_safety/locked_object.hpp 
include/nova/sync/mutex/native_async_mutex.hpp - include/nova/sync/mutex/pthread_rt_mutex.hpp + include/nova/sync/mutex/pthread_mutex.hpp include/nova/sync/mutex/pthread_spinlock_mutex.hpp - include/nova/sync/mutex/recursive_spinlock_mutex.hpp - include/nova/sync/mutex/shared_spinlock_mutex.hpp + + include/nova/sync/mutex/spinlock_mutex.hpp include/nova/sync/mutex/support/async_waiter_guard.hpp include/nova/sync/mutex/support/boost_asio_support.hpp @@ -98,14 +144,11 @@ set(sources source/nova/sync/semaphore/mach_semaphore.cpp source/nova/sync/semaphore/posix_semaphore.cpp source/nova/sync/semaphore/win32_semaphore.cpp + source/nova/sync/mutex/parking_mutex.cpp + source/nova/sync/mutex/spinlock_mutex.cpp + source/nova/sync/mutex/ticket_mutex.cpp source/nova/sync/mutex/eventfd_mutex.cpp - source/nova/sync/mutex/fair_mutex.cpp - source/nova/sync/mutex/fast_mutex.cpp source/nova/sync/mutex/kqueue_mutex.cpp - source/nova/sync/mutex/recursive_spinlock_mutex.cpp - source/nova/sync/mutex/shared_spinlock_mutex.cpp - source/nova/sync/mutex/spinlock_mutex.cpp - source/nova/sync/mutex/win32_critical_section_mutex.cpp source/nova/sync/mutex/win32_event_mutex.cpp source/nova/sync/mutex/win32_mutex.cpp source/nova/sync/mutex/win32_srw_mutex.cpp @@ -127,6 +170,7 @@ target_include_directories(nova_sync PUBLIC $ $ ) +target_link_libraries(nova_sync PUBLIC nova::parameter) if(WIN32) target_compile_definitions(nova_sync PRIVATE @@ -153,26 +197,6 @@ cmake_dependent_option( ) if(NOVA_SYNC_TESTS) - function(nova_sync_install_pmr) - if (NOT COMMAND CPMAddPackage) - set(CPM_DOWNLOAD_VERSION 0.42.1) - if(NOT EXISTS "${CMAKE_BINARY_DIR}/cmake/CPM.cmake") - message(STATUS "Downloading CPM.cmake ${CPM_DOWNLOAD_VERSION}...") - file(DOWNLOAD - "https://github.com/cpm-cmake/CPM.cmake/releases/download/v${CPM_DOWNLOAD_VERSION}/CPM.cmake" - "${CMAKE_BINARY_DIR}/cmake/CPM.cmake" - STATUS _cpm_download_status - TLS_VERIFY ON - ) - list(GET _cpm_download_status 0 _cpm_download_error) - 
if(_cpm_download_error) - list(GET _cpm_download_status 1 _cpm_download_message) - message(FATAL_ERROR "Failed to download CPM.cmake: ${_cpm_download_message}") - endif() - endif() - include("${CMAKE_BINARY_DIR}/cmake/CPM.cmake") - endif() - endfunction() set(CMAKE_FOLDER "tests") @@ -251,6 +275,7 @@ if(NOVA_SYNC_TESTS) test/semaphore_test.cpp test/semaphore_benchmarks.cpp test/semaphore_async_asio_test.cpp + test/policy_mutex_test.cpp ) if(WIN32) diff --git a/Readme.md b/Readme.md index a7d31b2..b2ae113 100644 --- a/Readme.md +++ b/Readme.md @@ -6,38 +6,79 @@ Synchronization primitives for C++20: specialized mutex and event types optimize | Type | Characteristics | Named Requirement | |------|-----------------|-------------------| -| `spinlock_mutex` | Simple spinlock | `Mutex` | -| `recursive_spinlock_mutex` | Recursive spinlock | `Mutex` | -| `pthread_spinlock_mutex` | `pthread_spinlock_t` based spinlock | `Mutex` | -| `shared_spinlock_mutex` | Shared spinlock | `SharedMutex` | -| `fast_mutex` | Fast general purpose mutex | `Mutex` | -| `fair_mutex` | Ticket lock, FIFO fairness guaranteed | `Mutex` | -| `pthread_priority_ceiling_mutex` | POSIX real-time mutex (PTHREAD_PRIO_PROTECT), Linux/POSIX only | `TimedMutex` | -| `pthread_priority_inherit_mutex` | POSIX real-time mutex (PTHREAD_PRIO_INHERIT), Linux/POSIX only | `TimedMutex` | -| `win32_recursive_mutex` | Win32 CRITICAL_SECTION, Windows only | `Mutex` | +| `parking_mutex<>` | Futex-based mutex, parks immediately | `Mutex` | +| `parking_mutex` | Futex-based mutex, exponential backoff before parking | `Mutex` | +| `parking_mutex` | Futex-based mutex, no spin, timed waits | `TimedMutex` | +| `parking_mutex` | Futex-based mutex, exponential backoff, timed waits | `TimedMutex` | +| `ticket_mutex<>` | Fair FIFO ticket lock, futex sleep | `TimedMutex` | +| `ticket_mutex` | Fair FIFO ticket lock with exponential backoff | `TimedMutex` | +| `spinlock_mutex<>` | Spinlock, CPU-pause hints | `Mutex` | +| 
`spinlock_mutex` | Spinlock, exponential backoff | `Mutex` | +| `spinlock_mutex` | Recursive spinlock | `Mutex` | +| `spinlock_mutex` | Recursive spinlock with backoff | `Mutex` | +| `spinlock_mutex` | Shared (reader-writer) spinlock | `SharedMutex` | +| `spinlock_mutex` | Shared spinlock with exponential backoff | `SharedMutex` | +| `pthread_spinlock_mutex` | `pthread_spinlock_t` based spinlock, POSIX only | `Mutex` | +| `pthread_mutex<>` | POSIX `pthread_mutex_t` (default type) | `TimedMutex` | +| `pthread_mutex` | Recursive POSIX mutex | `TimedMutex` | +| `pthread_mutex` | Error-checking POSIX mutex | `TimedMutex` | +| `pthread_mutex` | Adaptive-spin POSIX mutex (Linux) | `TimedMutex` | +| `pthread_mutex` | POSIX mutex, priority inheritance (RT) | `TimedMutex` | +| `pthread_mutex>` | POSIX mutex, priority ceiling N (RT) | `TimedMutex` | +| `win32_critical_section_mutex<>` | Win32 CRITICAL_SECTION, recursive, Windows only | `Mutex` | +| `win32_critical_section_mutex>` | Win32 CRITICAL_SECTION with custom spin count | `Mutex` | | `win32_mutex` | Win32 kernel mutex, async-capable, Windows only | `TimedMutex` | | `win32_srw_mutex` | Win32 SRW lock (ultra-lightweight), Windows only | `Mutex` | | `apple_os_unfair_mutex` | Apple `os_unfair_lock`, macOS/iOS only | `Mutex` | -| `kqueue_mutex` | Apple kqueue-based async mutex, macOS/iOS only | `Mutex` | -| `eventfd_mutex` | Linux eventfd-based async mutex | `Mutex` | -| `native_async_mutex` | Cross-platform alias: `win32_mutex` / `kqueue_mutex` / `eventfd_mutex` | `Mutex` | +| `kqueue_mutex<>` | Apple kqueue-based async mutex | `TimedMutex` | +| `kqueue_mutex` | kqueue mutex with exponential backoff | `TimedMutex` | +| `eventfd_mutex<>` | Linux eventfd-based async mutex | `TimedMutex` | +| `eventfd_mutex` | eventfd mutex with exponential backoff | `TimedMutex` | +| `native_async_mutex` | Cross-platform alias: `win32_event_mutex` / `kqueue_mutex` / `eventfd_mutex` | `TimedMutex` | + +### Policy parameters + +All policy 
types live in `nova/sync/mutex/policies.hpp`: + +| Policy | Effect | +|--------|--------| +| `with_backoff` | Exponential backoff with CPU pause hints before blocking | +| `recursive` | Allow re-entrant locking from the owning thread (`spinlock_mutex` only) | +| `shared` | Enable shared (reader-writer) locking via `lock_shared()` (`spinlock_mutex` only; mutually exclusive with `recursive`) | +| `priority_inherit` | PTHREAD_PRIO_INHERIT — owner boosted to highest waiter priority | +| `priority_ceiling` | PTHREAD_PRIO_PROTECT — all holders elevated to ceiling N | +| `pthread_recursive` | PTHREAD_MUTEX_RECURSIVE — re-entrant locking | +| `pthread_errorcheck` | PTHREAD_MUTEX_ERRORCHECK — error on double-lock | +| `pthread_adaptive` | PTHREAD_MUTEX_ADAPTIVE_NP — adaptive spin (Linux only) | +| `win32_spin_count` | Spin count for `InitializeCriticalSectionAndSpinCount` | + +### Convenience aliases -### `fast_mutex` +```cpp +using pthread_default_mutex = pthread_mutex<>; +using pthread_recursive_mutex = pthread_mutex< pthread_recursive >; +using pthread_priority_inherit_mutex = pthread_mutex< priority_inherit >; +template < int N > +using pthread_priority_ceiling_mutex = pthread_mutex< priority_ceiling< N > >; +``` + +### `parking_mutex` -Lock-free fast path using `std::atomic::wait()`. Superior performance to `std::mutex` under low-to-moderate contention. +Futex-based mutex using `std::atomic::wait()`. Fast path acquires in one CAS; slow path parks the calling thread. With `with_backoff`, spins briefly before parking — lower latency under brief contention. Add `timed` to enable `try_lock_for` / `try_lock_until`. -### `fair_mutex` +### `ticket_mutex` -Ticket lock guaranteeing FIFO lock acquisition order. Prevents starvation under high contention. +FIFO ticket lock guaranteeing strict acquisition order. Prevents starvation under sustained contention. Not suitable for high-throughput low-contention workloads. 
-### POSIX real-time mutexes +### POSIX mutexes -Priority ceiling and inheritance protocols prevent priority inversion. Significantly higher locking overhead; suitable only for real-time systems requiring deterministic scheduling. +`pthread_mutex<>` wraps `pthread_mutex_t`. Priority-protocol variants (`priority_inherit`, `priority_ceiling`) prevent priority inversion in real-time systems — higher overhead; requires RT scheduling for ceiling variant. ### Platform-specific async mutexes -`win32_mutex`, `kqueue_mutex`, and `eventfd_mutex` (cross-platform alias: `native_async_mutex`) +`win32_event_mutex`, `kqueue_mutex`, `eventfd_mutex` (and their `` policy variants) expose native OS handles enabling integration with event loops (Boost.Asio, libdispatch, epoll, Qt, etc.). +The `native_async_mutex` alias resolves to the fastest variant (`with_backoff`) for the current platform. Handlers receive an `expected, std::error_code>` (`std::expected` or `tl::expected`): diff --git a/include/nova/sync/detail/backoff.hpp b/include/nova/sync/detail/backoff.hpp index 8e2f9a2..7ba68f9 100644 --- a/include/nova/sync/detail/backoff.hpp +++ b/include/nova/sync/detail/backoff.hpp @@ -11,7 +11,7 @@ namespace nova::sync::detail { struct exponential_backoff { - int backoff = 1; + int backoff = 8; static constexpr int spin_limit = 1 << 12; void run() diff --git a/include/nova/sync/event/manual_reset_event.hpp b/include/nova/sync/event/manual_reset_event.hpp deleted file mode 100644 index 6106cb0..0000000 --- a/include/nova/sync/event/manual_reset_event.hpp +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include -#include - -#include -#include - -namespace nova::sync { - -/// @brief Manual-reset event. -/// -/// Once `signal()` is called, all waiters are woken and subsequent `wait()` / -/// `try_wait()` calls return immediately until `reset()` is called. 
- -class manual_reset_event -{ -public: - /// @brief Constructs the event in the "not set" state. - explicit manual_reset_event( bool initially_set = false ) noexcept : - state_( initially_set ? 1u : 0u ) - {} - - ~manual_reset_event() = default; - manual_reset_event( const manual_reset_event& ) = delete; - manual_reset_event& operator=( const manual_reset_event& ) = delete; - - /// @brief Transitions the event to "set", waking all waiters. - void signal() noexcept - { - if ( state_.exchange( 1u, std::memory_order_release ) == 0u ) - atomic_notify_all( state_ ); - } - - /// @brief Transitions the event back to "not set". - void reset() noexcept - { - state_.store( 0u, std::memory_order_relaxed ); - } - - /// @brief Returns true if the event is currently set, without blocking. - [[nodiscard]] bool try_wait() const noexcept - { - return state_.load( std::memory_order_acquire ) != 0u; - } - - /// @brief Blocks until the event is set. - /// - void wait() noexcept - { - if ( state_.load( std::memory_order_acquire ) != 0u ) - return; - - // Park: block until the value is no longer 0. - // Spurious wakeups are handled by the loop. 
- while ( state_.load( std::memory_order_relaxed ) == 0u ) - atomic_wait( state_, 0u, std::memory_order_acquire ); - } - - template < class Clock, class Duration > - [[nodiscard]] bool try_wait_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept - { - if ( state_.load( std::memory_order_acquire ) != 0 ) - return true; - - while ( state_.load( std::memory_order_relaxed ) == 0 ) { - if ( !atomic_wait_until( state_, 0u, abs_time, std::memory_order_acquire ) ) { - return state_.load( std::memory_order_acquire ) != 0; - } - } - - return true; - } - - template < class Rep, class Period > - [[nodiscard]] bool try_wait_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept - { - return try_wait_until( std::chrono::steady_clock::now() + rel_time ); - } - -private: - std::atomic< uint32_t > state_; -}; - -} // namespace nova::sync diff --git a/include/nova/sync/event/auto_reset_event.hpp b/include/nova/sync/event/parking_auto_reset_event.hpp similarity index 57% rename from include/nova/sync/event/auto_reset_event.hpp rename to include/nova/sync/event/parking_auto_reset_event.hpp index e16e104..8679737 100644 --- a/include/nova/sync/event/auto_reset_event.hpp +++ b/include/nova/sync/event/parking_auto_reset_event.hpp @@ -4,34 +4,50 @@ #pragma once #include +#include #include #include #include +#include namespace nova::sync { -/// @brief Auto-reset event. +/// @brief Auto-reset event with optional exponential backoff. /// /// Each `signal()` delivers exactly one token. If a thread is blocked in /// `wait()`, it is woken and the token is consumed. Otherwise the token is /// stored for the next `wait()` / `try_wait()` call. -class auto_reset_event +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |---------------|--------------------------------------------------------| +/// | (no exponential_backoff) | Park immediately when no token available (default). 
| +/// | `with_backoff`| Spin with exponential backoff before parking. | +/// +/// ### Aliases +/// - `parking_auto_reset_event<>` — pure park, no spinning. +/// - `parking_auto_reset_event` — spin-then-park. +/// - `auto_reset_event` — deprecated alias for `parking_auto_reset_event<>`. +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +class parking_auto_reset_event { + std::atomic< int32_t > state_; + + static constexpr bool use_backoff = detail::has_backoff_v< Policies... >; + public: /// @brief Constructs the event. - /// @param initially_set When true the first wait() / try_wait() will - /// succeed without blocking. - explicit auto_reset_event( bool initially_set = false ) noexcept : + /// @param initially_set When true the first wait() / try_wait() succeeds without blocking. + explicit parking_auto_reset_event( bool initially_set = false ) noexcept : state_( initially_set ? 1 : 0 ) {} - ~auto_reset_event() = default; - auto_reset_event( const auto_reset_event& ) = delete; - auto_reset_event& operator=( const auto_reset_event& ) = delete; - - // ----------------------------------------------------------------------- - // Signalling + ~parking_auto_reset_event() = default; + parking_auto_reset_event( const parking_auto_reset_event& ) = delete; + parking_auto_reset_event& operator=( const parking_auto_reset_event& ) = delete; /// @brief Delivers one token, waking exactly one waiter. void signal() noexcept @@ -49,11 +65,8 @@ class auto_reset_event } } - // ----------------------------------------------------------------------- - // Waiting - /// @brief Atomically consumes the signal if set. - /// @return true if a signal was available and consumed, false otherwise. + /// @return `true` if a signal was available and consumed, `false` otherwise. 
[[nodiscard]] bool try_wait() noexcept { int32_t s = state_.load( std::memory_order_relaxed ); @@ -65,10 +78,17 @@ class auto_reset_event } /// @brief Blocks until a signal is available, then consumes it. - /// void wait() noexcept { - // Fast path: consume an existing signal. + if constexpr ( use_backoff ) { + detail::exponential_backoff backoff; + while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + if ( try_wait() ) + return; + backoff.run(); + } + } + int32_t prev = state_.fetch_sub( 1, std::memory_order_acquire ); if ( prev > 0 ) return; @@ -82,6 +102,8 @@ class auto_reset_event } } + /// @brief Blocks until a signal is available or the deadline is reached. + /// @return `true` if signal consumed, `false` if timed out. template < class Clock, class Duration > [[nodiscard]] bool try_wait_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept { @@ -97,12 +119,9 @@ class auto_reset_event while ( cur <= my_slot ) { if ( !atomic_wait_until( state_, cur, abs_time, std::memory_order_acquire ) ) { - // Timeout — but check one more time cur = state_.load( std::memory_order_relaxed ); - if ( cur > my_slot ) { + if ( cur > my_slot ) return true; - } - // Must undo our wait registration: add 1 back state_.fetch_add( 1, std::memory_order_relaxed ); return false; } @@ -112,14 +131,19 @@ class auto_reset_event return true; } + /// @brief Blocks until a signal is available or the duration expires. + /// @return `true` if signal consumed, `false` if timed out. template < class Rep, class Period > [[nodiscard]] bool try_wait_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept { return try_wait_until( std::chrono::steady_clock::now() + rel_time ); } - -private: - std::atomic< int32_t > state_; }; +//---------------------------------------------------------------------------------------------------------------------- +// Convenience alias + +/// @brief Deprecated alias for `parking_auto_reset_event<>`. 
+using auto_reset_event = parking_auto_reset_event<>; + } // namespace nova::sync diff --git a/include/nova/sync/event/parking_manual_reset_event.hpp b/include/nova/sync/event/parking_manual_reset_event.hpp new file mode 100644 index 0000000..2d4a09a --- /dev/null +++ b/include/nova/sync/event/parking_manual_reset_event.hpp @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#pragma once + +#include +#include + +#include +#include +#include +#include + +namespace nova::sync { + +/// @brief Manual-reset event with optional exponential backoff. +/// +/// Once `signal()` is called, all waiters are woken and subsequent `wait()` / +/// `try_wait()` calls return immediately until `reset()` is called. +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |---------------|--------------------------------------------------------| +/// | (no exponential_backoff) | Park immediately when not set (default). | +/// | `with_backoff`| Spin with exponential backoff before parking. | +/// +/// ### Aliases +/// - `parking_manual_reset_event<>` — pure park, no spinning. +/// - `parking_manual_reset_event` — spin-then-park. +/// - `manual_reset_event` — deprecated alias for `parking_manual_reset_event<>`. +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +class parking_manual_reset_event +{ + std::atomic< uint32_t > state_; + + static constexpr bool use_backoff = detail::has_backoff_v< Policies... >; + +public: + /// @brief Constructs the event in the "not set" state. + explicit parking_manual_reset_event( bool initially_set = false ) noexcept : + state_( initially_set ? 
1u : 0u ) + {} + + ~parking_manual_reset_event() = default; + parking_manual_reset_event( const parking_manual_reset_event& ) = delete; + parking_manual_reset_event& operator=( const parking_manual_reset_event& ) = delete; + + /// @brief Transitions the event to "set", waking all waiters. + void signal() noexcept + { + if ( state_.exchange( 1u, std::memory_order_release ) == 0u ) + atomic_notify_all( state_ ); + } + + /// @brief Transitions the event back to "not set". + void reset() noexcept + { + state_.store( 0u, std::memory_order_relaxed ); + } + + /// @brief Returns `true` if the event is currently set, without blocking. + [[nodiscard]] bool try_wait() const noexcept + { + return state_.load( std::memory_order_acquire ) != 0u; + } + + /// @brief Blocks until the event is set. + void wait() noexcept + { + if constexpr ( use_backoff ) { + detail::exponential_backoff backoff; + while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + if ( state_.load( std::memory_order_acquire ) != 0u ) + return; + backoff.run(); + } + } + + if ( state_.load( std::memory_order_acquire ) != 0u ) + return; + + while ( state_.load( std::memory_order_relaxed ) == 0u ) + atomic_wait( state_, 0u, std::memory_order_acquire ); + } + + /// @brief Blocks until the event is set or the deadline is reached. + /// @return `true` if set, `false` if timed out. + template < class Clock, class Duration > + [[nodiscard]] bool try_wait_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept + { + if ( state_.load( std::memory_order_acquire ) != 0 ) + return true; + + while ( state_.load( std::memory_order_relaxed ) == 0 ) { + if ( !atomic_wait_until( state_, 0u, abs_time, std::memory_order_acquire ) ) + return state_.load( std::memory_order_acquire ) != 0; + } + + return true; + } + + /// @brief Blocks until the event is set or the duration expires. + /// @return `true` if set, `false` if timed out. 
+ template < class Rep, class Period > + [[nodiscard]] bool try_wait_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept + { + return try_wait_until( std::chrono::steady_clock::now() + rel_time ); + } +}; + +//---------------------------------------------------------------------------------------------------------------------- +// Convenience alias + +/// @brief Deprecated alias for `parking_manual_reset_event<>`. +using manual_reset_event = parking_manual_reset_event<>; + +} // namespace nova::sync diff --git a/include/nova/sync/event/timed_auto_reset_event.hpp b/include/nova/sync/event/timed_auto_reset_event.hpp index 9540214..a5e466c 100644 --- a/include/nova/sync/event/timed_auto_reset_event.hpp +++ b/include/nova/sync/event/timed_auto_reset_event.hpp @@ -11,9 +11,7 @@ #include -#if defined( __linux__ ) || defined( _WIN32 ) -# include "nova/sync/event/auto_reset_event.hpp" -#endif +#include "nova/sync/event/parking_auto_reset_event.hpp" namespace nova::sync { namespace impl { diff --git a/include/nova/sync/event/timed_manual_reset_event.hpp b/include/nova/sync/event/timed_manual_reset_event.hpp index dc64b3b..75f4f26 100644 --- a/include/nova/sync/event/timed_manual_reset_event.hpp +++ b/include/nova/sync/event/timed_manual_reset_event.hpp @@ -11,11 +11,7 @@ #include -#if defined( __linux__ ) || defined( _WIN32 ) -# include "nova/sync/event/manual_reset_event.hpp" -#else - -#endif +#include "nova/sync/event/parking_manual_reset_event.hpp" namespace nova::sync { diff --git a/include/nova/sync/mutex/eventfd_mutex.hpp b/include/nova/sync/mutex/eventfd_mutex.hpp index 80fb183..ff1e936 100644 --- a/include/nova/sync/mutex/eventfd_mutex.hpp +++ b/include/nova/sync/mutex/eventfd_mutex.hpp @@ -14,14 +14,17 @@ # include # include # include +# include # include # include namespace nova::sync { +namespace detail { + /// @brief Simple async-capable mutex implemented via Linux `eventfd` in semaphore mode. 
/// -class NOVA_SYNC_CAPABILITY( "mutex" ) eventfd_mutex +class NOVA_SYNC_CAPABILITY( "mutex" ) eventfd_mutex_impl { public: /// @brief The native handle type — a POSIX file descriptor. @@ -30,10 +33,10 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) eventfd_mutex using duration_type = std::chrono::nanoseconds; /// @brief Constructs an unlocked eventfd mutex. - eventfd_mutex(); - ~eventfd_mutex(); - eventfd_mutex( const eventfd_mutex& ) = delete; - eventfd_mutex& operator=( const eventfd_mutex& ) = delete; + eventfd_mutex_impl(); + ~eventfd_mutex_impl(); + eventfd_mutex_impl( const eventfd_mutex_impl& ) = delete; + eventfd_mutex_impl& operator=( const eventfd_mutex_impl& ) = delete; /// @brief Acquires the lock, blocking as necessary. void lock() noexcept NOVA_SYNC_ACQUIRE(); @@ -104,18 +107,18 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) eventfd_mutex /// @brief Fast async-capable mutex with user-space fast path and eventfd kernel fallback. /// -class NOVA_SYNC_CAPABILITY( "mutex" ) fast_eventfd_mutex +class NOVA_SYNC_CAPABILITY( "mutex" ) fast_eventfd_mutex_impl { public: /// @brief The native handle type — a POSIX file descriptor. using native_handle_type = int; using duration_type = std::chrono::nanoseconds; - /// @brief Constructs an unlocked fast_eventfd_mutex. - fast_eventfd_mutex(); - ~fast_eventfd_mutex(); - fast_eventfd_mutex( const fast_eventfd_mutex& ) = delete; - fast_eventfd_mutex& operator=( const fast_eventfd_mutex& ) = delete; + /// @brief Constructs an unlocked fast_eventfd_mutex_impl. + fast_eventfd_mutex_impl(); + ~fast_eventfd_mutex_impl(); + fast_eventfd_mutex_impl( const fast_eventfd_mutex_impl& ) = delete; + fast_eventfd_mutex_impl& operator=( const fast_eventfd_mutex_impl& ) = delete; /// @brief Acquires the lock, blocking as necessary. 
void lock() noexcept NOVA_SYNC_ACQUIRE() @@ -177,7 +180,7 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fast_eventfd_mutex // Register as waiter before calling ppoll_until s = add_async_waiter(); // returns state after +2 - detail::async_waiter_guard< fast_eventfd_mutex > guard( *this, detail::adopt_async_waiter ); + detail::async_waiter_guard< fast_eventfd_mutex_impl > guard( *this, detail::adopt_async_waiter ); while ( true ) { if ( ( s & 1u ) == 0 ) { @@ -234,6 +237,28 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fast_eventfd_mutex bool try_lock_for_ns( duration_type rel_ns ) noexcept; }; +} // namespace detail + +//---------------------------------------------------------------------------------------------------------------------- + +/// @brief Linux eventfd-based async-capable mutex with optional exponential backoff. +/// +/// Blocking is performed via `ppoll`; the underlying file descriptor is exposed for +/// integration with event loops (Boost.Asio, Qt, epoll, etc.). +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |----------------|---------------------------------------------------------------| +/// | `with_backoff` | Spin with exponential backoff before parking via kernel wait. | +/// +/// Without `with_backoff`, threads park immediately via `ppoll`. +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +class NOVA_SYNC_CAPABILITY( "mutex" ) eventfd_mutex : + public std::conditional_t< detail::has_backoff_v< Policies... 
>, detail::fast_eventfd_mutex_impl, detail::eventfd_mutex_impl > +{}; + } // namespace nova::sync #endif // NOVA_SYNC_HAS_EVENTFD_MUTEX diff --git a/include/nova/sync/mutex/fast_mutex.hpp b/include/nova/sync/mutex/fast_mutex.hpp deleted file mode 100644 index 5c05864..0000000 --- a/include/nova/sync/mutex/fast_mutex.hpp +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include -#include - -#include -#include - -namespace nova::sync { - -/// @brief Fast mutex with spinning and park/unpark. -class NOVA_SYNC_CAPABILITY( "mutex" ) fast_mutex -{ -public: - /// @brief Constructs an unlocked fast mutex. - fast_mutex() = default; - ~fast_mutex() = default; - fast_mutex( const fast_mutex& ) = delete; - fast_mutex& operator=( const fast_mutex& ) = delete; - - /// @brief Acquires the lock, spinning and parking as necessary. - inline void lock() noexcept NOVA_SYNC_ACQUIRE() - { - uint32_t expected = 0; - if ( state_.compare_exchange_weak( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; - - lock_slow( expected ); - } - - /// @brief Attempts to acquire the lock without blocking. - /// @return `true` if lock acquired, `false` if already locked. - [[nodiscard]] inline bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) - { - uint32_t expected = 0; - return state_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ); - } - - /// @brief Releases the lock and wakes one waiting thread if any. - inline void unlock() noexcept NOVA_SYNC_RELEASE() - { - uint32_t prev = state_.fetch_and( ~1u, std::memory_order_release ); - - if ( prev > 1 ) - atomic_notify_one( state_ ); - } - - /// @brief Tries to acquire the lock within a relative timeout. - /// @return `true` if lock acquired, `false` if timed out. 
- template < class Rep, class Period > - [[nodiscard]] bool try_lock_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept - NOVA_SYNC_TRY_ACQUIRE( true ) - { - return try_lock_until( std::chrono::steady_clock::now() + rel_time ); - } - - /// @brief Tries to acquire the lock until an absolute deadline. - /// @return `true` if lock acquired, `false` if timed out. - template < class Clock, class Duration > - [[nodiscard]] bool try_lock_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept - NOVA_SYNC_TRY_ACQUIRE( true ) - { - // Fast path - if ( try_lock() ) - return true; - - return lock_slow_until( abs_time ); - } - -private: - // State layout: - // Bit 0 : Lock status (0 = Free, 1 = Locked) - // Bits 1-31 : Number of sleeping threads (Waiter count) - std::atomic< uint32_t > state_ { 0 }; - - void lock_slow( uint32_t expected ) noexcept; - - template < class Clock, class Duration > - bool lock_slow_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept - { - // Register as waiter - state_.fetch_add( 2, std::memory_order_relaxed ); - uint32_t expected = state_.load( std::memory_order_relaxed ); - - while ( true ) { - if ( ( expected & 1 ) == 0 ) { - uint32_t desired = ( expected - 2 ) | 1; - if ( state_.compare_exchange_weak( - expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; - continue; // CAS failed, retry - } - - if ( !atomic_wait_until( state_, expected, abs_time ) ) { - // Timed out — undo waiter registration - expected = state_.load( std::memory_order_relaxed ); - while ( true ) { - // Try to grab lock while unregistering - if ( ( expected & 1 ) == 0 ) { - uint32_t desired = ( expected - 2 ) | 1; - if ( state_.compare_exchange_weak( - expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; // grabbed it at last moment - continue; - } - // Lock still held: just decrement waiter count - if ( state_.compare_exchange_weak( - 
expected, expected - 2, std::memory_order_relaxed, std::memory_order_relaxed ) ) - return false; - } - } - - expected = state_.load( std::memory_order_relaxed ); - } - } -}; - -} // namespace nova::sync diff --git a/include/nova/sync/mutex/kqueue_mutex.hpp b/include/nova/sync/mutex/kqueue_mutex.hpp index a71658b..bb99a4c 100644 --- a/include/nova/sync/mutex/kqueue_mutex.hpp +++ b/include/nova/sync/mutex/kqueue_mutex.hpp @@ -14,14 +14,17 @@ # include # include # include +# include # include # include namespace nova::sync { +namespace detail { + /// @brief Simple async-capable mutex implemented via Apple `kqueue` with `EVFILT_USER`. /// -class NOVA_SYNC_CAPABILITY( "mutex" ) kqueue_mutex +class NOVA_SYNC_CAPABILITY( "mutex" ) kqueue_mutex_impl { public: /// @brief The native handle type — a POSIX file descriptor. @@ -30,10 +33,10 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) kqueue_mutex using duration_type = std::chrono::nanoseconds; /// @brief Constructs an unlocked kqueue mutex. - kqueue_mutex(); - ~kqueue_mutex(); - kqueue_mutex( const kqueue_mutex& ) = delete; - kqueue_mutex& operator=( const kqueue_mutex& ) = delete; + kqueue_mutex_impl(); + ~kqueue_mutex_impl(); + kqueue_mutex_impl( const kqueue_mutex_impl& ) = delete; + kqueue_mutex_impl& operator=( const kqueue_mutex_impl& ) = delete; /// @brief Acquires the lock, blocking as necessary. void lock() noexcept NOVA_SYNC_ACQUIRE(); @@ -103,7 +106,7 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) kqueue_mutex /// @brief Fast async-capable mutex with user-space fast path and kqueue kernel fallback. /// -class NOVA_SYNC_CAPABILITY( "mutex" ) fast_kqueue_mutex +class NOVA_SYNC_CAPABILITY( "mutex" ) fast_kqueue_mutex_impl { public: /// @brief The native handle type — a POSIX file descriptor. @@ -111,11 +114,11 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fast_kqueue_mutex /// @brief Effective timeout resolution (kevent timespec is nanosecond-precise). 
using duration_type = std::chrono::nanoseconds; - /// @brief Constructs an unlocked fast_kqueue_mutex. - fast_kqueue_mutex(); - ~fast_kqueue_mutex(); - fast_kqueue_mutex( const fast_kqueue_mutex& ) = delete; - fast_kqueue_mutex& operator=( const fast_kqueue_mutex& ) = delete; + /// @brief Constructs an unlocked fast_kqueue_mutex_impl. + fast_kqueue_mutex_impl(); + ~fast_kqueue_mutex_impl(); + fast_kqueue_mutex_impl( const fast_kqueue_mutex_impl& ) = delete; + fast_kqueue_mutex_impl& operator=( const fast_kqueue_mutex_impl& ) = delete; /// @brief Acquires the lock, blocking as necessary. void lock() noexcept NOVA_SYNC_ACQUIRE() @@ -161,25 +164,22 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fast_kqueue_mutex if constexpr ( std::is_same_v< Clock, std::chrono::system_clock > || std::is_same_v< Clock, std::chrono::steady_clock > ) { - // Register as async waiter so unlock() will trigger the kevent. - auto s = add_async_waiter(); - detail::async_waiter_guard< fast_kqueue_mutex > guard( *this, detail::adopt_async_waiter ); + auto s = add_async_waiter(); + detail::async_waiter_guard< fast_kqueue_mutex_impl > guard( *this, detail::adopt_async_waiter ); while ( true ) { if ( ( s & 1u ) == 0 ) { uint32_t desired = ( s - 2u ) | 1u; if ( state_.compare_exchange_weak( s, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) { - // CAS succeeded: consume any pending NOTE_TRIGGER. consume_lock(); - guard.dismiss(); // waiter count already decremented in CAS + guard.dismiss(); return true; } continue; } if ( !detail::kevent_until( kqfd_, 1, abs_time ) ) - // Timed out — guard destructor calls remove_async_waiter(). 
return try_lock(); // one last attempt after timeout consume_lock(); @@ -220,6 +220,28 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fast_kqueue_mutex bool try_lock_for_impl( std::chrono::nanoseconds ) noexcept; }; +} // namespace detail + +//---------------------------------------------------------------------------------------------------------------------- + +/// @brief Apple kqueue-based async-capable mutex with optional exponential backoff. +/// +/// Blocks via `kevent` with `EVFILT_USER`; the underlying file descriptor is exposed +/// for integration with event loops (Boost.Asio, libdispatch, etc.). +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |----------------|---------------------------------------------------------------| +/// | `with_backoff` | Spin with exponential backoff before parking via kernel wait. | +/// +/// Without `with_backoff`, threads park immediately via `kevent`. +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +class NOVA_SYNC_CAPABILITY( "mutex" ) kqueue_mutex : + public std::conditional_t< detail::has_backoff_v< Policies... 
>, detail::fast_kqueue_mutex_impl, detail::kqueue_mutex_impl > +{}; + } // namespace nova::sync #endif // NOVA_SYNC_HAS_KQUEUE_MUTEX diff --git a/include/nova/sync/mutex/native_async_mutex.hpp b/include/nova/sync/mutex/native_async_mutex.hpp index db3f361..a6cc5f1 100644 --- a/include/nova/sync/mutex/native_async_mutex.hpp +++ b/include/nova/sync/mutex/native_async_mutex.hpp @@ -27,13 +27,13 @@ using native_async_mutex = win32_event_mutex; #elif defined( __APPLE__ ) -using native_fast_async_mutex = fast_kqueue_mutex; -using native_async_mutex = kqueue_mutex; +using native_fast_async_mutex = kqueue_mutex< with_backoff >; +using native_async_mutex = kqueue_mutex<>; #elif defined( __linux__ ) -using native_fast_async_mutex = fast_eventfd_mutex; -using native_async_mutex = eventfd_mutex; +using native_fast_async_mutex = eventfd_mutex< with_backoff >; +using native_async_mutex = eventfd_mutex<>; #endif diff --git a/include/nova/sync/mutex/parking_mutex.hpp b/include/nova/sync/mutex/parking_mutex.hpp new file mode 100644 index 0000000..2a0b4bb --- /dev/null +++ b/include/nova/sync/mutex/parking_mutex.hpp @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + + +namespace nova::sync { + +//---------------------------------------------------------------------------------------------------------------------- +// Concrete implementation classes (slow paths compiled into a .cpp TU) + +namespace impl { + +/// @brief parking_mutex without backoff, using std::atomic::wait (untimed). 
+class NOVA_SYNC_CAPABILITY( "mutex" ) parking_mutex_plain +{ +public: + parking_mutex_plain() = default; + ~parking_mutex_plain() = default; + parking_mutex_plain( const parking_mutex_plain& ) = delete; + parking_mutex_plain& operator=( const parking_mutex_plain& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + uint32_t expected = 0; + if ( state_.compare_exchange_weak( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + lock_slow( expected ); + } + + [[nodiscard]] inline bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + uint32_t expected = 0; + return state_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ); + } + + inline void unlock() noexcept NOVA_SYNC_RELEASE() + { + uint32_t prev = state_.fetch_and( ~1u, std::memory_order_release ); + if ( prev > 1 ) { +#ifdef __linux__ + atomic_notify_one( state_ ); +#else + state_.notify_one(); +#endif + } + } + +protected: + // State layout: + // Bit 0 : Lock status (0 = Free, 1 = Locked) + // Bits 1-31 : Number of sleeping threads (Waiter count) + std::atomic< uint32_t > state_ { 0 }; + +private: + void lock_slow( uint32_t expected ) noexcept; +}; + +/// @brief parking_mutex with exponential backoff, using std::atomic::wait (untimed). 
+class NOVA_SYNC_CAPABILITY( "mutex" ) parking_mutex_backoff : protected parking_mutex_plain +{ +public: + parking_mutex_backoff() = default; + ~parking_mutex_backoff() = default; + parking_mutex_backoff( const parking_mutex_backoff& ) = delete; + parking_mutex_backoff& operator=( const parking_mutex_backoff& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + uint32_t expected = 0; + if ( state_.compare_exchange_weak( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + lock_slow( expected ); + } + + using parking_mutex_plain::try_lock; + using parking_mutex_plain::unlock; + +private: + void lock_slow( uint32_t expected ) noexcept; +}; + +/// @brief parking_mutex without backoff, using futex-based atomic_wait (timed-capable). +class NOVA_SYNC_CAPABILITY( "mutex" ) parking_mutex_timed +{ +public: + parking_mutex_timed() = default; + ~parking_mutex_timed() = default; + parking_mutex_timed( const parking_mutex_timed& ) = delete; + parking_mutex_timed& operator=( const parking_mutex_timed& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + uint32_t expected = 0; + if ( state_.compare_exchange_weak( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + lock_slow( expected ); + } + + [[nodiscard]] inline bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + uint32_t expected = 0; + return state_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ); + } + + inline void unlock() noexcept NOVA_SYNC_RELEASE() + { + uint32_t prev = state_.fetch_and( ~1u, std::memory_order_release ); + if ( prev > 1 ) + atomic_notify_one( state_ ); + }; + + template < class Rep, class Period > + [[nodiscard]] bool try_lock_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept + NOVA_SYNC_TRY_ACQUIRE( true ) + { + return try_lock_until( std::chrono::steady_clock::now() + rel_time ); + } + + template < class Clock, class Duration > + 
[[nodiscard]] bool try_lock_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept + NOVA_SYNC_TRY_ACQUIRE( true ) + { + if ( try_lock() ) + return true; + return lock_slow_until( abs_time ); + } + +protected: + std::atomic< uint32_t > state_ { 0 }; + +private: + void lock_slow( uint32_t expected ) noexcept; + + template < class Clock, class Duration > + bool lock_slow_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept + { + state_.fetch_add( 2, std::memory_order_relaxed ); + uint32_t expected = state_.load( std::memory_order_relaxed ); + + while ( true ) { + if ( ( expected & 1 ) == 0 ) { + uint32_t desired = ( expected - 2 ) | 1; + if ( state_.compare_exchange_weak( + expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return true; + continue; + } + + if ( !atomic_wait_until( state_, expected, abs_time, std::memory_order_relaxed ) ) { + // Timed out — undo waiter registration + expected = state_.load( std::memory_order_relaxed ); + while ( true ) { + if ( ( expected & 1 ) == 0 ) { + uint32_t desired = ( expected - 2 ) | 1; + if ( state_.compare_exchange_weak( + expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return true; + continue; + } + if ( state_.compare_exchange_weak( + expected, expected - 2, std::memory_order_relaxed, std::memory_order_relaxed ) ) + return false; + } + } + + expected = state_.load( std::memory_order_relaxed ); + } + } +}; + +/// @brief parking_mutex with exponential backoff, using futex-based atomic_wait (timed-capable). 
+class NOVA_SYNC_CAPABILITY( "mutex" ) parking_mutex_timed_backoff : protected parking_mutex_timed +{ +public: + parking_mutex_timed_backoff() = default; + ~parking_mutex_timed_backoff() = default; + parking_mutex_timed_backoff( const parking_mutex_timed_backoff& ) = delete; + parking_mutex_timed_backoff& operator=( const parking_mutex_timed_backoff& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + uint32_t expected = 0; + if ( state_.compare_exchange_weak( expected, 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + lock_slow( expected ); + } + + using parking_mutex_timed::try_lock; + using parking_mutex_timed::try_lock_for; + using parking_mutex_timed::try_lock_until; + using parking_mutex_timed::unlock; + +private: + void lock_slow( uint32_t expected ) noexcept; +}; + +} // namespace impl + +//---------------------------------------------------------------------------------------------------------------------- + +/// @brief Futex-based parking mutex with optional exponential backoff and timed waits. +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |----------------|--------------------------------------------------------------| +/// | `with_backoff` | Spin with exponential backoff before parking. | +/// | `timed` | Use futex-based waits (enables try_lock_for/try_lock_until). | +template < typename... Policies > + requires( parameter::valid_parameters< std::tuple< detail::exponential_backoff_tag, detail::timed_tag >, Policies... > ) +class NOVA_SYNC_CAPABILITY( "mutex" ) parking_mutex : + public std::conditional_t< + detail::has_timed_v< Policies... >, + std::conditional_t< detail::has_backoff_v< Policies... >, impl::parking_mutex_timed_backoff, impl::parking_mutex_timed >, + std::conditional_t< detail::has_backoff_v< Policies... 
>, impl::parking_mutex_backoff, impl::parking_mutex_plain > > +{}; + +} // namespace nova::sync diff --git a/include/nova/sync/mutex/policies.hpp b/include/nova/sync/mutex/policies.hpp new file mode 100644 index 0000000..79f7df2 --- /dev/null +++ b/include/nova/sync/mutex/policies.hpp @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#pragma once + +#include + +namespace nova::sync { + +//---------------------------------------------------------------------------------------------------------------------- +// Backoff policy tags + +namespace detail { + +struct exponential_backoff_tag +{}; +struct recursive_tag +{}; +struct timed_tag +{}; +struct async_capable_tag +{}; +struct shared_tag +{}; +struct fair_tag +{}; + +} // namespace detail + +//---------------------------------------------------------------------------------------------------------------------- +// Backoff policies + +/// @brief Policy: exponential backoff before parking. +using with_backoff = parameter::flag_param< detail::exponential_backoff_tag >; + +/// @brief Policy: use futex-based timed wait in parking mutex. +using timed = parameter::flag_param< detail::timed_tag >; + +//---------------------------------------------------------------------------------------------------------------------- +// select_mutex trait policies + +/// @brief Trait: select a mutex that supports recursive locking. +using recursive = parameter::flag_param< detail::recursive_tag >; + +/// @brief Trait: select a mutex that supports timed waits (try_lock_for / try_lock_until). +using timed = parameter::flag_param< detail::timed_tag >; + +/// @brief Trait: select a mutex that supports async acquisition (native_async_mutex protocol). +using async_capable = parameter::flag_param< detail::async_capable_tag >; + +/// @brief Trait: select a mutex that supports shared (reader-writer) locking. 
+using shared = parameter::flag_param< detail::shared_tag >; + +/// @brief Trait: select a ticket/fair mutex instead of a parking mutex. +using fair = parameter::flag_param< detail::fair_tag >; + +//---------------------------------------------------------------------------------------------------------------------- +// Internal extraction helpers + +namespace detail { + +using backoff_allowed_tags = std::tuple< exponential_backoff_tag >; + +using select_mutex_allowed_tags + = std::tuple< recursive_tag, timed_tag, async_capable_tag, shared_tag, fair_tag, exponential_backoff_tag >; + +template < typename... Policies > +inline constexpr bool has_backoff_v = parameter::has_parameter_v< exponential_backoff_tag, Policies... >; + +template < typename... Policies > +inline constexpr bool has_timed_v = parameter::has_parameter_v< timed_tag, Policies... >; + +template < typename... Policies > +inline constexpr bool has_recursive_v = parameter::has_parameter_v< recursive_tag, Policies... >; + +template < typename... Policies > +inline constexpr bool has_async_capable_v = parameter::has_parameter_v< async_capable_tag, Policies... >; + +template < typename... Policies > +inline constexpr bool has_shared_v = parameter::has_parameter_v< shared_tag, Policies... >; + +template < typename... Policies > +inline constexpr bool has_fair_v = parameter::has_parameter_v< fair_tag, Policies... 
>; + +} // namespace detail + +} // namespace nova::sync diff --git a/include/nova/sync/mutex/pthread_mutex.hpp b/include/nova/sync/mutex/pthread_mutex.hpp new file mode 100644 index 0000000..d1cbcf0 --- /dev/null +++ b/include/nova/sync/mutex/pthread_mutex.hpp @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#pragma once + +#if __has_include( ) && __has_include( ) +# include +# include +# if defined( _POSIX_THREADS ) && _POSIX_THREADS >= 0 +# define NOVA_SYNC_HAS_PTHREAD_MUTEX 1 +# endif +# if defined( _POSIX_THREAD_PRIO_PROTECT ) && _POSIX_THREAD_PRIO_PROTECT >= 0 +# define NOVA_SYNC_HAS_PTHREAD_RT_MUTEX 1 +# endif +#endif + +#ifdef NOVA_SYNC_HAS_PTHREAD_MUTEX + +# include +# include +# include + +# include +# include +# include +# include + +namespace nova::sync { + +/// @brief POSIX mutex with configurable policy. +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |-------------------------|---------------------------------------------------------------------| +/// | `priority_inherit` | PTHREAD_PRIO_INHERIT — owner is boosted to highest waiter priority. | +/// | `priority_ceiling` | PTHREAD_PRIO_PROTECT — all holders are elevated to ceiling N. | +/// | `pthread_recursive` | PTHREAD_MUTEX_RECURSIVE — allows re-entrant locking. | +/// | `pthread_errorcheck` | PTHREAD_MUTEX_ERRORCHECK — returns error on double-lock. | +/// | `pthread_adaptive` | PTHREAD_MUTEX_ADAPTIVE_NP — adaptive spin before blocking (Linux). | +/// +/// At most one of `priority_inherit` / `priority_ceiling` may be given (mutually exclusive). +/// At most one of `pthread_recursive` / `pthread_errorcheck` / `pthread_adaptive` may be given. +/// +/// ### Availability +/// +/// This header is available when the system provides POSIX threads (`_POSIX_THREADS`). +/// Priority protocol features (`priority_inherit`, `priority_ceiling`) additionally require +/// `_POSIX_THREAD_PRIO_PROTECT`. 
+// pthread-specific policy definitions (kept here because they're pthread-only) +namespace pthread_policy { +namespace tags { +struct priority_inherit_tag +{}; +struct priority_ceiling_tag +{}; +struct pthread_errorcheck_tag +{}; +struct pthread_adaptive_tag +{}; +} // namespace tags + +using priority_inherit = parameter::flag_param< tags::priority_inherit_tag >; +template < int Ceiling > +using priority_ceiling = parameter::integral_param< tags::priority_ceiling_tag, int, Ceiling >; +using pthread_errorcheck = parameter::flag_param< tags::pthread_errorcheck_tag >; +using pthread_adaptive = parameter::flag_param< tags::pthread_adaptive_tag >; + +using pthread_allowed_tags = std::tuple< tags::priority_inherit_tag, + tags::priority_ceiling_tag, + detail::recursive_tag, + tags::pthread_errorcheck_tag, + tags::pthread_adaptive_tag >; + +template < typename... Policies > +inline constexpr bool has_priority_inherit_v = parameter::has_parameter_v< tags::priority_inherit_tag, Policies... >; + +template < typename... Policies > +inline constexpr bool has_priority_ceiling_v = parameter::has_parameter_v< tags::priority_ceiling_tag, Policies... >; + +template < typename... Policies > +inline constexpr int extract_priority_ceiling_v + = parameter::extract_integral_v< tags::priority_ceiling_tag, int, 0, Policies... >; + +template < typename... Policies > +inline constexpr bool has_pthread_errorcheck_v = parameter::has_parameter_v< tags::pthread_errorcheck_tag, Policies... >; + +template < typename... Policies > +inline constexpr bool has_pthread_adaptive_v = parameter::has_parameter_v< tags::pthread_adaptive_tag, Policies... >; +} // namespace pthread_policy + +template < typename... Policies > + requires( parameter::valid_parameters< pthread_policy::pthread_allowed_tags, Policies... > + && !( pthread_policy::has_priority_inherit_v< Policies... > + && pthread_policy::has_priority_ceiling_v< Policies... > ) + && !( detail::has_recursive_v< Policies... 
> && pthread_policy::has_pthread_errorcheck_v< Policies... > ) + && !( detail::has_recursive_v< Policies... > && pthread_policy::has_pthread_adaptive_v< Policies... > ) + && !(pthread_policy::has_pthread_errorcheck_v< Policies... > + && pthread_policy::has_pthread_adaptive_v< Policies... >)) +class NOVA_SYNC_CAPABILITY( "mutex" ) NOVA_SYNC_REENTRANT_CAPABILITY pthread_mutex +{ + pthread_mutex_t mutex_ = PTHREAD_MUTEX_INITIALIZER; + + static constexpr bool use_inherit = pthread_policy::has_priority_inherit_v< Policies... >; + static constexpr bool use_ceiling = pthread_policy::has_priority_ceiling_v< Policies... >; + static constexpr int ceiling_val = pthread_policy::extract_priority_ceiling_v< Policies... >; + static constexpr bool use_rt = use_inherit || use_ceiling; + static constexpr int protocol = use_inherit ? PTHREAD_PRIO_INHERIT : PTHREAD_PRIO_PROTECT; + + static constexpr bool use_recursive = detail::has_recursive_v< Policies... >; + static constexpr bool use_errorcheck = pthread_policy::has_pthread_errorcheck_v< Policies... >; + static constexpr bool use_adaptive = pthread_policy::has_pthread_adaptive_v< Policies... >; + +public: + /// @brief Constructs the mutex (policy determined by template parameters). + explicit pthread_mutex() + { + initialize(); + } + + /// @brief Destroys the mutex. + ~pthread_mutex() + { + pthread_mutex_destroy( &mutex_ ); + } + + pthread_mutex( const pthread_mutex& ) = delete; + pthread_mutex& operator=( const pthread_mutex& ) = delete; + + /// @brief Acquires the lock, blocking until available. + void lock() NOVA_SYNC_ACQUIRE() + { + pthread_mutex_lock( &mutex_ ); + } + + /// @brief Attempts to acquire the lock without blocking. + /// @return `true` if acquired, `false` if already locked. + [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + return pthread_mutex_trylock( &mutex_ ) == 0; + } + + /// @brief Releases the lock. 
+ void unlock() noexcept NOVA_SYNC_RELEASE() + { + pthread_mutex_unlock( &mutex_ ); + } + + /// @brief Attempts to acquire the lock for up to the given duration. + /// @return `true` if acquired, `false` if timeout or error. + template < class Rep, class Period > + bool try_lock_for( const std::chrono::duration< Rep, Period >& rel_time ) NOVA_SYNC_TRY_ACQUIRE( true ) + { + return try_lock_until( std::chrono::steady_clock::now() + rel_time ); + } + + /// @brief Attempts to acquire the lock until the given time point. + /// @return `true` if acquired, `false` if timeout or error. + template < class Clock, class Duration > + bool try_lock_until( const std::chrono::time_point< Clock, Duration >& abs_time ) NOVA_SYNC_TRY_ACQUIRE( true ) + { + if constexpr ( std::is_same_v< Clock, std::chrono::system_clock > ) { + auto ns = std::chrono::time_point_cast< std::chrono::nanoseconds >( abs_time ).time_since_epoch(); + auto secs = std::chrono::duration_cast< std::chrono::seconds >( ns ); + auto nsecs = ns - secs; + + struct timespec ts { + .tv_sec = time_t( secs.count() ), + .tv_nsec = long( nsecs.count() ), + }; + + return pthread_mutex_timedlock( &mutex_, &ts ) == 0; + } else { + auto remaining = abs_time - Clock::now(); + if ( remaining <= std::chrono::nanoseconds::zero() ) + return try_lock(); + auto sys_deadline = std::chrono::system_clock::now() + remaining; + return try_lock_until( sys_deadline ); + } + } + +private: + void initialize() + { + pthread_mutexattr_t attr; + int result = pthread_mutexattr_init( &attr ); + assert( result == 0 && "pthread_mutexattr_init failed" ); + + if constexpr ( use_recursive ) { + result = pthread_mutexattr_settype( &attr, PTHREAD_MUTEX_RECURSIVE ); + assert( result == 0 && "pthread_mutexattr_settype(RECURSIVE) failed" ); + } else if constexpr ( use_errorcheck ) { + result = pthread_mutexattr_settype( &attr, PTHREAD_MUTEX_ERRORCHECK ); + assert( result == 0 && "pthread_mutexattr_settype(ERRORCHECK) failed" ); + } +# ifdef __linux__ + else 
if constexpr ( use_adaptive ) { + result = pthread_mutexattr_settype( &attr, PTHREAD_MUTEX_ADAPTIVE_NP ); + assert( result == 0 && "pthread_mutexattr_settype(ADAPTIVE_NP) failed" ); + } +# endif + +# ifdef NOVA_SYNC_HAS_PTHREAD_RT_MUTEX + if constexpr ( use_rt ) { + result = pthread_mutexattr_setprotocol( &attr, protocol ); + assert( result == 0 && "pthread_mutexattr_setprotocol failed" ); + + if constexpr ( use_ceiling ) { + result = pthread_mutexattr_setprioceiling( &attr, ceiling_val ); + assert( result == 0 && "pthread_mutexattr_setprioceiling failed" ); + } + } +# endif + + result = pthread_mutex_init( &mutex_, &attr ); + assert( result == 0 && "pthread_mutex_init failed" ); + + pthread_mutexattr_destroy( &attr ); + } +}; + +//---------------------------------------------------------------------------------------------------------------------- +// Convenience aliases + +/// @brief Default pthread mutex. +using pthread_default_mutex = pthread_mutex<>; + +/// @brief Priority-inherit pthread mutex. +using pthread_priority_inherit_mutex = pthread_mutex< pthread_policy::priority_inherit >; + +/// @brief Priority-ceiling pthread mutex with compile-time ceiling value. +/// @tparam Ceiling The priority ceiling level. +template < int Ceiling > +using pthread_priority_ceiling_mutex = pthread_mutex< pthread_policy::priority_ceiling< Ceiling > >; + +/// @brief Recursive pthread mutex. +using pthread_recursive_mutex = pthread_mutex< recursive >; + +//---------------------------------------------------------------------------------------------------------------------- +// Concept specializations + +namespace concepts { + +template < typename... Policies > + requires( detail::has_recursive_v< Policies... > ) +struct concepts_is_recursive< nova::sync::pthread_mutex< Policies... 
> > : std::true_type +{}; + +} // namespace concepts + +} // namespace nova::sync + +#endif // NOVA_SYNC_HAS_PTHREAD_MUTEX diff --git a/include/nova/sync/mutex/pthread_rt_mutex.hpp b/include/nova/sync/mutex/pthread_rt_mutex.hpp deleted file mode 100644 index 6b2ca83..0000000 --- a/include/nova/sync/mutex/pthread_rt_mutex.hpp +++ /dev/null @@ -1,174 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#if __has_include( ) && __has_include( ) -# include -# include -# if defined( _POSIX_THREAD_PRIO_PROTECT ) && _POSIX_THREAD_PRIO_PROTECT >= 0 -# define NOVA_SYNC_HAS_PTHREAD_RT_MUTEX 1 -# endif -#endif - -#ifdef NOVA_SYNC_HAS_PTHREAD_RT_MUTEX - -# include -# include -# include - -# include -# include - -namespace nova::sync { - -/// @brief Priority protocol for pthread mutexes. -enum class pthread_mutex_policy : uint8_t -{ - /// @brief Priority ceiling (PTHREAD_PRIO_PROTECT). - /// Any thread acquiring the lock is temporarily elevated to the ceiling priority. - priority_ceiling = PTHREAD_PRIO_PROTECT, - - /// @brief Priority inheritance (PTHREAD_PRIO_INHERIT). - /// The lock owner is boosted to any waiting higher-priority thread's level. - priority_inherit = PTHREAD_PRIO_INHERIT, -}; - -/// @brief Priority ceiling value for pthread_priority_ceiling_mutex. -struct priority_ceiling -{ - int value; ///< The priority ceiling level. - - /// @brief Constructs a priority ceiling value. - explicit constexpr priority_ceiling( int v ) noexcept : - value( v ) - {} -}; - -/// @brief POSIX real-time mutex with priority ceiling or inheritance protocol. -/// -/// Requires SCHED_FIFO or SCHED_RR scheduling for effective priority protection. -/// -/// @tparam Policy `pthread_mutex_policy::priority_ceiling` or `priority_inherit`. 
-template < pthread_mutex_policy Policy > -class NOVA_SYNC_CAPABILITY( "mutex" ) pthread_rt_mutex -{ - pthread_mutex_t mutex_ = PTHREAD_MUTEX_INITIALIZER; - -public: - /// @brief Constructs a priority-protected mutex with the given ceiling. - /// @param ceiling Priority ceiling value. - /// @throws std::runtime_error if underlying POSIX calls fail. - explicit pthread_rt_mutex( priority_ceiling ceiling ) - requires( Policy == pthread_mutex_policy::priority_ceiling ) - { - initialize( ceiling ); - } - - /// @brief Constructs a priority-inherited mutex. - /// @throws std::runtime_error if underlying POSIX calls fail. - explicit pthread_rt_mutex() - requires( Policy == pthread_mutex_policy::priority_inherit ) - { - initialize(); - } - - /// @brief Destroys the mutex. - ~pthread_rt_mutex() - { - pthread_mutex_destroy( &mutex_ ); - } - - pthread_rt_mutex( const pthread_rt_mutex& ) = delete; - pthread_rt_mutex& operator=( const pthread_rt_mutex& ) = delete; - - /// @brief Acquires the lock, blocking until available. - void lock() NOVA_SYNC_ACQUIRE() - { - pthread_mutex_lock( &mutex_ ); - } - - /// @brief Attempts to acquire the lock without blocking. - /// @return `true` if acquired, `false` if already locked. - [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) - { - return pthread_mutex_trylock( &mutex_ ) == 0; - } - - /// @brief Releases the lock. - void unlock() noexcept NOVA_SYNC_RELEASE() - { - pthread_mutex_unlock( &mutex_ ); - } - - /// @brief Attempts to acquire the lock for up to the given duration. - /// @param rel_time Maximum duration to wait. - /// @return `true` if acquired, `false` if timeout or error. - template < class Rep, class Period > - bool try_lock_for( const std::chrono::duration< Rep, Period >& rel_time ) NOVA_SYNC_TRY_ACQUIRE( true ) - { - return try_lock_until( std::chrono::steady_clock::now() + rel_time ); - } - - /// @brief Attempts to acquire the lock until the given time point. - /// @param abs_time Absolute deadline. 
- /// @return `true` if acquired, `false` if timeout or error. - template < class Clock, class Duration > - bool try_lock_until( const std::chrono::time_point< Clock, Duration >& abs_time ) NOVA_SYNC_TRY_ACQUIRE( true ) - { - if constexpr ( std::is_same_v< Clock, std::chrono::system_clock > ) { - auto ns = std::chrono::time_point_cast< std::chrono::nanoseconds >( abs_time ).time_since_epoch(); - auto secs = std::chrono::duration_cast< std::chrono::seconds >( ns ); - auto nsecs = ns - secs; - - struct timespec ts { - .tv_sec = time_t( secs.count() ), - .tv_nsec = long( nsecs.count() ), - }; - - int ret = pthread_mutex_timedlock( &mutex_, &ts ); - return ret == 0; - } else { - auto remaining = abs_time - Clock::now(); - if ( remaining <= std::chrono::nanoseconds::zero() ) - return try_lock(); - auto sys_deadline = std::chrono::system_clock::now() + remaining; - return try_lock_until( sys_deadline ); - } - } - -private: - void initialize( std::optional< priority_ceiling > ceiling = std::nullopt ) - { - pthread_mutexattr_t attr; - if ( pthread_mutexattr_init( &attr ) != 0 ) - throw std::runtime_error( "pthread_mutexattr_init failed" ); - - if ( pthread_mutexattr_setprotocol( &attr, int( Policy ) ) != 0 ) { - pthread_mutexattr_destroy( &attr ); - throw std::runtime_error( "pthread_mutexattr_setprotocol failed" ); - } - - if constexpr ( Policy == pthread_mutex_policy::priority_ceiling ) { - if ( pthread_mutexattr_setprioceiling( &attr, ceiling->value ) != 0 ) { - pthread_mutexattr_destroy( &attr ); - throw std::runtime_error( "pthread_mutexattr_setprioceiling failed" ); - } - } - - if ( pthread_mutex_init( &mutex_, &attr ) != 0 ) { - pthread_mutexattr_destroy( &attr ); - throw std::runtime_error( "pthread_mutex_init failed" ); - } - - pthread_mutexattr_destroy( &attr ); - } -}; - -/// Convenience aliases -using pthread_priority_ceiling_mutex = pthread_rt_mutex< pthread_mutex_policy::priority_ceiling >; -using pthread_priority_inherit_mutex = pthread_rt_mutex< 
pthread_mutex_policy::priority_inherit >; - -} // namespace nova::sync - -#endif // NOVA_SYNC_HAS_PTHREAD_RT_MUTEX diff --git a/include/nova/sync/mutex/recursive_spinlock_mutex.hpp b/include/nova/sync/mutex/recursive_spinlock_mutex.hpp deleted file mode 100644 index 49415a1..0000000 --- a/include/nova/sync/mutex/recursive_spinlock_mutex.hpp +++ /dev/null @@ -1,84 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include -#include - -#include -#include - -namespace nova::sync { - -/// @brief Recursive spinlock-based mutex. -class NOVA_SYNC_CAPABILITY( "mutex" ) NOVA_SYNC_REENTRANT_CAPABILITY recursive_spinlock_mutex -{ - std::atomic< std::thread::id > owner_ { std::thread::id {} }; - std::size_t recursion_count_ { 0 }; - -public: - /// @brief Constructs an unowned recursive spinlock mutex. - recursive_spinlock_mutex() = default; - recursive_spinlock_mutex( const recursive_spinlock_mutex& ) = delete; - recursive_spinlock_mutex& operator=( const recursive_spinlock_mutex& ) = delete; - - /// @brief Acquires the lock, allowing recursion from the current owner. - void lock() noexcept NOVA_SYNC_ACQUIRE() - { - const std::thread::id tid = std::this_thread::get_id(); - std::thread::id expected {}; - - if ( owner_.compare_exchange_strong( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { - recursion_count_ = 1; - return; - } - - if ( expected == tid ) { - ++recursion_count_; - return; - } - - lock_slow( tid ); - } - - /// @brief Attempts to acquire the lock without blocking. - /// @return `true` if lock acquired or already owned by current thread, `false` otherwise. 
- [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) - { - const std::thread::id tid = std::this_thread::get_id(); - std::thread::id expected {}; - - if ( owner_.compare_exchange_strong( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { - recursion_count_ = 1; - return true; - } - - if ( expected == tid ) { - ++recursion_count_; - return true; - } - - return false; - } - - /// @brief Releases the lock (decrements recursion depth). - void unlock() noexcept NOVA_SYNC_RELEASE() - { - if ( --recursion_count_ == 0 ) - owner_.store( std::thread::id {}, std::memory_order_release ); - } - -private: - void lock_slow( std::thread::id tid ) noexcept; -}; - -namespace concepts { - -template <> -struct concepts_is_recursive< nova::sync::recursive_spinlock_mutex > : std::true_type -{}; - -} // namespace concepts - -} // namespace nova::sync diff --git a/include/nova/sync/mutex/shared_spinlock_mutex.hpp b/include/nova/sync/mutex/shared_spinlock_mutex.hpp deleted file mode 100644 index d1c2724..0000000 --- a/include/nova/sync/mutex/shared_spinlock_mutex.hpp +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include - -#include - -namespace nova::sync { - -/// @brief Spinlock-based shared mutex. -class NOVA_SYNC_CAPABILITY( "mutex" ) shared_spinlock_mutex -{ - // 32-bit State Bitmask Layout: - // Bit 31: Exclusive Write Lock Active - // Bit 30: Pending Write Request (Writer Fairness / Anti-Starvation) - // Bits 0-29: Active Reader Count (Up to 1.07 billion concurrent readers) - static constexpr uint32_t write_locked = 1U << 31; - static constexpr uint32_t write_pending = 1U << 30; - static constexpr uint32_t readers_mask = ~( write_locked | write_pending ); - - std::atomic< uint32_t > state_ { 0 }; - -public: - /// @brief Constructs an unlocked shared spinlock mutex. 
- shared_spinlock_mutex() = default; - shared_spinlock_mutex( const shared_spinlock_mutex& ) = delete; - shared_spinlock_mutex& operator=( const shared_spinlock_mutex& ) = delete; - - /// @brief Acquires the exclusive write lock. - void lock() noexcept NOVA_SYNC_ACQUIRE() - { - uint32_t expected = 0; - if ( state_.compare_exchange_strong( - expected, write_locked, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; - - lock_slow(); - } - - /// @brief Attempts to acquire the exclusive write lock without blocking. - /// @return `true` if lock acquired, `false` if already locked or readers present. - [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) - { - uint32_t expected = state_.load( std::memory_order_relaxed ); - - if ( ( expected & readers_mask ) == 0 && ( expected & write_locked ) == 0 ) { - uint32_t desired = ( expected & ~write_pending ) | write_locked; - return state_.compare_exchange_strong( expected, - desired, - std::memory_order_acquire, - std::memory_order_relaxed ); - } - return false; - } - - /// @brief Releases the exclusive write lock and wakes waiting readers. - void unlock() noexcept NOVA_SYNC_RELEASE() - { - state_.fetch_and( ~write_locked, std::memory_order_release ); - } - - /// @brief Acquires a shared read lock (allows concurrent readers). - void lock_shared() noexcept NOVA_SYNC_ACQUIRE_SHARED() - { - uint32_t expected = state_.load( std::memory_order_relaxed ); - - if ( ( expected & ( write_locked | write_pending ) ) == 0 ) { - if ( state_.compare_exchange_strong( - expected, expected + 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; - } - lock_shared_slow(); - } - - /// @brief Attempts to acquire a shared read lock without blocking. - /// @return `true` if lock acquired, `false` if writer or pending writer present. 
- [[nodiscard]] bool try_lock_shared() noexcept NOVA_SYNC_TRY_ACQUIRE_SHARED( true ) - { - uint32_t expected = state_.load( std::memory_order_relaxed ); - if ( ( expected & ( write_locked | write_pending ) ) == 0 ) { - return state_.compare_exchange_strong( expected, - expected + 1, - std::memory_order_acquire, - std::memory_order_relaxed ); - } - return false; - } - - /// @brief Releases a shared read lock. - void unlock_shared() noexcept NOVA_SYNC_RELEASE_SHARED() - { - state_.fetch_sub( 1, std::memory_order_release ); - } - -private: - void lock_slow() noexcept; - void lock_shared_slow() noexcept; -}; - -} // namespace nova::sync diff --git a/include/nova/sync/mutex/spinlock_mutex.hpp b/include/nova/sync/mutex/spinlock_mutex.hpp index 5c926a0..f720bdf 100644 --- a/include/nova/sync/mutex/spinlock_mutex.hpp +++ b/include/nova/sync/mutex/spinlock_mutex.hpp @@ -4,51 +4,332 @@ #pragma once #include +#include +#include +#include +#include #include namespace nova::sync { -/// @brief Spinlock-based mutex. -class NOVA_SYNC_CAPABILITY( "mutex" ) spinlock_mutex -{ - std::atomic< bool > locked_ { false }; +//---------------------------------------------------------------------------------------------------------------------- +// Concrete implementation classes (slow paths compiled into a .cpp TU) -public: - /// @brief Constructs an unlocked spinlock mutex. - spinlock_mutex() = default; +namespace impl { - // Non-copyable & non-movable - spinlock_mutex( const spinlock_mutex& ) = delete; - spinlock_mutex& operator=( const spinlock_mutex& ) = delete; +/// @brief Plain spinlock (CPU-pause hints only). +class NOVA_SYNC_CAPABILITY( "mutex" ) spinlock_plain +{ +public: + spinlock_plain() = default; + spinlock_plain( const spinlock_plain& ) = delete; + spinlock_plain& operator=( const spinlock_plain& ) = delete; - /// @brief Acquires the lock, spinning if necessary. 
- void lock() noexcept NOVA_SYNC_ACQUIRE() + inline void lock() noexcept NOVA_SYNC_ACQUIRE() { if ( !locked_.exchange( true, std::memory_order_acquire ) ) return; - lock_slow(); } - /// @brief Tries to acquire the lock without spinning. - /// @return true if the lock was successfully acquired, false otherwise. - [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + [[nodiscard]] inline bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) { if ( locked_.load( std::memory_order_relaxed ) ) return false; - return !locked_.exchange( true, std::memory_order_acquire ); } - /// @brief Releases the lock. - void unlock() noexcept NOVA_SYNC_RELEASE() + inline void unlock() noexcept NOVA_SYNC_RELEASE() { locked_.store( false, std::memory_order_release ); } +protected: + std::atomic< bool > locked_ { false }; + private: void lock_slow() noexcept; }; +/// @brief Spinlock with exponential backoff. +class NOVA_SYNC_CAPABILITY( "mutex" ) spinlock_backoff : protected spinlock_plain +{ +public: + spinlock_backoff() = default; + spinlock_backoff( const spinlock_backoff& ) = delete; + spinlock_backoff& operator=( const spinlock_backoff& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + if ( !locked_.exchange( true, std::memory_order_acquire ) ) + return; + lock_slow(); + } + + using spinlock_plain::try_lock; + using spinlock_plain::unlock; + +private: + void lock_slow() noexcept; +}; + +/// @brief Recursive spinlock (CPU-pause hints only). 
+class NOVA_SYNC_CAPABILITY( "mutex" ) NOVA_SYNC_REENTRANT_CAPABILITY recursive_spinlock_plain +{ +public: + recursive_spinlock_plain() = default; + recursive_spinlock_plain( const recursive_spinlock_plain& ) = delete; + recursive_spinlock_plain& operator=( const recursive_spinlock_plain& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + const std::thread::id tid = std::this_thread::get_id(); + std::thread::id expected {}; + + if ( owner_.compare_exchange_strong( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { + recursion_count_ = 1; + return; + } + if ( expected == tid ) { + ++recursion_count_; + return; + } + lock_slow( tid ); + } + + [[nodiscard]] inline bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + const std::thread::id tid = std::this_thread::get_id(); + std::thread::id expected {}; + + if ( owner_.compare_exchange_strong( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { + recursion_count_ = 1; + return true; + } + if ( expected == tid ) { + ++recursion_count_; + return true; + } + return false; + } + + inline void unlock() noexcept NOVA_SYNC_RELEASE() + { + if ( --recursion_count_ == 0 ) + owner_.store( std::thread::id {}, std::memory_order_release ); + } + +protected: + std::atomic< std::thread::id > owner_ { std::thread::id {} }; + std::size_t recursion_count_ { 0 }; + +private: + void lock_slow( std::thread::id tid ) noexcept; +}; + +/// @brief Recursive spinlock with exponential backoff. 
+class NOVA_SYNC_CAPABILITY( "mutex" ) NOVA_SYNC_REENTRANT_CAPABILITY recursive_spinlock_backoff : + protected recursive_spinlock_plain +{ +public: + recursive_spinlock_backoff() = default; + recursive_spinlock_backoff( const recursive_spinlock_backoff& ) = delete; + recursive_spinlock_backoff& operator=( const recursive_spinlock_backoff& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + const std::thread::id tid = std::this_thread::get_id(); + std::thread::id expected {}; + + if ( owner_.compare_exchange_strong( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { + recursion_count_ = 1; + return; + } + if ( expected == tid ) { + ++recursion_count_; + return; + } + lock_slow( tid ); + } + + using recursive_spinlock_plain::try_lock; + using recursive_spinlock_plain::unlock; + +private: + void lock_slow( std::thread::id tid ) noexcept; +}; + +/// @brief Shared (reader-writer) spinlock (CPU-pause hints only). +class NOVA_SYNC_CAPABILITY( "mutex" ) shared_spinlock_plain +{ +public: + shared_spinlock_plain() = default; + shared_spinlock_plain( const shared_spinlock_plain& ) = delete; + shared_spinlock_plain& operator=( const shared_spinlock_plain& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + uint32_t expected = 0; + if ( state_.compare_exchange_strong( + expected, write_locked, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + lock_slow(); + } + + [[nodiscard]] inline bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + uint32_t expected = state_.load( std::memory_order_relaxed ); + if ( ( expected & readers_mask ) == 0 && ( expected & write_locked ) == 0 ) { + uint32_t desired = ( expected & ~write_pending ) | write_locked; + return state_.compare_exchange_strong( expected, + desired, + std::memory_order_acquire, + std::memory_order_relaxed ); + } + return false; + } + + inline void unlock() noexcept NOVA_SYNC_RELEASE() + { + state_.fetch_and( ~write_locked, 
std::memory_order_release ); + } + + inline void lock_shared() noexcept NOVA_SYNC_ACQUIRE_SHARED() + { + uint32_t expected = state_.load( std::memory_order_relaxed ); + if ( ( expected & ( write_locked | write_pending ) ) == 0 ) { + if ( state_.compare_exchange_strong( + expected, expected + 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } + lock_shared_slow(); + } + + [[nodiscard]] inline bool try_lock_shared() noexcept NOVA_SYNC_TRY_ACQUIRE_SHARED( true ) + { + uint32_t expected = state_.load( std::memory_order_relaxed ); + if ( ( expected & ( write_locked | write_pending ) ) == 0 ) { + return state_.compare_exchange_strong( expected, + expected + 1, + std::memory_order_acquire, + std::memory_order_relaxed ); + } + return false; + } + + inline void unlock_shared() noexcept NOVA_SYNC_RELEASE_SHARED() + { + state_.fetch_sub( 1, std::memory_order_release ); + } + +protected: + static constexpr uint32_t write_locked = 1U << 31; + static constexpr uint32_t write_pending = 1U << 30; + static constexpr uint32_t readers_mask = ~( write_locked | write_pending ); + + std::atomic< uint32_t > state_ { 0 }; + +private: + void lock_slow() noexcept; + void lock_shared_slow() noexcept; +}; + +/// @brief Shared (reader-writer) spinlock with exponential backoff. 
+class NOVA_SYNC_CAPABILITY( "mutex" ) shared_spinlock_backoff : protected shared_spinlock_plain +{ +public: + shared_spinlock_backoff() = default; + shared_spinlock_backoff( const shared_spinlock_backoff& ) = delete; + shared_spinlock_backoff& operator=( const shared_spinlock_backoff& ) = delete; + + inline void lock() noexcept NOVA_SYNC_ACQUIRE() + { + uint32_t expected = 0; + if ( state_.compare_exchange_strong( + expected, write_locked, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + lock_slow(); + } + + inline void lock_shared() noexcept NOVA_SYNC_ACQUIRE_SHARED() + { + uint32_t expected = state_.load( std::memory_order_relaxed ); + if ( ( expected & ( write_locked | write_pending ) ) == 0 ) { + if ( state_.compare_exchange_strong( + expected, expected + 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } + lock_shared_slow(); + } + + using shared_spinlock_plain::try_lock; + using shared_spinlock_plain::try_lock_shared; + using shared_spinlock_plain::unlock; + using shared_spinlock_plain::unlock_shared; + +private: + void lock_slow() noexcept; + void lock_shared_slow() noexcept; +}; + +} // namespace impl + +//---------------------------------------------------------------------------------------------------------------------- + +namespace detail { + +using spinlock_allowed_tags = std::tuple< exponential_backoff_tag, recursive_tag, shared_tag >; + +} // namespace detail + +//---------------------------------------------------------------------------------------------------------------------- + +/// @brief Spinlock mutex with optional policies for backoff, recursive, or shared locking. +/// +/// `recursive` and `shared` are mutually exclusive. +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |----------------|---------------------------------------------------------------| +/// | `with_backoff` | Exponential backoff with CPU pause hints before re-testing. 
| +/// | `recursive` | Allow re-entrant locking from the owning thread. | +/// | `shared` | Enable shared (reader-writer) locking via lock_shared(). | +/// +/// Without any policy, spins using only CPU pause hints. +template < typename... Policies > + requires( parameter::valid_parameters< detail::spinlock_allowed_tags, Policies... > + && !(detail::has_recursive_v< Policies... > && detail::has_shared_v< Policies... >)) +class NOVA_SYNC_CAPABILITY( "mutex" ) spinlock_mutex : + public std::conditional_t< + detail::has_recursive_v< Policies... >, + std::conditional_t< detail::has_backoff_v< Policies... >, impl::recursive_spinlock_backoff, impl::recursive_spinlock_plain >, + std::conditional_t< + detail::has_shared_v< Policies... >, + std::conditional_t< detail::has_backoff_v< Policies... >, impl::shared_spinlock_backoff, impl::shared_spinlock_plain >, + std::conditional_t< detail::has_backoff_v< Policies... >, impl::spinlock_backoff, impl::spinlock_plain > > > +{}; + +/// @brief Recursive spinlock alias. Prefer `spinlock_mutex`. +template < typename... Policies > +using recursive_spinlock_mutex = spinlock_mutex< recursive, Policies... >; + +/// @brief Shared spinlock alias. Prefer `spinlock_mutex`. +template < typename... Policies > +using shared_spinlock_mutex = spinlock_mutex< shared, Policies... >; + + +//---------------------------------------------------------------------------------------------------------------------- +// concepts_is_recursive / concepts_is_shared specializations + +namespace concepts { + +template < typename... Policies > + requires( detail::has_recursive_v< Policies... > ) +struct concepts_is_recursive< nova::sync::spinlock_mutex< Policies... 
> > : std::true_type +{}; + +} // namespace concepts + } // namespace nova::sync diff --git a/include/nova/sync/mutex/fair_mutex.hpp b/include/nova/sync/mutex/ticket_mutex.hpp similarity index 51% rename from include/nova/sync/mutex/fair_mutex.hpp rename to include/nova/sync/mutex/ticket_mutex.hpp index b3a625c..adf75d3 100644 --- a/include/nova/sync/mutex/fair_mutex.hpp +++ b/include/nova/sync/mutex/ticket_mutex.hpp @@ -8,30 +8,45 @@ #include #include +#include #include namespace nova::sync { -/// @brief Fair mutex with FIFO lock acquisition order (ticket lock). +/// @brief Fair FIFO ticket mutex with optional exponential backoff. /// -/// @note `try_lock_for` / `try_lock_until` do **not** acquire a ticket and -/// therefore do not participate in the FIFO order. They retry -/// `try_lock()` (opportunistically grabbing the next ticket only when -/// the lock is free) until the deadline, sleeping between attempts via -/// the futex-based `atomic_wait_until`. This avoids ticket starvation -/// but means timed waiters are not strictly fair against blocked -/// `lock()` callers. -class NOVA_SYNC_CAPABILITY( "mutex" ) fair_mutex +/// Threads acquire tickets in order; each `unlock()` serves the next ticket, +/// guaranteeing strict FIFO ordering for `lock()` callers. +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |----------------|--------------------------------------------------------------------| +/// | `with_backoff` | Spin with exponential backoff before sleeping on the futex. | +/// +/// Without `with_backoff`, threads sleep on the futex immediately after taking a ticket. +/// +/// @note `try_lock_for` / `try_lock_until` do **not** acquire a ticket and therefore do +/// not participate in FIFO order. They opportunistically retry `try_lock()` until +/// the deadline, sleeping between attempts. Timed waiters are not strictly fair +/// against blocked `lock()` callers. +template < typename... 
Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +class NOVA_SYNC_CAPABILITY( "mutex" ) ticket_mutex { + alignas( detail::hardware_destructive_interference_size ) std::atomic< uint32_t > serving_ticket_ { 0 }; + alignas( detail::hardware_destructive_interference_size ) std::atomic< uint32_t > next_ticket_ { 0 }; + + static constexpr bool use_backoff = detail::has_backoff_v< Policies... >; + public: - /// @brief Constructs an unlocked fair mutex. - fair_mutex() = default; - ~fair_mutex() = default; - fair_mutex( const fair_mutex& ) = delete; - fair_mutex& operator=( const fair_mutex& ) = delete; - - /// @brief Acquires the lock in FIFO order. - inline void lock() noexcept NOVA_SYNC_ACQUIRE() + ticket_mutex() = default; + ~ticket_mutex() = default; + ticket_mutex( const ticket_mutex& ) = delete; + ticket_mutex& operator=( const ticket_mutex& ) = delete; + + /// @brief Acquires the lock in FIFO ticket order. + void lock() noexcept NOVA_SYNC_ACQUIRE() { uint32_t my_ticket = next_ticket_.fetch_add( 1, std::memory_order_relaxed ); @@ -42,34 +57,29 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fair_mutex } /// @brief Attempts to acquire the lock without waiting. - /// @return `true` if lock acquired, `false` if already locked. - [[nodiscard]] inline bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + /// @return `true` if acquired, `false` if already locked. + [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) { uint32_t current_serving = serving_ticket_.load( std::memory_order_acquire ); uint32_t expected = current_serving; - return next_ticket_.compare_exchange_strong( expected, current_serving + 1, std::memory_order_acquire, std::memory_order_relaxed ); } - /// @brief Releases the lock and serves the next waiting thread. - inline void unlock() noexcept NOVA_SYNC_RELEASE() + /// @brief Releases the lock and wakes the next waiting thread. 
+ void unlock() noexcept NOVA_SYNC_RELEASE() { uint32_t next_serving = serving_ticket_.load( std::memory_order_relaxed ) + 1; - serving_ticket_.store( next_serving, std::memory_order_release ); - - // Always notify: both ticket-queue waiters (lock_slow) and timed - // waiters (try_lock_until) watch serving_ticket_. atomic_notify_all( serving_ticket_ ); } - /// @brief Tries to acquire the lock within a relative timeout. + /// @brief Attempts to acquire the lock within a relative timeout. /// /// Does not take a ticket; see class documentation for fairness notes. - /// @return `true` if lock acquired, `false` if timed out. + /// @return `true` if acquired, `false` if timed out. template < class Rep, class Period > [[nodiscard]] bool try_lock_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept NOVA_SYNC_TRY_ACQUIRE( true ) @@ -77,10 +87,10 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fair_mutex return try_lock_until( std::chrono::steady_clock::now() + rel_time ); } - /// @brief Tries to acquire the lock until an absolute deadline. + /// @brief Attempts to acquire the lock until an absolute deadline. /// /// Does not take a ticket; see class documentation for fairness notes. - /// @return `true` if lock acquired, `false` if timed out. + /// @return `true` if acquired, `false` if timed out. 
template < class Clock, class Duration > [[nodiscard]] bool try_lock_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept NOVA_SYNC_TRY_ACQUIRE( true ) @@ -91,20 +101,24 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fair_mutex uint32_t serving = serving_ticket_.load( std::memory_order_relaxed ); - // Return false if already past deadline if ( Clock::now() >= abs_time ) return false; - // Sleep until serving_ticket_ changes (= someone unlocked) or deadline atomic_wait_until( serving_ticket_, serving, abs_time ); } } private: - alignas( detail::hardware_destructive_interference_size ) std::atomic< uint32_t > serving_ticket_ { 0 }; - alignas( detail::hardware_destructive_interference_size ) std::atomic< uint32_t > next_ticket_ { 0 }; + NOVA_SYNC_NOINLINE void lock_slow( uint32_t my_ticket ) noexcept + { + if constexpr ( use_backoff ) + lock_slow_backoff( my_ticket ); + else + lock_slow_plain( my_ticket ); + } - void lock_slow( uint32_t my_ticket ) noexcept; + void lock_slow_plain( uint32_t my_ticket ) noexcept; + void lock_slow_backoff( uint32_t my_ticket ) noexcept; }; } // namespace nova::sync diff --git a/include/nova/sync/mutex/win32_critical_section_mutex.hpp b/include/nova/sync/mutex/win32_critical_section_mutex.hpp index fcac5f0..5b01bd3 100644 --- a/include/nova/sync/mutex/win32_critical_section_mutex.hpp +++ b/include/nova/sync/mutex/win32_critical_section_mutex.hpp @@ -10,44 +10,95 @@ #ifdef NOVA_SYNC_HAS_WIN32_CRITICAL_SECTION_MUTEX +# include +# include + # include +# include # include namespace nova::sync { /// @brief Recursive mutex implemented using Win32 CRITICAL_SECTION. /// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |----------------------|-----------------------------------------------------------| +/// | `win32_spin_count`| Spin count for InitializeCriticalSectionAndSpinCount. 
| +/// +// Win32-specific policy: spin count for InitializeCriticalSectionAndSpinCount +namespace win32_policy { +namespace tags { +struct win32_spin_count_tag +{}; +} // namespace tags + +template < unsigned Count > +using win32_spin_count = parameter::integral_param< tags::win32_spin_count_tag, unsigned, Count >; + +using win32_cs_allowed_tags = std::tuple< tags::win32_spin_count_tag >; + +template < typename... Policies > +inline constexpr unsigned extract_win32_spin_count_v + = parameter::extract_integral_v< tags::win32_spin_count_tag, unsigned, 4000u, Policies... >; +} // namespace win32_policy + +template < typename... Policies > + requires( parameter::valid_parameters< win32_policy::win32_cs_allowed_tags, Policies... > ) class NOVA_SYNC_CAPABILITY( "mutex" ) NOVA_SYNC_REENTRANT_CAPABILITY win32_critical_section_mutex { + static constexpr unsigned storage_size = 48; + static constexpr unsigned storage_align = 8; + static constexpr unsigned spin_count = win32_policy::extract_win32_spin_count_v< Policies... >; + + alignas( storage_align ) unsigned char storage_[ storage_size ] {}; + public: /// @brief Constructs an unlocked mutex. - win32_critical_section_mutex(); - - ~win32_critical_section_mutex(); + win32_critical_section_mutex() + { + static_assert( sizeof( CRITICAL_SECTION ) <= storage_size, "CRITICAL_SECTION is larger than allocated storage" ); + CRITICAL_SECTION* cs = reinterpret_cast< CRITICAL_SECTION* >( storage_ ); + BOOL result = ::InitializeCriticalSectionAndSpinCount( cs, spin_count ); + assert( result && "InitializeCriticalSectionAndSpinCount failed" ); + } + + ~win32_critical_section_mutex() + { + CRITICAL_SECTION* cs = reinterpret_cast< CRITICAL_SECTION* >( storage_ ); + ::DeleteCriticalSection( cs ); + } win32_critical_section_mutex( const win32_critical_section_mutex& ) = delete; win32_critical_section_mutex& operator=( const win32_critical_section_mutex& ) = delete; /// @brief Acquires the lock, blocking as necessary. Re-entrant. 
- void lock() noexcept NOVA_SYNC_ACQUIRE(); + void lock() noexcept NOVA_SYNC_ACQUIRE() + { + CRITICAL_SECTION* cs = reinterpret_cast< CRITICAL_SECTION* >( storage_ ); + ::EnterCriticalSection( cs ); + } /// @brief Attempts to acquire the lock without blocking. /// @return `true` if lock acquired, `false` if already locked by another thread. - [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ); + [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + CRITICAL_SECTION* cs = reinterpret_cast< CRITICAL_SECTION* >( storage_ ); + return ::TryEnterCriticalSection( cs ) != 0; + } /// @brief Releases one level of recursion. - void unlock() noexcept NOVA_SYNC_RELEASE(); - -private: - static constexpr unsigned storage_size = 48; - static constexpr unsigned storage_align = 8; - - alignas( storage_align ) unsigned char storage_[ storage_size ] {}; + void unlock() noexcept NOVA_SYNC_RELEASE() + { + CRITICAL_SECTION* cs = reinterpret_cast< CRITICAL_SECTION* >( storage_ ); + ::LeaveCriticalSection( cs ); + } }; namespace concepts { -template <> -struct concepts_is_recursive< nova::sync::win32_critical_section_mutex > : std::true_type +template < typename... Policies > +struct concepts_is_recursive< nova::sync::win32_critical_section_mutex< Policies... > > : std::true_type {}; } // namespace concepts diff --git a/include/nova/sync/semaphore/fast_semaphore.hpp b/include/nova/sync/semaphore/fast_semaphore.hpp deleted file mode 100644 index 3160899..0000000 --- a/include/nova/sync/semaphore/fast_semaphore.hpp +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include -#include -#include -#include -#include -#include - -#include -#include - -namespace nova::sync { - -/// Lock-free counting semaphore using atomic wait/notify. 
-class fast_semaphore -{ -public: - explicit fast_semaphore( std::ptrdiff_t initial = 0 ) noexcept : - count_( int32_t( initial ) ) - { - assert( initial >= 0 ); - } - - ~fast_semaphore() = default; - fast_semaphore( const fast_semaphore& ) = delete; - fast_semaphore& operator=( const fast_semaphore& ) = delete; - - /// Adds @p n tokens and wakes up to @p n blocked waiters. - void release( std::ptrdiff_t n = 1 ) noexcept - { - assert( n >= 0 ); - auto prev = count_.fetch_add( int32_t( n ), std::memory_order_release ); - if ( prev < 0 ) { - auto to_wake = std::min( int32_t( n ), -prev ); - if ( to_wake == 1 ) - count_.notify_one(); - else - count_.notify_all(); - } - } - - /// Blocks until a token is available, then consumes one. - void acquire() noexcept - { - auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); - if ( prev > 0 ) - return; - - while ( true ) { - auto c = count_.load( std::memory_order_relaxed ); - if ( c >= 0 ) - return; - count_.wait( c, std::memory_order_acquire ); - } - } - - /// Consumes a token if available. Returns `true` on success, `false` if none available. - [[nodiscard]] bool try_acquire() noexcept - { - auto c = count_.load( std::memory_order_relaxed ); - while ( c > 0 ) { - if ( count_.compare_exchange_strong( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; - } - return false; - } - -private: - std::atomic< int32_t > count_; -}; - - -/// Lock-free counting semaphore using atomic wait/notify. Supports timed waits -class fast_timed_semaphore -{ -public: - explicit fast_timed_semaphore( std::ptrdiff_t initial = 0 ) noexcept : - count_( int32_t( initial ) ) - { - assert( initial >= 0 ); - } - - ~fast_timed_semaphore() = default; - fast_timed_semaphore( const fast_timed_semaphore& ) = delete; - fast_timed_semaphore& operator=( const fast_timed_semaphore& ) = delete; - - /// Adds @p n tokens and wakes up to @p n blocked waiters. 
- void release( std::ptrdiff_t n = 1 ) noexcept - { - assert( n >= 0 ); - auto prev = count_.fetch_add( int32_t( n ), std::memory_order_release ); - if ( prev < 0 ) { - auto to_wake = std::min( int32_t( n ), -prev ); - if ( to_wake == 1 ) - atomic_notify_one( count_ ); - else - atomic_notify_all( count_ ); - } - } - - /// Blocks until a token is available, then consumes one. - void acquire() noexcept - { - auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); - if ( prev > 0 ) - return; - - while ( true ) { - auto c = count_.load( std::memory_order_relaxed ); - if ( c >= 0 ) - return; - atomic_wait( count_, c, std::memory_order_acquire ); - } - } - - /// Consumes a token if available. Returns `true` on success, `false` if none available. - [[nodiscard]] bool try_acquire() noexcept - { - auto c = count_.load( std::memory_order_relaxed ); - while ( c > 0 ) { - if ( count_.compare_exchange_strong( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; - } - return false; - } - - template < class Clock, class Duration > - [[nodiscard]] bool try_acquire_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept - { - if ( try_acquire() ) - return true; - - auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); - if ( prev > 0 ) - return true; - - while ( true ) { - auto c = count_.load( std::memory_order_relaxed ); - if ( c >= 0 ) - return true; - - if ( !atomic_wait_until( count_, c, abs_time, std::memory_order_acquire ) ) { - // Timeout — try to undo our registration - auto restored = count_.fetch_add( 1, std::memory_order_relaxed ); - if ( restored >= 0 ) { - // A release happened concurrently and granted us a token. - // Our fetch_add(1) undid the registration, but the token was - // meant for us. Re-consume it to maintain the invariant. 
- count_.fetch_sub( 1, std::memory_order_relaxed ); - return true; - } - return false; - } - } - } - - template < class Rep, class Period > - [[nodiscard]] bool try_acquire_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept - { - return try_acquire_until( std::chrono::steady_clock::now() + rel_time ); - } - -private: - std::atomic< int32_t > count_; -}; - -} // namespace nova::sync diff --git a/include/nova/sync/semaphore/parking_semaphore.hpp b/include/nova/sync/semaphore/parking_semaphore.hpp new file mode 100644 index 0000000..9d7f30f --- /dev/null +++ b/include/nova/sync/semaphore/parking_semaphore.hpp @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace nova::sync { + +/// @brief Lock-free counting semaphore with optional exponential backoff. +/// +/// Uses futex-based atomic wait/notify for blocking. With `with_backoff`, the +/// `acquire()` slow path spins before sleeping. +/// +/// Policy parameters (from `nova/sync/mutex/policies.hpp`): +/// +/// | Policy | Effect | +/// |---------------|-------------------------------------------------------| +/// | (no exponential_backoff) | Park immediately when count is negative (default). | +/// | `with_backoff`| Spin with exponential backoff before parking. | +/// +/// ### Aliases +/// - `parking_semaphore<>` — pure park, no spinning. +/// - `parking_semaphore` — spin-then-park. +/// - `fast_semaphore` — deprecated alias for `parking_semaphore<>`. +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +class parking_semaphore +{ + std::atomic< int32_t > count_; + + static constexpr bool use_backoff = detail::has_backoff_v< Policies... 
>; + +public: + explicit parking_semaphore( std::ptrdiff_t initial = 0 ) noexcept : + count_( int32_t( initial ) ) + { + assert( initial >= 0 ); + } + + ~parking_semaphore() = default; + parking_semaphore( const parking_semaphore& ) = delete; + parking_semaphore& operator=( const parking_semaphore& ) = delete; + + /// @brief Adds @p n tokens and wakes up to @p n blocked waiters. + void release( std::ptrdiff_t n = 1 ) noexcept + { + assert( n >= 0 ); + auto prev = count_.fetch_add( int32_t( n ), std::memory_order_release ); + if ( prev < 0 ) { + auto to_wake = std::min( int32_t( n ), -prev ); + if ( to_wake == 1 ) + atomic_notify_one( count_ ); + else + atomic_notify_all( count_ ); + } + } + + /// @brief Blocks until a token is available, then consumes one. + void acquire() noexcept + { + if constexpr ( use_backoff ) { + detail::exponential_backoff backoff; + while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + auto c = count_.load( std::memory_order_relaxed ); + if ( c > 0 ) { + if ( count_.compare_exchange_weak( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } + backoff.run(); + } + } + + auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); + if ( prev > 0 ) + return; + + while ( true ) { + auto c = count_.load( std::memory_order_relaxed ); + if ( c >= 0 ) + return; + atomic_wait( count_, c, std::memory_order_acquire ); + } + } + + /// @brief Consumes a token if available. + /// @return `true` on success, `false` if none available. + [[nodiscard]] bool try_acquire() noexcept + { + auto c = count_.load( std::memory_order_relaxed ); + while ( c > 0 ) { + if ( count_.compare_exchange_strong( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return true; + } + return false; + } +}; + +/// @brief Timed lock-free counting semaphore with optional exponential backoff. +/// +/// Adds `try_acquire_for` / `try_acquire_until` to `parking_semaphore`. 
+/// +/// | Policy | Effect | +/// |---------------|-------------------------------------------------------| +/// | (absence of `with_backoff`) | Park immediately when count is negative (default). | +/// | `with_backoff`| Spin with exponential backoff before parking. | +/// +/// ### Aliases +/// - `timed_semaphore<>` — pure park, no spinning. +/// - `timed_semaphore` — spin-then-park. +/// - `fast_timed_semaphore` — deprecated alias for `timed_semaphore<>`. +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +class timed_semaphore +{ + std::atomic< int32_t > count_; + + static constexpr bool use_backoff = detail::has_backoff_v< Policies... >; + +public: + explicit timed_semaphore( std::ptrdiff_t initial = 0 ) noexcept : + count_( int32_t( initial ) ) + { + assert( initial >= 0 ); + } + + ~timed_semaphore() = default; + timed_semaphore( const timed_semaphore& ) = delete; + timed_semaphore& operator=( const timed_semaphore& ) = delete; + + /// @brief Adds @p n tokens and wakes up to @p n blocked waiters. + void release( std::ptrdiff_t n = 1 ) noexcept + { + assert( n >= 0 ); + auto prev = count_.fetch_add( int32_t( n ), std::memory_order_release ); + if ( prev < 0 ) { + auto to_wake = std::min( int32_t( n ), -prev ); + if ( to_wake == 1 ) + atomic_notify_one( count_ ); + else + atomic_notify_all( count_ ); + } + } + + /// @brief Blocks until a token is available, then consumes one. 
+ void acquire() noexcept + { + if constexpr ( use_backoff ) { + detail::exponential_backoff backoff; + while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + auto c = count_.load( std::memory_order_relaxed ); + if ( c > 0 ) { + if ( count_.compare_exchange_weak( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } + backoff.run(); + } + } + + auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); + if ( prev > 0 ) + return; + + while ( true ) { + auto c = count_.load( std::memory_order_relaxed ); + if ( c >= 0 ) + return; + atomic_wait( count_, c, std::memory_order_acquire ); + } + } + + /// @brief Consumes a token if available. + /// @return `true` on success, `false` if none available. + [[nodiscard]] bool try_acquire() noexcept + { + auto c = count_.load( std::memory_order_relaxed ); + while ( c > 0 ) { + if ( count_.compare_exchange_strong( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return true; + } + return false; + } + + /// @brief Blocks until a token is available or the deadline is reached. + /// @return `true` if acquired, `false` if timed out. + template < class Clock, class Duration > + [[nodiscard]] bool try_acquire_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept + { + if ( try_acquire() ) + return true; + + auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); + if ( prev > 0 ) + return true; + + while ( true ) { + auto c = count_.load( std::memory_order_relaxed ); + if ( c >= 0 ) + return true; + + if ( !atomic_wait_until( count_, c, abs_time, std::memory_order_acquire ) ) { + auto restored = count_.fetch_add( 1, std::memory_order_relaxed ); + if ( restored >= 0 ) { + count_.fetch_sub( 1, std::memory_order_relaxed ); + return true; + } + return false; + } + } + } + + /// @brief Blocks until a token is available or the duration expires. + /// @return `true` if acquired, `false` if timed out. 
+ template < class Rep, class Period > + [[nodiscard]] bool try_acquire_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept + { + return try_acquire_until( std::chrono::steady_clock::now() + rel_time ); + } +}; + +//---------------------------------------------------------------------------------------------------------------------- +// Convenience aliases + +/// @brief Deprecated alias for `parking_semaphore<>` (pure park, no backoff). +using fast_semaphore = parking_semaphore<>; + +/// @brief Deprecated alias for `timed_semaphore<>` (pure park, no backoff). +using fast_timed_semaphore = timed_semaphore<>; + +} // namespace nova::sync diff --git a/source/nova/sync/futex/atomic_wait.cpp b/source/nova/sync/futex/atomic_wait.cpp index bf2b06b..8513c16 100644 --- a/source/nova/sync/futex/atomic_wait.cpp +++ b/source/nova/sync/futex/atomic_wait.cpp @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann +#include #include #include @@ -8,6 +9,7 @@ #include #include #include +#include // ============================================================================= // Platform selection @@ -48,7 +50,8 @@ namespace nova::sync { namespace { -int futex_syscall( std::atomic< int32_t >* addr, int op, int32_t val, const struct timespec* timeout, int32_t val3 ) noexcept +inline int +futex_syscall( std::atomic< int32_t >* addr, int op, int32_t val, const struct timespec* timeout, int32_t val3 ) noexcept { return static_cast< int >( ::syscall( SYS_futex, reinterpret_cast< int32_t* >( addr ), op, val, timeout, nullptr, val3 ) ); @@ -80,6 +83,8 @@ struct timespec to_abs_timespec( std::chrono::nanoseconds ns_since_epoch ) noexc // Acquire fence must precede load for synchronization with notify's release fence. // See [atomics.fences] p4/p8: fence(acquire) A synchronizes-with fence(release) B // when load Y is sequenced after A and reads value stored before B. 
+ + inline bool acquire_and_check( std::atomic< int32_t >& atom, int32_t old, std::memory_order order ) noexcept { if ( order != std::memory_order_relaxed ) @@ -89,24 +94,71 @@ inline bool acquire_and_check( std::atomic< int32_t >& atom, int32_t old, std::m } // namespace +// void atomic_wait( std::atomic< int32_t >& atom, int32_t old, std::memory_order order ) noexcept +// { +// { +// auto load_order = ( order != std::memory_order_relaxed ) ? std::memory_order_acquire : +// std::memory_order_relaxed; if ( atom.load( load_order ) != old ) +// return; +// } + +// while ( true ) { +// int rc = futex_syscall( &atom, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, old, nullptr, 0 ); + +// if ( rc == 0 || ( rc < 0 && errno == EAGAIN ) ) { +// if ( acquire_and_check( atom, old, order ) ) +// return; +// continue; +// } +// if ( rc < 0 && errno == EINTR ) +// continue; +// return; +// } +// } + void atomic_wait( std::atomic< int32_t >& atom, int32_t old, std::memory_order order ) noexcept { - { - auto load_order = ( order != std::memory_order_relaxed ) ? std::memory_order_acquire : std::memory_order_relaxed; + auto load_order = ( order != std::memory_order_relaxed ) ? std::memory_order_acquire : std::memory_order_relaxed; + + // Phase 1: CPU Yielding (Active Spinning with Exponential Backoff) + // Avoids the syscall completely if the atomic changes within microseconds. + int pause_count = 1; + constexpr int max_active_spins = 10; + + for ( int i = 0; i < max_active_spins; ++i ) { if ( atom.load( load_order ) != old ) return; + + for ( int j = 0; j < pause_count; ++j ) + detail::pause(); + + pause_count *= 2; // Exponential backoff } + // Phase 2: OS Yielding (Passive Spinning) + // The lock is held slightly longer than a few cycles. Surrender our + // timeslice so the thread holding the lock has CPU time to release it. 
+ constexpr int max_yield_spins = 4; + for ( int i = 0; i < max_yield_spins; ++i ) { + if ( atom.load( load_order ) != old ) + return; + + std::this_thread::yield(); + } + + // Phase 3: Slow Path (OS-Level Blocking Wait) + // The condition is taking a long time; fall back to the kernel safely. while ( true ) { int rc = futex_syscall( &atom, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, old, nullptr, 0 ); if ( rc == 0 || ( rc < 0 && errno == EAGAIN ) ) { + // EAGAIN means the futex value changed just as we entered the kernel. if ( acquire_and_check( atom, old, order ) ) return; continue; } if ( rc < 0 && errno == EINTR ) - continue; + continue; // Interrupted by a signal, try again return; } } diff --git a/source/nova/sync/mutex/eventfd_mutex.cpp b/source/nova/sync/mutex/eventfd_mutex.cpp index e1c5832..577f64c 100644 --- a/source/nova/sync/mutex/eventfd_mutex.cpp +++ b/source/nova/sync/mutex/eventfd_mutex.cpp @@ -16,27 +16,27 @@ # include -namespace nova::sync { +namespace nova::sync::detail { using namespace std::chrono_literals; // --------------------------------------------------------------------------- -// eventfd_mutex — simple poll-based variant, no user-space waiter count +// eventfd_mutex_impl — simple poll-based variant, no user-space waiter count // --------------------------------------------------------------------------- -eventfd_mutex::eventfd_mutex() : +eventfd_mutex_impl::eventfd_mutex_impl() : evfd_ { ::eventfd( 1, EFD_NONBLOCK | EFD_SEMAPHORE ) } { assert( evfd_ >= 0 && "eventfd() failed" ); } -eventfd_mutex::~eventfd_mutex() +eventfd_mutex_impl::~eventfd_mutex_impl() { if ( evfd_ >= 0 ) ::close( evfd_ ); } -void eventfd_mutex::lock() noexcept +void eventfd_mutex_impl::lock() noexcept { while ( !try_lock() ) { struct pollfd pfd { @@ -48,39 +48,36 @@ void eventfd_mutex::lock() noexcept } } -bool eventfd_mutex::try_lock() noexcept +bool eventfd_mutex_impl::try_lock() noexcept { std::array< uint64_t, 1 > val {}; return detail::read_intr( evfd_, 
std::as_writable_bytes( std::span( val ) ) ) == ssize_t( sizeof( uint64_t ) ); } -void eventfd_mutex::unlock() noexcept +void eventfd_mutex_impl::unlock() noexcept { const std::array< uint64_t, 1 > one { 1 }; detail::write_intr( evfd_, std::as_bytes( std::span( one ) ) ); } // --------------------------------------------------------------------------- -// fast_eventfd_mutex — fast-path variant with user-space waiter count +// fast_eventfd_mutex_impl — fast-path variant with user-space waiter count // --------------------------------------------------------------------------- -fast_eventfd_mutex::fast_eventfd_mutex() +fast_eventfd_mutex_impl::fast_eventfd_mutex_impl() { evfd_ = ::eventfd( 0, EFD_NONBLOCK | EFD_SEMAPHORE ); assert( evfd_ >= 0 && "eventfd() failed" ); } -fast_eventfd_mutex::~fast_eventfd_mutex() +fast_eventfd_mutex_impl::~fast_eventfd_mutex_impl() { if ( evfd_ >= 0 ) ::close( evfd_ ); } -void fast_eventfd_mutex::unlock() noexcept +void fast_eventfd_mutex_impl::unlock() noexcept { - // Clear the lock bit and check if any waiters were registered. - // If prev > 1 (waiter count > 0), write a token to wake one waiter. - // If prev == 1 (no waiters), skip the kernel call — fast-path optimization. uint32_t prev = state_.fetch_and( ~1u, std::memory_order_release ); if ( prev > 1 ) { @@ -89,29 +86,13 @@ void fast_eventfd_mutex::unlock() noexcept } } -void fast_eventfd_mutex::consume_lock() const noexcept +void fast_eventfd_mutex_impl::consume_lock() const noexcept { std::array< uint64_t, 1 > val {}; detail::read_intr( evfd_, std::as_writable_bytes( std::span( val ) ) ); } -// Slow path for lock acquisition, entered after try_lock() fails. -// -// Phase 1 — Spin: Attempt CAS in a tight loop with exponential backoff. -// No waiter registration yet, so unlock() won't touch the kernel. 
-// -// Phase 2 — Register & wait: Once spinning is exhausted, register as a -// waiter via async_waiter_guard (add_async_waiter / fetch_add(2)) -// and enter the blocking loop: -// a) If the lock bit is clear, try an atomic CAS that simultaneously -// decrements the waiter count and sets the lock bit: -// desired = (s - 2) | 1. On success, call consume_lock() to -// drain any eventfd token unlock() may have written between our -// registration and this CAS. Then call guard.dismiss() to avoid -// double-decrement (the CAS already subtracted 2). -// b) Otherwise, block on poll() until the eventfd is readable, -// consume the token, then retry the CAS loop. -void fast_eventfd_mutex::lock_slow() noexcept +void fast_eventfd_mutex_impl::lock_slow() noexcept { detail::exponential_backoff backoff; @@ -127,23 +108,15 @@ void fast_eventfd_mutex::lock_slow() noexcept s = state_.load( std::memory_order_relaxed ); } - // Phase 2: register as a waiter; the guard will call remove_async_waiter() - // on any exit path where the CAS below doesn't acquire the lock. - s = add_async_waiter(); // returns state *after* the +2 increment - detail::async_waiter_guard< fast_eventfd_mutex > guard( *this, detail::adopt_async_waiter ); + s = add_async_waiter(); + detail::async_waiter_guard< fast_eventfd_mutex_impl > guard( *this, detail::adopt_async_waiter ); while ( true ) { if ( ( s & 1u ) == 0 ) { - // Atomically decrement waiter count and set lock bit in one CAS. uint32_t desired = ( s - 2u ) | 1u; if ( state_.compare_exchange_weak( s, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) { - // We claimed ownership via the CAS path while registered as a - // waiter. Between our poll() return (or initial registration) - // and this CAS, unlock() may have seen prev > 1 and written an - // eventfd token. Drain it so subsequent waiters don't see a - // spurious wakeup. 
consume_lock(); - guard.dismiss(); // waiter count already decremented in the CAS above + guard.dismiss(); return; } continue; @@ -161,30 +134,26 @@ void fast_eventfd_mutex::lock_slow() noexcept } } -bool fast_eventfd_mutex::try_lock_for_ns( duration_type rel ) noexcept +bool fast_eventfd_mutex_impl::try_lock_for_ns( duration_type rel ) noexcept { if ( rel <= 0ns ) return try_lock(); - auto s = add_async_waiter(); - detail::async_waiter_guard< fast_eventfd_mutex > guard( *this, detail::adopt_async_waiter ); + auto s = add_async_waiter(); + detail::async_waiter_guard< fast_eventfd_mutex_impl > guard( *this, detail::adopt_async_waiter ); while ( true ) { if ( ( s & 1u ) == 0 ) { uint32_t desired = ( s - 2u ) | 1u; if ( state_.compare_exchange_weak( s, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) { - // We claimed ownership via the CAS path while registered as an - // async waiter — consume any pending eventfd token to avoid - // leaving a stray notification for subsequent waiters. consume_lock(); - guard.dismiss(); // waiter count already decremented in the CAS above + guard.dismiss(); return true; } continue; } if ( !detail::ppoll_for( evfd_, rel ) ) { - // Timed out — guard destructor calls remove_async_waiter(). 
return false; } @@ -193,6 +162,6 @@ bool fast_eventfd_mutex::try_lock_for_ns( duration_type rel ) noexcept } } -} // namespace nova::sync +} // namespace nova::sync::detail #endif // NOVA_SYNC_HAS_EVENTFD_MUTEX diff --git a/source/nova/sync/mutex/kqueue_mutex.cpp b/source/nova/sync/mutex/kqueue_mutex.cpp index ebc332f..7db4a4a 100644 --- a/source/nova/sync/mutex/kqueue_mutex.cpp +++ b/source/nova/sync/mutex/kqueue_mutex.cpp @@ -17,17 +17,17 @@ # include # include -namespace nova::sync { +namespace nova::sync::detail { using namespace std::chrono_literals; static constexpr uintptr_t kqueue_ident = 1; // --------------------------------------------------------------------------- -// kqueue_mutex — simple kevent-based variant, no user-space waiter count +// kqueue_mutex_impl — simple kevent-based variant, no user-space waiter count // --------------------------------------------------------------------------- -kqueue_mutex::kqueue_mutex() : +kqueue_mutex_impl::kqueue_mutex_impl() : kqfd_ { ::kqueue(), } @@ -40,26 +40,26 @@ kqueue_mutex::kqueue_mutex() : assert( r == 0 && "kevent EV_ADD EVFILT_USER failed" ); } -kqueue_mutex::~kqueue_mutex() +kqueue_mutex_impl::~kqueue_mutex_impl() { if ( kqfd_ >= 0 ) ::close( kqfd_ ); } -void kqueue_mutex::lock() noexcept +void kqueue_mutex_impl::lock() noexcept { struct kevent out {}; ::kevent( kqfd_, nullptr, 0, &out, 1, nullptr ); } -bool kqueue_mutex::try_lock() noexcept +bool kqueue_mutex_impl::try_lock() noexcept { struct kevent out {}; struct timespec ts { 0, 0 }; return ::kevent( kqfd_, nullptr, 0, &out, 1, &ts ) > 0; } -void kqueue_mutex::unlock() noexcept +void kqueue_mutex_impl::unlock() noexcept { struct kevent kev {}; EV_SET( &kev, kqueue_ident, EVFILT_USER, 0, NOTE_TRIGGER, 0, nullptr ); @@ -67,12 +67,12 @@ void kqueue_mutex::unlock() noexcept } // --------------------------------------------------------------------------- -// fast_kqueue_mutex — fast-path variant with user-space waiter count +// fast_kqueue_mutex_impl — 
fast-path variant with user-space waiter count // --------------------------------------------------------------------------- static constexpr uintptr_t fast_kqueue_ident = 2; -fast_kqueue_mutex::fast_kqueue_mutex() : +fast_kqueue_mutex_impl::fast_kqueue_mutex_impl() : kqfd_ { ::kqueue(), } @@ -85,17 +85,14 @@ fast_kqueue_mutex::fast_kqueue_mutex() : assert( r == 0 && "kevent EV_ADD EVFILT_USER failed" ); } -fast_kqueue_mutex::~fast_kqueue_mutex() +fast_kqueue_mutex_impl::~fast_kqueue_mutex_impl() { if ( kqfd_ >= 0 ) ::close( kqfd_ ); } -void fast_kqueue_mutex::unlock() noexcept +void fast_kqueue_mutex_impl::unlock() noexcept { - // Clear the lock bit and check if any waiters were registered. - // If prev > 1 (waiter count > 0), post NOTE_TRIGGER to wake one waiter. - // If prev == 1 (no waiters), skip the kernel call — fast-path optimization. uint32_t prev = state_.fetch_and( ~1u, std::memory_order_release ); if ( prev > 1 ) { @@ -105,14 +102,14 @@ void fast_kqueue_mutex::unlock() noexcept } } -void fast_kqueue_mutex::consume_lock() const noexcept +void fast_kqueue_mutex_impl::consume_lock() const noexcept { struct kevent out {}; struct timespec ts { 0, 0 }; ::kevent( kqfd_, nullptr, 0, &out, 1, &ts ); } -void fast_kqueue_mutex::lock_slow() noexcept +void fast_kqueue_mutex_impl::lock_slow() noexcept { detail::exponential_backoff backoff; @@ -129,7 +126,7 @@ void fast_kqueue_mutex::lock_slow() noexcept } s = add_async_waiter(); - detail::async_waiter_guard< fast_kqueue_mutex > guard( *this, detail::adopt_async_waiter ); + detail::async_waiter_guard< fast_kqueue_mutex_impl > guard( *this, detail::adopt_async_waiter ); while ( true ) { if ( ( s & 1u ) == 0 ) { @@ -148,7 +145,7 @@ void fast_kqueue_mutex::lock_slow() noexcept } } -bool fast_kqueue_mutex::try_lock_for_impl( std::chrono::nanoseconds rel ) noexcept +bool fast_kqueue_mutex_impl::try_lock_for_impl( std::chrono::nanoseconds rel ) noexcept { if ( rel <= 0ns ) return try_lock(); @@ -157,8 +154,8 @@ bool 
fast_kqueue_mutex::try_lock_for_impl( std::chrono::nanoseconds rel ) noexce if ( state_.compare_exchange_weak( expected, 1u, std::memory_order_acquire, std::memory_order_relaxed ) ) return true; - auto s = add_async_waiter(); - detail::async_waiter_guard< fast_kqueue_mutex > guard( *this, detail::adopt_async_waiter ); + auto s = add_async_waiter(); + detail::async_waiter_guard< fast_kqueue_mutex_impl > guard( *this, detail::adopt_async_waiter ); while ( true ) { if ( ( s & 1u ) == 0 ) { @@ -172,15 +169,13 @@ bool fast_kqueue_mutex::try_lock_for_impl( std::chrono::nanoseconds rel ) noexce } if ( !detail::kevent_for( kqfd_, rel ) ) - // Timed out — guard destructor calls remove_async_waiter(). return false; - consume_lock(); s = state_.load( std::memory_order_acquire ); } } -} // namespace nova::sync +} // namespace nova::sync::detail #endif // NOVA_SYNC_HAS_KQUEUE_MUTEX diff --git a/source/nova/sync/mutex/parking_mutex.cpp b/source/nova/sync/mutex/parking_mutex.cpp new file mode 100644 index 0000000..3aa93f1 --- /dev/null +++ b/source/nova/sync/mutex/parking_mutex.cpp @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#include + +#include +#include +#include + +namespace nova::sync::impl { + +//---------------------------------------------------------------------------------------------------------------------- +// parking_mutex_plain — no backoff + +NOVA_SYNC_NOINLINE +void parking_mutex_plain::lock_slow( uint32_t expected ) noexcept +{ + // Spinning failed, now we need to sleep. + // To register ourselves as a waiter, we add 2, which increments the waiter count in the upper 31 bits. + state_.fetch_add( 2, std::memory_order_relaxed ); + expected = state_.load( std::memory_order_relaxed ); + + while ( true ) { + if ( ( expected & 1 ) == 0 ) { + // Lock is free. Acquire the lock AND unregister ourselves as a waiter. 
+ uint32_t desired = ( expected - 2 ) | 1; + if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } else { +#ifdef __linux__ + // On Linux, use atomic_wait (futex-based) for better performance + atomic_wait( state_, expected, std::memory_order_relaxed ); +#else + // On other platforms, use std::atomic::wait + state_.wait( expected, std::memory_order_relaxed ); +#endif + expected = state_.load( std::memory_order_relaxed ); + } + } +} + +//---------------------------------------------------------------------------------------------------------------------- +// parking_mutex_backoff — exponential backoff + +NOVA_SYNC_NOINLINE +void parking_mutex_backoff::lock_slow( uint32_t expected ) noexcept +{ + detail::exponential_backoff backoff; + + while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + if ( ( expected & 1 ) == 0 ) { + if ( state_.compare_exchange_weak( + expected, expected | 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + // CAS failed — expected was updated; re-evaluate immediately without backing off + continue; + } + + backoff.run(); + expected = state_.load( std::memory_order_relaxed ); + } + + // Spinning exhausted — register as waiter and park + state_.fetch_add( 2, std::memory_order_relaxed ); + expected = state_.load( std::memory_order_relaxed ); + + while ( true ) { + if ( ( expected & 1 ) == 0 ) { + uint32_t desired = ( expected - 2 ) | 1; + if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } else { +#ifdef __linux__ + // On Linux, use atomic_wait (futex-based) for better performance + atomic_wait( state_, expected, std::memory_order_relaxed ); +#else + // On other platforms, use std::atomic::wait + state_.wait( expected, std::memory_order_relaxed ); +#endif + expected = state_.load( std::memory_order_relaxed ); + } + } +} + 
+//---------------------------------------------------------------------------------------------------------------------- +// parking_mutex_timed — no backoff, futex-based atomic_wait + +NOVA_SYNC_NOINLINE +void parking_mutex_timed::lock_slow( uint32_t expected ) noexcept +{ + state_.fetch_add( 2, std::memory_order_relaxed ); + expected = state_.load( std::memory_order_relaxed ); + + while ( true ) { + if ( ( expected & 1 ) == 0 ) { + uint32_t desired = ( expected - 2 ) | 1; + if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } else { + atomic_wait( state_, expected, std::memory_order_relaxed ); + expected = state_.load( std::memory_order_relaxed ); + } + } +} + +//---------------------------------------------------------------------------------------------------------------------- +// parking_mutex_timed_backoff — exponential backoff, futex-based atomic_wait + +NOVA_SYNC_NOINLINE +void parking_mutex_timed_backoff::lock_slow( uint32_t expected ) noexcept +{ + detail::exponential_backoff backoff; + + while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + if ( ( expected & 1 ) == 0 ) { + if ( state_.compare_exchange_weak( + expected, expected | 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + // CAS failed — expected was updated; re-evaluate immediately without backing off + continue; + } + + backoff.run(); + expected = state_.load( std::memory_order_relaxed ); + } + + // Spinning exhausted — register as waiter and park + state_.fetch_add( 2, std::memory_order_relaxed ); + expected = state_.load( std::memory_order_relaxed ); + + while ( true ) { + if ( ( expected & 1 ) == 0 ) { + uint32_t desired = ( expected - 2 ) | 1; + if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } else { + atomic_wait( state_, expected, std::memory_order_relaxed ); + expected = state_.load( 
std::memory_order_relaxed ); + } + } +} + +} // namespace nova::sync::impl diff --git a/source/nova/sync/mutex/recursive_spinlock_mutex.cpp b/source/nova/sync/mutex/recursive_spinlock_mutex.cpp deleted file mode 100644 index 8622e07..0000000 --- a/source/nova/sync/mutex/recursive_spinlock_mutex.cpp +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#include - -#include -#include - -namespace nova::sync { - -NOVA_SYNC_NOINLINE -void nova::sync::recursive_spinlock_mutex::lock_slow( const std::thread::id tid ) noexcept -{ - detail::exponential_backoff backoff; - const std::thread::id empty_id {}; - - while ( true ) { - while ( owner_.load( std::memory_order_relaxed ) != empty_id ) - backoff.run(); - - std::thread::id expected = empty_id; - - if ( owner_.compare_exchange_weak( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { - recursion_count_ = 1; - return; - } - } -} - - -} // namespace nova::sync diff --git a/source/nova/sync/mutex/shared_spinlock_mutex.cpp b/source/nova/sync/mutex/shared_spinlock_mutex.cpp deleted file mode 100644 index d72906a..0000000 --- a/source/nova/sync/mutex/shared_spinlock_mutex.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#include - -#include -#include - -namespace nova::sync { - -NOVA_SYNC_NOINLINE -void shared_spinlock_mutex::lock_slow() noexcept -{ - detail::exponential_backoff backoff; - - while ( true ) { - uint32_t expected = state_.load( std::memory_order_relaxed ); - - // Spin while another writer holds the lock - while ( expected & write_locked ) { - backoff.run(); - expected = state_.load( std::memory_order_relaxed ); - } - - if ( ( expected & readers_mask ) > 0 ) { - // Broadcast to new readers that a writer is waiting. 
- if ( ( expected & write_pending ) == 0 ) { - if ( !state_.compare_exchange_weak( expected, expected | write_pending, std::memory_order_relaxed ) ) - continue; - - expected |= write_pending; - } - - // Spin until readers drain to 0 AND no other writer sneaks in. - while ( ( ( expected & readers_mask ) > 0 ) || ( expected & write_locked ) ) { - backoff.run(); - expected = state_.load( std::memory_order_relaxed ); - } - } - - uint32_t desired = ( expected & ~write_pending ) | write_locked; - if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; - } -} - -NOVA_SYNC_NOINLINE -void shared_spinlock_mutex::lock_shared_slow() noexcept -{ - detail::exponential_backoff backoff; - - while ( true ) { - uint32_t expected = state_.load( std::memory_order_relaxed ); - - // Wait while a writer is active OR waiting - while ( expected & ( write_locked | write_pending ) ) { - backoff.run(); - expected = state_.load( std::memory_order_relaxed ); - } - - // Try to increment the reader count - if ( state_.compare_exchange_weak( - expected, expected + 1, std::memory_order_acquire, std::memory_order_relaxed ) ) { - return; - } - } -} - - -} // namespace nova::sync diff --git a/source/nova/sync/mutex/spinlock_mutex.cpp b/source/nova/sync/mutex/spinlock_mutex.cpp index 3efa823..565b732 100644 --- a/source/nova/sync/mutex/spinlock_mutex.cpp +++ b/source/nova/sync/mutex/spinlock_mutex.cpp @@ -4,22 +4,164 @@ #include #include -#include +#include -namespace nova::sync { +namespace nova::sync::impl { -NOVA_SYNC_NOINLINE -void spinlock_mutex::lock_slow() noexcept +//---------------------------------------------------------------------------------------------------------------------- +// spinlock_plain + +void spinlock_plain::lock_slow() noexcept { - detail::exponential_backoff backoff; + while ( true ) { + while ( locked_.load( std::memory_order_relaxed ) ) + detail::pause(); + if ( !locked_.exchange( true, 
std::memory_order_acquire ) ) + return; + } +} + +//---------------------------------------------------------------------------------------------------------------------- +// spinlock_backoff +void spinlock_backoff::lock_slow() noexcept +{ + detail::exponential_backoff backoff; while ( true ) { while ( locked_.load( std::memory_order_relaxed ) ) backoff.run(); - if ( !locked_.exchange( true, std::memory_order_acquire ) ) return; } } -} // namespace nova::sync +//---------------------------------------------------------------------------------------------------------------------- +// recursive_spinlock_plain + +void recursive_spinlock_plain::lock_slow( std::thread::id tid ) noexcept +{ + const std::thread::id empty_id {}; + while ( true ) { + while ( owner_.load( std::memory_order_relaxed ) != empty_id ) + detail::pause(); + + std::thread::id expected = empty_id; + if ( owner_.compare_exchange_weak( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { + recursion_count_ = 1; + return; + } + } +} + +//---------------------------------------------------------------------------------------------------------------------- +// recursive_spinlock_backoff + +void recursive_spinlock_backoff::lock_slow( std::thread::id tid ) noexcept +{ + const std::thread::id empty_id {}; + detail::exponential_backoff backoff; + while ( true ) { + while ( owner_.load( std::memory_order_relaxed ) != empty_id ) + backoff.run(); + + std::thread::id expected = empty_id; + if ( owner_.compare_exchange_weak( expected, tid, std::memory_order_acquire, std::memory_order_relaxed ) ) { + recursion_count_ = 1; + return; + } + } +} + +//---------------------------------------------------------------------------------------------------------------------- +// shared_spinlock_plain + +void shared_spinlock_plain::lock_slow() noexcept +{ + while ( true ) { + uint32_t expected = state_.load( std::memory_order_relaxed ); + + while ( expected & write_locked ) { + detail::pause(); + expected 
= state_.load( std::memory_order_relaxed ); + } + + if ( ( expected & readers_mask ) > 0 ) { + if ( ( expected & write_pending ) == 0 ) { + if ( !state_.compare_exchange_weak( expected, expected | write_pending, std::memory_order_relaxed ) ) + continue; + expected |= write_pending; + } + + while ( ( ( expected & readers_mask ) > 0 ) || ( expected & write_locked ) ) { + detail::pause(); + expected = state_.load( std::memory_order_relaxed ); + } + } + + uint32_t desired = ( expected & ~write_pending ) | write_locked; + if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } +} + +void shared_spinlock_plain::lock_shared_slow() noexcept +{ + while ( true ) { + uint32_t expected = state_.load( std::memory_order_relaxed ); + while ( expected & ( write_locked | write_pending ) ) { + detail::pause(); + expected = state_.load( std::memory_order_relaxed ); + } + if ( state_.compare_exchange_weak( expected, expected + 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } +} + +//---------------------------------------------------------------------------------------------------------------------- +// shared_spinlock_backoff + +void shared_spinlock_backoff::lock_slow() noexcept +{ + detail::exponential_backoff backoff; + while ( true ) { + uint32_t expected = state_.load( std::memory_order_relaxed ); + + while ( expected & write_locked ) { + backoff.run(); + expected = state_.load( std::memory_order_relaxed ); + } + + if ( ( expected & readers_mask ) > 0 ) { + if ( ( expected & write_pending ) == 0 ) { + if ( !state_.compare_exchange_weak( expected, expected | write_pending, std::memory_order_relaxed ) ) + continue; + expected |= write_pending; + } + + while ( ( ( expected & readers_mask ) > 0 ) || ( expected & write_locked ) ) { + backoff.run(); + expected = state_.load( std::memory_order_relaxed ); + } + } + + uint32_t desired = ( expected & ~write_pending ) | write_locked; + if ( 
state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } +} + +void shared_spinlock_backoff::lock_shared_slow() noexcept +{ + detail::exponential_backoff backoff; + while ( true ) { + uint32_t expected = state_.load( std::memory_order_relaxed ); + while ( expected & ( write_locked | write_pending ) ) { + backoff.run(); + expected = state_.load( std::memory_order_relaxed ); + } + if ( state_.compare_exchange_weak( expected, expected + 1, std::memory_order_acquire, std::memory_order_relaxed ) ) + return; + } +} + +} // namespace nova::sync::impl diff --git a/source/nova/sync/mutex/ticket_mutex.cpp b/source/nova/sync/mutex/ticket_mutex.cpp new file mode 100644 index 0000000..892302f --- /dev/null +++ b/source/nova/sync/mutex/ticket_mutex.cpp @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#include + +#include +#include +#include + +namespace nova::sync { + +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +void ticket_mutex< Policies... >::lock_slow_plain( uint32_t my_ticket ) noexcept +{ + while ( true ) { + uint32_t current_serving = serving_ticket_.load( std::memory_order_acquire ); + if ( current_serving == my_ticket ) + return; + atomic_wait_for( serving_ticket_, current_serving, std::chrono::hours( 24 ) ); + } +} + +template < typename... Policies > + requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) +void ticket_mutex< Policies... 
>::lock_slow_backoff( uint32_t my_ticket ) noexcept +{ + detail::exponential_backoff backoff; + while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + if ( serving_ticket_.load( std::memory_order_acquire ) == my_ticket ) + return; + backoff.run(); + } + + while ( true ) { + uint32_t current_serving = serving_ticket_.load( std::memory_order_acquire ); + if ( current_serving == my_ticket ) + return; + atomic_wait_for( serving_ticket_, current_serving, std::chrono::hours( 24 ) ); + } +} + +// Explicit instantiations +template class ticket_mutex<>; +template class ticket_mutex< with_backoff >; + +} // namespace nova::sync diff --git a/test/event_benchmarks.cpp b/test/event_benchmarks.cpp index 6561065..8f4b787 100644 --- a/test/event_benchmarks.cpp +++ b/test/event_benchmarks.cpp @@ -3,10 +3,10 @@ #include -#include -#include #include #include +#include +#include #include #include diff --git a/test/event_test.cpp b/test/event_test.cpp index 6d1aa01..daccf73 100644 --- a/test/event_test.cpp +++ b/test/event_test.cpp @@ -3,11 +3,11 @@ #include -#include #include -#include #include #include +#include +#include #include #include diff --git a/test/mutex_benchmarks.cpp b/test/mutex_benchmarks.cpp index ffae0c9..6eb32d2 100644 --- a/test/mutex_benchmarks.cpp +++ b/test/mutex_benchmarks.cpp @@ -32,11 +32,7 @@ TEMPLATE_TEST_CASE( "mutex benchmarks", std::timed_mutex, std::recursive_mutex, std::recursive_timed_mutex, - nova::sync::fair_mutex, - nova::sync::fast_mutex, - nova::sync::spinlock_mutex, - nova::sync::recursive_spinlock_mutex, - nova::sync::shared_spinlock_mutex NOVA_SYNC_MUTEX_TEST_EXTRA_TYPES NOVA_SYNC_QT_MUTEX_TYPE ) + NOVA_SYNC_ALL_MUTEX_TYPES NOVA_SYNC_QT_MUTEX_TYPE ) { using mutex_t = TestType; diff --git a/test/mutex_test.cpp b/test/mutex_test.cpp index eb96a0d..9a4031f 100644 --- a/test/mutex_test.cpp +++ b/test/mutex_test.cpp @@ -350,9 +350,9 @@ TEMPLATE_TEST_CASE( "mutex: shared locking", "[mutex]", NOVA_SYNC_SHARED_MUTEX_T // Fair mutex — 
FIFO ordering test // --------------------------------------------------------------------------- -TEST_CASE( "mutex: fair_mutex FIFO ordering", "[mutex]" ) +TEST_CASE( "mutex: ticket_mutex FIFO ordering", "[mutex]" ) { - nova::sync::fair_mutex m; + nova::sync::ticket_mutex<> m; const unsigned threads = std::min( 8u, std::max( 2u, std::thread::hardware_concurrency() ) ); @@ -415,8 +415,8 @@ TEST_CASE( "mutex: fair_mutex FIFO ordering", "[mutex]" ) #ifdef NOVA_SYNC_TIMED_MUTEX_TYPES -static_assert( nova::sync::concepts::timed_mutex< nova::sync::fast_mutex > ); -static_assert( nova::sync::concepts::timed_mutex< nova::sync::fair_mutex > ); +static_assert( nova::sync::concepts::timed_mutex< nova::sync::parking_mutex< nova::sync::timed > > ); +static_assert( nova::sync::concepts::timed_mutex< nova::sync::ticket_mutex<> > ); TEMPLATE_TEST_CASE( "mutex: timed locking", "[mutex]", NOVA_SYNC_TIMED_MUTEX_TYPES ) { @@ -608,7 +608,7 @@ bool set_thread_sched( int policy, int priority ) TEMPLATE_TEST_CASE( "pthread_rt_mutex: prevents priority inversion", "[pthread_rt_mutex][.]", - nova::sync::pthread_priority_ceiling_mutex, + nova::sync::pthread_priority_ceiling_mutex< 30 >, nova::sync::pthread_priority_inherit_mutex, std::mutex ) { @@ -622,8 +622,8 @@ TEMPLATE_TEST_CASE( "pthread_rt_mutex: prevents priority inversion", REQUIRE( set_thread_sched( SCHED_FIFO, prio_Main ) ); auto m = [] { - if constexpr ( std::is_same_v< MutexType, nova::sync::pthread_priority_ceiling_mutex > ) { - return MutexType( nova::sync::priority_ceiling( prio_H ) ); + if constexpr ( std::is_same_v< MutexType, nova::sync::pthread_priority_ceiling_mutex< 30 > > ) { + return MutexType(); } else { return MutexType(); } @@ -884,31 +884,26 @@ TEMPLATE_TEST_CASE( "mutex: steady_clock try_lock_until", "[mutex]", nova::sync: using mutex_t = TestType; []() NOVA_SYNC_NO_THREAD_SAFETY_ANALYSIS { - try { - mutex_t mtx; + mutex_t mtx; - // Lock the mutex first - mtx.lock(); + // Lock the mutex first + mtx.lock(); - // 
Try to acquire with steady_clock timeout - auto deadline = std::chrono::steady_clock::now() + 10ms; - bool acquired = mtx.try_lock_until( deadline ); + // Try to acquire with steady_clock timeout + auto deadline = std::chrono::steady_clock::now() + 10ms; + bool acquired = mtx.try_lock_until( deadline ); - // Should timeout (mutex is locked) - REQUIRE( acquired == false ); + // Should timeout (mutex is locked) + REQUIRE( acquired == false ); - mtx.unlock(); + mtx.unlock(); - // Now should succeed - deadline = std::chrono::steady_clock::now() + 10ms; - acquired = mtx.try_lock_until( deadline ); - REQUIRE( acquired == true ); + // Now should succeed + deadline = std::chrono::steady_clock::now() + 10ms; + acquired = mtx.try_lock_until( deadline ); + REQUIRE( acquired == true ); - mtx.unlock(); - } catch ( const std::runtime_error& ) { - // pthread_rt_mutex might not be available on all systems - SKIP( "pthread_rt_mutex not available" ); - } + mtx.unlock(); }(); } diff --git a/test/mutex_thread_safety_test.cpp b/test/mutex_thread_safety_test.cpp index dcec548..895c96f 100644 --- a/test/mutex_thread_safety_test.cpp +++ b/test/mutex_thread_safety_test.cpp @@ -3,11 +3,9 @@ #include -#include -#include -#include -#include +#include #include +#include #include #include @@ -137,8 +135,8 @@ TEST_CASE( "thread-safety annotations: shared mutex GUARDED_BY", "[annotations][ { struct shared_counter { - mutable nova::sync::shared_spinlock_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::shared_spinlock_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void write( int v ) NOVA_SYNC_EXCLUDES( mtx ) { @@ -166,8 +164,8 @@ TEST_CASE( "thread-safety annotations: recursive mutex REENTRANT_CAPABILITY", "[ { struct recursive_counter { - mutable nova::sync::recursive_spinlock_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::recursive_spinlock_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void increment() #if 
defined( __clang__ ) && ( __clang_major__ >= 22 ) @@ -280,12 +278,12 @@ TEST_CASE( "tsa_mutex_adapter: try_lock_shared forwarded for std::shared_mutex", mtx.unlock_shared(); } -TEST_CASE( "tsa_mutex_adapter: wraps nova::sync::fast_mutex with GUARDED_BY", "[annotations][tsa_mutex_adapter]" ) +TEST_CASE( "tsa_mutex_adapter: wraps nova::sync::parking_mutex with GUARDED_BY", "[annotations][tsa_mutex_adapter]" ) { struct guarded { - mutable nova::sync::tsa_mutex_adapter< nova::sync::fast_mutex > mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::tsa_mutex_adapter< nova::sync::parking_mutex<> > mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void increment() NOVA_SYNC_EXCLUDES( mtx ) { @@ -316,18 +314,18 @@ TEST_CASE( "lock_guard: acquires and releases", "[annotations][lock_guard]" ) { struct guarded { - mutable nova::sync::spinlock_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::spinlock_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void increment() NOVA_SYNC_EXCLUDES( mtx ) { - nova::sync::lock_guard< nova::sync::spinlock_mutex > guard( mtx ); + nova::sync::lock_guard< nova::sync::spinlock_mutex<> > guard( mtx ); ++value; } int get() const NOVA_SYNC_EXCLUDES( mtx ) { - nova::sync::lock_guard< nova::sync::spinlock_mutex > guard( mtx ); + nova::sync::lock_guard< nova::sync::spinlock_mutex<> > guard( mtx ); return value; } }; @@ -340,10 +338,10 @@ TEST_CASE( "lock_guard: acquires and releases", "[annotations][lock_guard]" ) TEST_CASE( "lock_guard: adopt_lock does not re-lock", "[annotations][lock_guard]" ) { - nova::sync::spinlock_mutex mtx; + nova::sync::spinlock_mutex<> mtx; mtx.lock(); { - nova::sync::lock_guard< nova::sync::spinlock_mutex > guard( mtx, std::adopt_lock ); + nova::sync::lock_guard< nova::sync::spinlock_mutex<> > guard( mtx, std::adopt_lock ); } // mutex is now unlocked — we can lock again bool locked = mtx.try_lock(); @@ -369,7 +367,7 @@ TEST_CASE( "tsa_recursive_mutex_adapter: 
wraps recursive_spinlock_mutex", "[anno { struct guarded { - mutable nova::sync::tsa_recursive_mutex_adapter< nova::sync::recursive_spinlock_mutex > mtx; + mutable nova::sync::tsa_recursive_mutex_adapter< nova::sync::recursive_spinlock_mutex<> > mtx; int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void increment() NOVA_SYNC_EXCLUDES( mtx ) @@ -438,8 +436,8 @@ TEST_CASE( "thread-safety annotations: ACQUIRED_BEFORE lock ordering", "[annotat { struct double_guarded { - mutable nova::sync::spinlock_mutex mtx1 NOVA_SYNC_ACQUIRED_BEFORE( mtx2 ); - mutable nova::sync::spinlock_mutex mtx2; + mutable nova::sync::spinlock_mutex<> mtx1 NOVA_SYNC_ACQUIRED_BEFORE( mtx2 ); + mutable nova::sync::spinlock_mutex<> mtx2; int value1 NOVA_SYNC_GUARDED_BY( mtx1 ) { 1 }; int value2 NOVA_SYNC_GUARDED_BY( mtx2 ) { 2 }; @@ -463,8 +461,8 @@ TEST_CASE( "thread-safety annotations: ASSERT_CAPABILITY", "[annotations][thread { struct runtime_checked { - mutable nova::sync::spinlock_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 42 }; + mutable nova::sync::spinlock_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 42 }; void assert_is_locked() const NOVA_SYNC_ASSERT_CAPABILITY( mtx ) { @@ -490,8 +488,8 @@ TEST_CASE( "thread-safety annotations: ACQUIRED_AFTER lock ordering", "[annotati { struct double_guarded { - mutable nova::sync::spinlock_mutex mtx1; - mutable nova::sync::spinlock_mutex mtx2 NOVA_SYNC_ACQUIRED_AFTER( mtx1 ); + mutable nova::sync::spinlock_mutex<> mtx1; + mutable nova::sync::spinlock_mutex<> mtx2 NOVA_SYNC_ACQUIRED_AFTER( mtx1 ); int value1 NOVA_SYNC_GUARDED_BY( mtx1 ) { 1 }; int value2 NOVA_SYNC_GUARDED_BY( mtx2 ) { 2 }; @@ -515,8 +513,8 @@ TEST_CASE( "thread-safety annotations: shared capability with REQUIRES_SHARED", { struct shared_guarded { - mutable nova::sync::shared_spinlock_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 99 }; + mutable nova::sync::shared_spinlock_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 99 }; int read_shared() const 
NOVA_SYNC_REQUIRES_SHARED( mtx ) { @@ -562,18 +560,18 @@ TEST_CASE( "lock_guard: works with shared_spinlock_mutex", "[annotations][lock_g { struct guarded_shared { - mutable nova::sync::shared_spinlock_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::shared_spinlock_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void increment() NOVA_SYNC_EXCLUDES( mtx ) { - nova::sync::lock_guard< nova::sync::shared_spinlock_mutex > guard( mtx ); + nova::sync::lock_guard< nova::sync::shared_spinlock_mutex<> > guard( mtx ); ++value; } int get() const NOVA_SYNC_EXCLUDES( mtx ) { - nova::sync::lock_guard< nova::sync::shared_spinlock_mutex > guard( mtx ); + nova::sync::lock_guard< nova::sync::shared_spinlock_mutex<> > guard( mtx ); return value; } }; diff --git a/test/mutex_types.hpp b/test/mutex_types.hpp index e13ac18..25177c9 100644 --- a/test/mutex_types.hpp +++ b/test/mutex_types.hpp @@ -2,14 +2,12 @@ #include #include -#include -#include #include -#include +#include +#include #include -#include -#include #include +#include #include #include #include @@ -21,6 +19,16 @@ # define NOVA_SYNC_HAS_PTHREAD_SPINLOCK_arg #endif +#ifdef NOVA_SYNC_HAS_PTHREAD_MUTEX +# define NOVA_SYNC_PTHREAD_NONRECURSIVE_MUTEX_arg \ + , nova::sync::pthread_default_mutex, nova::sync::pthread_mutex< nova::sync::pthread_policy::pthread_adaptive >, \ + nova::sync::pthread_mutex< nova::sync::pthread_policy::pthread_errorcheck > +# define NOVA_SYNC_PTHREAD_RECURSIVE_MUTEX_arg , nova::sync::pthread_mutex< nova::sync::recursive > +#else +# define NOVA_SYNC_PTHREAD_NONRECURSIVE_MUTEX_arg +# define NOVA_SYNC_PTHREAD_RECURSIVE_MUTEX_arg +#endif + #ifdef NOVA_SYNC_HAS_PTHREAD_RT_MUTEX # define NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg , nova::sync::pthread_priority_inherit_mutex #else @@ -28,7 +36,7 @@ #endif #ifdef _WIN32 -# define NOVA_SYNC_WIN32_CRITICAL_SECTION_MUTEX_arg , nova::sync::win32_critical_section_mutex +# define NOVA_SYNC_WIN32_CRITICAL_SECTION_MUTEX_arg , 
nova::sync::win32_critical_section_mutex<> # define NOVA_SYNC_WIN32_EVENT_MUTEX_arg , nova::sync::win32_event_mutex # define NOVA_SYNC_WIN32_MUTEX_arg , nova::sync::win32_mutex # define NOVA_SYNC_WIN32_SRW_MUTEX_arg , nova::sync::win32_srw_mutex @@ -40,14 +48,6 @@ # define NOVA_SYNC_WIN32_SRW_MUTEX_arg #endif -#ifdef NOVA_SYNC_HAS_WIN32_MUTEX -#else -#endif - -#ifdef NOVA_SYNC_HAS_WIN32_SRW_MUTEX -#else -#endif - #ifdef NOVA_SYNC_HAS_APPLE_OS_UNFAIR_MUTEX # define NOVA_SYNC_APPLE_OS_UNFAIR_MUTEX_arg , nova::sync::apple_os_unfair_mutex #else @@ -55,132 +55,101 @@ #endif #ifdef NOVA_SYNC_HAS_KQUEUE_MUTEX -# define NOVA_SYNC_KQUEUE_MUTEX_arg , nova::sync::kqueue_mutex -# define NOVA_SYNC_FAST_KQUEUE_MUTEX_arg , nova::sync::fast_kqueue_mutex -# define NOVA_SYNC_ASYNC_MUTEX_TYPES nova::sync::kqueue_mutex, nova::sync::fast_kqueue_mutex +# define NOVA_SYNC_KQUEUE_MUTEX_arg , nova::sync::kqueue_mutex<> +# define NOVA_SYNC_FAST_KQUEUE_MUTEX_arg , nova::sync::kqueue_mutex< nova::sync::with_backoff > +# define NOVA_SYNC_ASYNC_MUTEX_TYPES nova::sync::kqueue_mutex<>, nova::sync::kqueue_mutex< nova::sync::with_backoff > #else # define NOVA_SYNC_KQUEUE_MUTEX_arg # define NOVA_SYNC_FAST_KQUEUE_MUTEX_arg #endif #ifdef NOVA_SYNC_HAS_EVENTFD_MUTEX -# define NOVA_SYNC_EVENTFD_MUTEX_arg , nova::sync::eventfd_mutex -# define NOVA_SYNC_FAST_EVENTFD_MUTEX_arg , nova::sync::fast_eventfd_mutex -# define NOVA_SYNC_ASYNC_MUTEX_TYPES nova::sync::eventfd_mutex, nova::sync::fast_eventfd_mutex +# define NOVA_SYNC_EVENTFD_MUTEX_arg , nova::sync::eventfd_mutex<> +# define NOVA_SYNC_FAST_EVENTFD_MUTEX_arg , nova::sync::eventfd_mutex< nova::sync::with_backoff > +# define NOVA_SYNC_ASYNC_MUTEX_TYPES nova::sync::eventfd_mutex<>, nova::sync::eventfd_mutex< nova::sync::with_backoff > #else # define NOVA_SYNC_EVENTFD_MUTEX_arg # define NOVA_SYNC_FAST_EVENTFD_MUTEX_arg #endif // clang-format off -#define NOVA_SYNC_MUTEX_TEST_EXTRA_TYPES \ -NOVA_SYNC_HAS_PTHREAD_SPINLOCK_arg \ - 
NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg \ - NOVA_SYNC_WIN32_CRITICAL_SECTION_MUTEX_arg \ - NOVA_SYNC_WIN32_MUTEX_arg \ - NOVA_SYNC_WIN32_EVENT_MUTEX_arg \ - NOVA_SYNC_WIN32_SRW_MUTEX_arg \ - NOVA_SYNC_APPLE_OS_UNFAIR_MUTEX_arg \ - NOVA_SYNC_KQUEUE_MUTEX_arg \ - NOVA_SYNC_FAST_KQUEUE_MUTEX_arg \ - NOVA_SYNC_EVENTFD_MUTEX_arg \ + +#define NOVA_SYNC_MUTEX_TEST_EXTRA_TYPES \ + NOVA_SYNC_HAS_PTHREAD_SPINLOCK_arg \ + NOVA_SYNC_PTHREAD_RECURSIVE_MUTEX_arg \ + NOVA_SYNC_PTHREAD_NONRECURSIVE_MUTEX_arg \ + NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg \ + NOVA_SYNC_WIN32_CRITICAL_SECTION_MUTEX_arg \ + NOVA_SYNC_WIN32_MUTEX_arg \ + NOVA_SYNC_WIN32_EVENT_MUTEX_arg \ + NOVA_SYNC_WIN32_SRW_MUTEX_arg \ + NOVA_SYNC_APPLE_OS_UNFAIR_MUTEX_arg \ + NOVA_SYNC_KQUEUE_MUTEX_arg \ + NOVA_SYNC_FAST_KQUEUE_MUTEX_arg \ + NOVA_SYNC_EVENTFD_MUTEX_arg \ NOVA_SYNC_FAST_EVENTFD_MUTEX_arg -// clang-format on /// @brief All mutex types available on this platform. /// First type has no leading comma; the rest come via _arg macros that supply their own. /// Use after explicit types in TEMPLATE_TEST_CASE or as the sole type-list. 
+ // clang-format off -#define NOVA_SYNC_ALL_MUTEX_TYPES \ - nova::sync::fast_mutex, \ - nova::sync::fair_mutex, \ - nova::sync::spinlock_mutex, \ - nova::sync::recursive_spinlock_mutex, \ - nova::sync::shared_spinlock_mutex \ + +using parking_timed_mutex_with_backoff = nova::sync::parking_mutex< nova::sync::timed, nova::sync::with_backoff >; +using spinlock_mutex_with_backoff = nova::sync::spinlock_mutex< nova::sync::with_backoff >; +using spinlock_mutex_recursive = nova::sync::spinlock_mutex< nova::sync::recursive >; +using spinlock_mutex_recursive_with_backoff = nova::sync::spinlock_mutex< nova::sync::recursive, nova::sync::with_backoff >; +using spinlock_mutex_shared = nova::sync::spinlock_mutex< nova::sync::shared >; +using spinlock_mutex_shared_with_backoff = nova::sync::spinlock_mutex< nova::sync::shared, nova::sync::with_backoff >; + +#define NOVA_SYNC_ALL_MUTEX_TYPES \ + nova::sync::parking_mutex<>, \ + nova::sync::parking_mutex, \ + nova::sync::parking_mutex, \ + parking_timed_mutex_with_backoff, \ + nova::sync::ticket_mutex<>, \ + nova::sync::ticket_mutex, \ + nova::sync::spinlock_mutex<>, \ + spinlock_mutex_with_backoff, \ + spinlock_mutex_recursive, \ + spinlock_mutex_recursive_with_backoff, \ + spinlock_mutex_shared, \ + spinlock_mutex_shared_with_backoff \ NOVA_SYNC_MUTEX_TEST_EXTRA_TYPES -// clang-format on -/// @brief All non-recursive mutex types available on this platform. -/// These satisfy `concepts::mutex` but NOT `concepts::recursive_mutex`. 
-// clang-format off -#define NOVA_SYNC_NON_RECURSIVE_MUTEX_TYPES \ - nova::sync::fast_mutex, \ - nova::sync::fair_mutex, \ - nova::sync::spinlock_mutex, \ - nova::sync::shared_spinlock_mutex \ - NOVA_SYNC_HAS_PTHREAD_SPINLOCK_arg \ - NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg \ - NOVA_SYNC_WIN32_EVENT_MUTEX_arg \ - NOVA_SYNC_WIN32_MUTEX_arg \ - NOVA_SYNC_WIN32_SRW_MUTEX_arg \ - NOVA_SYNC_APPLE_OS_UNFAIR_MUTEX_arg \ - NOVA_SYNC_KQUEUE_MUTEX_arg \ - NOVA_SYNC_FAST_KQUEUE_MUTEX_arg \ - NOVA_SYNC_EVENTFD_MUTEX_arg \ - NOVA_SYNC_FAST_EVENTFD_MUTEX_arg -// clang-format on +#define NOVA_SYNC_NON_RECURSIVE_MUTEX_TYPES \ + nova::sync::parking_mutex<>, \ + nova::sync::parking_mutex, \ + nova::sync::ticket_mutex<>, \ + nova::sync::ticket_mutex, \ + nova::sync::spinlock_mutex<>, \ + spinlock_mutex_shared \ + NOVA_SYNC_MUTEX_TEST_EXTRA_TYPES -/// @brief All recursive mutex types available on this platform. -/// These satisfy `concepts::recursive_mutex`. -// clang-format off -#define NOVA_SYNC_RECURSIVE_MUTEX_TYPES \ - nova::sync::recursive_spinlock_mutex \ +#define NOVA_SYNC_RECURSIVE_MUTEX_TYPES \ + spinlock_mutex_recursive \ + NOVA_SYNC_PTHREAD_RECURSIVE_MUTEX_arg \ NOVA_SYNC_WIN32_CRITICAL_SECTION_MUTEX_arg -// clang-format on -/// @brief All timed mutex types available on this platform. -/// These satisfy `concepts::timed_mutex` (have try_lock_for / try_lock_until). -/// The first type in each platform branch has no leading comma so this macro -/// can be used as the sole type list in TEMPLATE_TEST_CASE. 
-// clang-format off -#ifdef NOVA_SYNC_HAS_KQUEUE_MUTEX -# define NOVA_SYNC_TIMED_MUTEX_TYPES \ - nova::sync::fast_mutex, \ - nova::sync::fair_mutex, \ - nova::sync::kqueue_mutex, \ - nova::sync::fast_kqueue_mutex \ - NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg -#elif defined( NOVA_SYNC_HAS_EVENTFD_MUTEX ) -# define NOVA_SYNC_TIMED_MUTEX_TYPES \ - nova::sync::fast_mutex, \ - nova::sync::fair_mutex, \ - nova::sync::eventfd_mutex, \ - nova::sync::fast_eventfd_mutex \ - NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg -#elif defined( _WIN32 ) -# define NOVA_SYNC_TIMED_MUTEX_TYPES \ - nova::sync::fast_mutex, \ - nova::sync::fair_mutex, \ - nova::sync::win32_event_mutex, \ - nova::sync::win32_mutex -#elif defined( NOVA_SYNC_HAS_PTHREAD_RT_MUTEX ) -# define NOVA_SYNC_TIMED_MUTEX_TYPES \ - nova::sync::fast_mutex, \ - nova::sync::fair_mutex, \ - nova::sync::pthread_priority_inherit_mutex -#else -# define NOVA_SYNC_TIMED_MUTEX_TYPES \ - nova::sync::fast_mutex, \ - nova::sync::fair_mutex -#endif -// clang-format on +#define NOVA_SYNC_TIMED_MUTEX_TYPES \ + nova::sync::parking_mutex, \ + nova::sync::ticket_mutex<>, \ + nova::sync::ticket_mutex \ + NOVA_SYNC_WIN32_EVENT_MUTEX_arg \ + NOVA_SYNC_KQUEUE_MUTEX_arg \ + NOVA_SYNC_FAST_KQUEUE_MUTEX_arg \ + NOVA_SYNC_EVENTFD_MUTEX_arg \ + NOVA_SYNC_FAST_EVENTFD_MUTEX_arg -/// @brief All shared mutex types available on this platform. -/// These satisfy `concepts::shared_mutex` (have lock_shared / unlock_shared / try_lock_shared). -// clang-format off -#define NOVA_SYNC_SHARED_MUTEX_TYPES \ - nova::sync::shared_spinlock_mutex -// clang-format on +#define NOVA_SYNC_SHARED_MUTEX_TYPES \ + spinlock_mutex_shared, \ + spinlock_mutex_shared_with_backoff -/// @brief Macro listing only the async-capable mutex types available on this platform. -/// -/// Used in async-specific TEMPLATE_TEST_CASEs to run tests for all mutex types -/// that support async acquisition via the event-loop integration layer. 
-// clang-format off #define NOVA_SYNC_ASYNC_MUTEX_TEST_TYPES \ NOVA_SYNC_WIN32_EVENT_MUTEX_arg \ NOVA_SYNC_KQUEUE_MUTEX_arg \ NOVA_SYNC_FAST_KQUEUE_MUTEX_arg \ NOVA_SYNC_EVENTFD_MUTEX_arg \ NOVA_SYNC_FAST_EVENTFD_MUTEX_arg + // clang-format on diff --git a/test/negative/acquired_after_wrong_order.cpp b/test/negative/acquired_after_wrong_order.cpp index 45b1ebe..5257d78 100644 --- a/test/negative/acquired_after_wrong_order.cpp +++ b/test/negative/acquired_after_wrong_order.cpp @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann -#include +#include #include struct container { - mutable nova::sync::fast_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::parking_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; // Function marked as requiring lock, but called without lock void modify() NOVA_SYNC_REQUIRES( mtx ) diff --git a/test/negative/double_lock_non_reentrant.cpp b/test/negative/double_lock_non_reentrant.cpp index 319d55b..8d0f727 100644 --- a/test/negative/double_lock_non_reentrant.cpp +++ b/test/negative/double_lock_non_reentrant.cpp @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann -#include +#include #include struct container { - mutable nova::sync::fast_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::parking_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void nested_lock() NOVA_SYNC_EXCLUDES( mtx ) { diff --git a/test/negative/excludes_while_holding.cpp b/test/negative/excludes_while_holding.cpp index 99233df..2967907 100644 --- a/test/negative/excludes_while_holding.cpp +++ b/test/negative/excludes_while_holding.cpp @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann -#include +#include #include struct counter { - mutable nova::sync::fast_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::parking_mutex<> mtx; + int 
value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void increment() NOVA_SYNC_EXCLUDES( mtx ) { diff --git a/test/negative/guarded_by_without_lock.cpp b/test/negative/guarded_by_without_lock.cpp index b6efdab..1e6db55 100644 --- a/test/negative/guarded_by_without_lock.cpp +++ b/test/negative/guarded_by_without_lock.cpp @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann -#include +#include #include struct counter { - mutable nova::sync::fast_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::parking_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; }; int main() diff --git a/test/negative/pt_guarded_by_without_lock.cpp b/test/negative/pt_guarded_by_without_lock.cpp index 1d88fd2..41dda67 100644 --- a/test/negative/pt_guarded_by_without_lock.cpp +++ b/test/negative/pt_guarded_by_without_lock.cpp @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann -#include +#include #include struct container { - mutable nova::sync::fast_mutex mtx; - int* ptr NOVA_SYNC_PT_GUARDED_BY( mtx ) { nullptr }; + mutable nova::sync::parking_mutex<> mtx; + int* ptr NOVA_SYNC_PT_GUARDED_BY( mtx ) { nullptr }; }; int main() diff --git a/test/negative/release_without_lock.cpp b/test/negative/release_without_lock.cpp index 19e4f7b..1980f48 100644 --- a/test/negative/release_without_lock.cpp +++ b/test/negative/release_without_lock.cpp @@ -1,12 +1,12 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann -#include +#include #include struct unlocked_caller { - mutable nova::sync::fast_mutex mtx; + mutable nova::sync::parking_mutex<> mtx; void unlock_without_lock() NOVA_SYNC_REQUIRES( mtx ) { diff --git a/test/negative/requires_without_lock.cpp b/test/negative/requires_without_lock.cpp index 91b04ad..06361f5 100644 --- a/test/negative/requires_without_lock.cpp +++ b/test/negative/requires_without_lock.cpp @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT // 
SPDX-FileCopyrightText: 2026 Tim Blechmann -#include +#include #include struct counter { - mutable nova::sync::fast_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::parking_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void increment() NOVA_SYNC_REQUIRES( mtx ) { diff --git a/test/negative/shared_lock_write_guarded.cpp b/test/negative/shared_lock_write_guarded.cpp index 02a2f78..8fe729f 100644 --- a/test/negative/shared_lock_write_guarded.cpp +++ b/test/negative/shared_lock_write_guarded.cpp @@ -6,8 +6,8 @@ struct shared_guarded { - mutable nova::sync::shared_spinlock_mutex mtx; - int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; + mutable nova::sync::shared_spinlock_mutex<> mtx; + int value NOVA_SYNC_GUARDED_BY( mtx ) { 0 }; void read_only() const NOVA_SYNC_REQUIRES_SHARED( mtx ) { diff --git a/test/policy_mutex_test.cpp b/test/policy_mutex_test.cpp new file mode 100644 index 0000000..c20bf5e --- /dev/null +++ b/test/policy_mutex_test.cpp @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +/// @file policy_mutex_test.cpp +/// @brief Tests for policy-based mutex types and select_mutex. 
+ +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +//---------------------------------------------------------------------------------------------------------------------- +// Concept checks: parking_mutex + +static_assert( nova::sync::concepts::timed_mutex< nova::sync::parking_mutex< nova::sync::timed > > ); +static_assert( + nova::sync::concepts::timed_mutex< nova::sync::parking_mutex< nova::sync::timed, nova::sync::with_backoff > > ); + +//---------------------------------------------------------------------------------------------------------------------- +// Concept checks: ticket_mutex + +static_assert( nova::sync::concepts::timed_mutex< nova::sync::ticket_mutex<> > ); +static_assert( nova::sync::concepts::timed_mutex< nova::sync::ticket_mutex< nova::sync::with_backoff > > ); + +//---------------------------------------------------------------------------------------------------------------------- +// Concept checks: spinlock variants + +static_assert( nova::sync::concepts::mutex< nova::sync::spinlock_mutex<> > ); +static_assert( nova::sync::concepts::mutex< nova::sync::spinlock_mutex< nova::sync::with_backoff > > ); + +static_assert( nova::sync::concepts::recursive_mutex< nova::sync::recursive_spinlock_mutex<> > ); +static_assert( nova::sync::concepts::recursive_mutex< nova::sync::recursive_spinlock_mutex< nova::sync::with_backoff > > ); + +static_assert( nova::sync::concepts::shared_mutex< nova::sync::shared_spinlock_mutex<> > ); + +//---------------------------------------------------------------------------------------------------------------------- +// Concept checks: semaphores + +static_assert( nova::sync::concepts::counting_semaphore< nova::sync::parking_semaphore<> > ); +static_assert( nova::sync::concepts::counting_semaphore< nova::sync::parking_semaphore< nova::sync::with_backoff > > ); +static_assert( nova::sync::concepts::timed_counting_semaphore< nova::sync::timed_semaphore<> > 
); +static_assert( nova::sync::concepts::timed_counting_semaphore< nova::sync::timed_semaphore< nova::sync::with_backoff > > ); + +// Aliases +static_assert( std::is_same_v< nova::sync::fast_semaphore, nova::sync::parking_semaphore<> > ); +static_assert( std::is_same_v< nova::sync::fast_timed_semaphore, nova::sync::timed_semaphore<> > ); + +//---------------------------------------------------------------------------------------------------------------------- +// Concept checks: events + +static_assert( nova::sync::concepts::auto_reset_event< nova::sync::parking_auto_reset_event<> > ); +static_assert( + nova::sync::concepts::auto_reset_event< nova::sync::parking_auto_reset_event< nova::sync::with_backoff > > ); +static_assert( nova::sync::concepts::auto_reset_event< nova::sync::auto_reset_event > ); + +static_assert( nova::sync::concepts::manual_reset_event< nova::sync::parking_manual_reset_event<> > ); +static_assert( + nova::sync::concepts::manual_reset_event< nova::sync::parking_manual_reset_event< nova::sync::with_backoff > > ); +static_assert( nova::sync::concepts::manual_reset_event< nova::sync::manual_reset_event > ); diff --git a/test/semaphore_test.cpp b/test/semaphore_test.cpp index 58346fe..5529fbc 100644 --- a/test/semaphore_test.cpp +++ b/test/semaphore_test.cpp @@ -4,7 +4,7 @@ #include #include -#include +#include #include "semaphore_types.hpp" diff --git a/test/semaphore_types.hpp b/test/semaphore_types.hpp index e69ef50..3c06b1d 100644 --- a/test/semaphore_types.hpp +++ b/test/semaphore_types.hpp @@ -7,10 +7,10 @@ #include #include -#include #include #include #include +#include #include #include From c8c2d65adb66ec7e9ab400f0823f62b7b8ccd0d9 Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Sun, 3 May 2026 17:18:41 +0800 Subject: [PATCH 3/6] add pthread_rwlock_mutex --- CMakeLists.txt | 1 + include/nova/sync/mutex/concepts.hpp | 1 - include/nova/sync/mutex/pthread_mutex.hpp | 26 ++- .../nova/sync/mutex/pthread_rwlock_mutex.hpp | 172 
++++++++++++++++++ test/mutex_types.hpp | 30 ++- 5 files changed, 216 insertions(+), 14 deletions(-) create mode 100644 include/nova/sync/mutex/pthread_rwlock_mutex.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 82fc215..696f718 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,6 +115,7 @@ set(headers include/nova/sync/thread_safety/locked_object.hpp include/nova/sync/mutex/native_async_mutex.hpp include/nova/sync/mutex/pthread_mutex.hpp + include/nova/sync/mutex/pthread_rwlock_mutex.hpp include/nova/sync/mutex/pthread_spinlock_mutex.hpp diff --git a/include/nova/sync/mutex/concepts.hpp b/include/nova/sync/mutex/concepts.hpp index 13a2120..a4c2bd9 100644 --- a/include/nova/sync/mutex/concepts.hpp +++ b/include/nova/sync/mutex/concepts.hpp @@ -72,7 +72,6 @@ inline constexpr bool concepts_is_recursive_v = concepts_is_recursive< T >::valu // recursive_mutex concept evaluates the trait template < typename M > concept recursive_mutex = mutex< M > && concepts_is_recursive_v< M >; - template <> struct concepts_is_recursive< std::recursive_mutex > : std::true_type {}; diff --git a/include/nova/sync/mutex/pthread_mutex.hpp b/include/nova/sync/mutex/pthread_mutex.hpp index d1cbcf0..5d55fa7 100644 --- a/include/nova/sync/mutex/pthread_mutex.hpp +++ b/include/nova/sync/mutex/pthread_mutex.hpp @@ -6,7 +6,7 @@ #if __has_include( ) && __has_include( ) # include # include -# if defined( _POSIX_THREADS ) && _POSIX_THREADS >= 0 +# if defined( _POSIX_THREADS ) && _POSIX_THREADS >= 0 && defined( _POSIX_TIMEOUTS ) && ( _POSIX_TIMEOUTS >= 0 ) # define NOVA_SYNC_HAS_PTHREAD_MUTEX 1 # endif # if defined( _POSIX_THREAD_PRIO_PROTECT ) && _POSIX_THREAD_PRIO_PROTECT >= 0 @@ -169,13 +169,27 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) NOVA_SYNC_REENTRANT_CAPABILITY pthread_mut .tv_nsec = long( nsecs.count() ), }; +# if defined( __linux__ ) + return pthread_mutex_clocklock( &mutex_, CLOCK_REALTIME, &ts ) == 0; +# else return pthread_mutex_timedlock( &mutex_, &ts ) == 0; +# endif +# 
if defined( __linux__ ) + } else if constexpr ( std::is_same_v< Clock, std::chrono::steady_clock > ) { + auto ns = std::chrono::time_point_cast< std::chrono::nanoseconds >( abs_time ).time_since_epoch(); + auto secs = std::chrono::duration_cast< std::chrono::seconds >( ns ); + auto nsecs = ns - secs; + + struct timespec ts { + .tv_sec = time_t( secs.count() ), + .tv_nsec = long( nsecs.count() ), + }; + + return pthread_mutex_clocklock( &mutex_, CLOCK_MONOTONIC, &ts ) == 0; +# endif } else { - auto remaining = abs_time - Clock::now(); - if ( remaining <= std::chrono::nanoseconds::zero() ) - return try_lock(); - auto sys_deadline = std::chrono::system_clock::now() + remaining; - return try_lock_until( sys_deadline ); + auto rel_time = abs_time - Clock::now(); + return try_lock_for( rel_time ); } } diff --git a/include/nova/sync/mutex/pthread_rwlock_mutex.hpp b/include/nova/sync/mutex/pthread_rwlock_mutex.hpp new file mode 100644 index 0000000..c840edf --- /dev/null +++ b/include/nova/sync/mutex/pthread_rwlock_mutex.hpp @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2026 Tim Blechmann + +#pragma once + +#if __has_include( ) && __has_include( ) +# include +# include +# if defined( _POSIX_THREADS ) && _POSIX_THREADS >= 0 && defined( _POSIX_READER_WRITER_LOCKS ) \ + && _POSIX_READER_WRITER_LOCKS >= 0 && defined( _POSIX_TIMEOUTS ) && _POSIX_TIMEOUTS >= 0 +# define NOVA_SYNC_HAS_PTHREAD_RWLOCK 1 +# endif +#endif + +#ifdef NOVA_SYNC_HAS_PTHREAD_RWLOCK + +# include +# include +# include + +# include +# include +# include + +namespace nova::sync { + +/// @brief POSIX read-write mutex wrapper around pthread_rwlock_t. +/// +/// Availability: only when the platform provides POSIX reader-writer locks. 
+class NOVA_SYNC_CAPABILITY( "shared_mutex" ) pthread_rwlock_mutex +{ + pthread_rwlock_t rw_ = PTHREAD_RWLOCK_INITIALIZER; + +public: + pthread_rwlock_mutex() + { + [[maybe_unused]] int r = pthread_rwlock_init( &rw_, nullptr ); + assert( r == 0 && "pthread_rwlock_init failed" ); + } + + ~pthread_rwlock_mutex() + { + pthread_rwlock_destroy( &rw_ ); + } + + pthread_rwlock_mutex( const pthread_rwlock_mutex& ) = delete; + pthread_rwlock_mutex& operator=( const pthread_rwlock_mutex& ) = delete; + + // Exclusive (write) lock + void lock() NOVA_SYNC_ACQUIRE() + { + pthread_rwlock_wrlock( &rw_ ); + } + + [[nodiscard]] bool try_lock() noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + return pthread_rwlock_trywrlock( &rw_ ) == 0; + } + + void unlock() noexcept NOVA_SYNC_RELEASE() + { + pthread_rwlock_unlock( &rw_ ); + } + + // Shared (read) lock + void lock_shared() NOVA_SYNC_ACQUIRE_SHARED() + { + pthread_rwlock_rdlock( &rw_ ); + } + + [[nodiscard]] bool try_lock_shared() noexcept NOVA_SYNC_TRY_ACQUIRE_SHARED( true ) + { + return pthread_rwlock_tryrdlock( &rw_ ) == 0; + } + + // Timed shared tries + template < class Rep, class Period > + bool try_lock_shared_for( const std::chrono::duration< Rep, Period >& rel_time ) noexcept + NOVA_SYNC_TRY_ACQUIRE_SHARED( true ) + { + return try_lock_shared_until( std::chrono::steady_clock::now() + rel_time ); + } + + template < class Clock, class Duration > + bool try_lock_shared_until( const std::chrono::time_point< Clock, Duration >& abs_time ) noexcept + NOVA_SYNC_TRY_ACQUIRE_SHARED( true ) + { + if constexpr ( std::is_same_v< Clock, std::chrono::system_clock > ) { + auto ns = std::chrono::time_point_cast< std::chrono::nanoseconds >( abs_time ).time_since_epoch(); + auto secs = std::chrono::duration_cast< std::chrono::seconds >( ns ); + auto nsecs = ns - secs; + struct timespec ts { + .tv_sec = time_t( secs.count() ), + .tv_nsec = long( nsecs.count() ), + }; +# if defined( NOVA_SYNC_USE_PTHREAD_RWLOCK_CLOCK ) + return 
pthread_rwlock_clockrdlock( &rw_, CLOCK_REALTIME, &ts ) == 0; +# else + return pthread_rwlock_timedrdlock( &rw_, &ts ) == 0; +# endif + } else if constexpr ( std::is_same_v< Clock, std::chrono::steady_clock > ) { +# ifdef __linux__ + // steady_clock -> CLOCK_MONOTONIC + auto ns = std::chrono::time_point_cast< std::chrono::nanoseconds >( abs_time ).time_since_epoch(); + auto secs = std::chrono::duration_cast< std::chrono::seconds >( ns ); + auto nsecs = ns - secs; + struct timespec ts { + .tv_sec = time_t( secs.count() ), + .tv_nsec = long( nsecs.count() ), + }; + return pthread_rwlock_clockrdlock( &rw_, CLOCK_MONOTONIC, &ts ) == 0; +# endif + } else { + auto rel_time = abs_time - Clock::now(); + return try_lock_shared_for( rel_time ); + } + } + + void unlock_shared() noexcept NOVA_SYNC_RELEASE_SHARED() + { + pthread_rwlock_unlock( &rw_ ); + } + + // Timed exclusive tries: rely on POSIX timeouts being available + template < class Rep, class Period > + bool try_lock_for( const std::chrono::duration< Rep, Period >& rel_time ) noexcept NOVA_SYNC_TRY_ACQUIRE( true ) + { + return try_lock_until( std::chrono::steady_clock::now() + rel_time ); + } + + template < class Clock, class Duration > + bool try_lock_until( const std::chrono::time_point< Clock, Duration >& abs_time ) noexcept + NOVA_SYNC_TRY_ACQUIRE( true ) + { + if constexpr ( std::is_same_v< Clock, std::chrono::system_clock > ) { + auto ns = std::chrono::time_point_cast< std::chrono::nanoseconds >( abs_time ).time_since_epoch(); + auto secs = std::chrono::duration_cast< std::chrono::seconds >( ns ); + auto nsecs = ns - secs; + struct timespec ts { + .tv_sec = time_t( secs.count() ), + .tv_nsec = long( nsecs.count() ), + }; +# ifdef __linux__ + return pthread_rwlock_clockwrlock( &rw_, CLOCK_REALTIME, &ts ) == 0; +# else + return pthread_rwlock_timedwrlock( &rw_, &ts ) == 0; +# endif + } else if constexpr ( std::is_same_v< Clock, std::chrono::steady_clock > ) { +# ifdef __linux__ + // steady_clock -> CLOCK_MONOTONIC
+ auto ns = std::chrono::time_point_cast< std::chrono::nanoseconds >( abs_time ).time_since_epoch(); + auto secs = std::chrono::duration_cast< std::chrono::seconds >( ns ); + auto nsecs = ns - secs; + struct timespec ts { + .tv_sec = time_t( secs.count() ), + .tv_nsec = long( nsecs.count() ), + }; + return pthread_rwlock_clockwrlock( &rw_, CLOCK_MONOTONIC, &ts ) == 0; +# endif + } else { + auto rel_time = abs_time - Clock::now(); + return try_lock_for( rel_time ); + } + } +}; + +// convenience alias +using pthread_shared_mutex = pthread_rwlock_mutex; + +} // namespace nova::sync + +#endif // NOVA_SYNC_HAS_PTHREAD_RWLOCK diff --git a/test/mutex_types.hpp b/test/mutex_types.hpp index 25177c9..fc5e451 100644 --- a/test/mutex_types.hpp +++ b/test/mutex_types.hpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -14,9 +15,15 @@ #include #ifdef NOVA_SYNC_HAS_PTHREAD_SPINLOCK -# define NOVA_SYNC_HAS_PTHREAD_SPINLOCK_arg , nova::sync::pthread_spinlock_mutex +# define NOVA_SYNC_PTHREAD_SPINLOCK_arg , nova::sync::pthread_spinlock_mutex #else -# define NOVA_SYNC_HAS_PTHREAD_SPINLOCK_arg +# define NOVA_SYNC_PTHREAD_SPINLOCK_arg +#endif + +#ifdef NOVA_SYNC_HAS_PTHREAD_RWLOCK +# define NOVA_SYNC_PTHREAD_RWLOCK_arg , nova::sync::pthread_rwlock_mutex +#else +# define NOVA_SYNC_PTHREAD_RWLOCK_arg +#endif #ifdef NOVA_SYNC_HAS_PTHREAD_MUTEX @@ -30,9 +37,9 @@ #endif #ifdef NOVA_SYNC_HAS_PTHREAD_RT_MUTEX -# define NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg , nova::sync::pthread_priority_inherit_mutex +# define NOVA_SYNC_PTHREAD_RT_MUTEX_arg , nova::sync::pthread_priority_inherit_mutex #else -# define NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg +# define NOVA_SYNC_PTHREAD_RT_MUTEX_arg #endif #ifdef _WIN32 @@ -54,6 +61,12 @@ # define NOVA_SYNC_APPLE_OS_UNFAIR_MUTEX_arg #endif +#ifdef NOVA_SYNC_HAS_PTHREAD_RWLOCK +# define NOVA_SYNC_PTHREAD_RWLOCK_MUTEX_arg , nova::sync::pthread_rwlock_mutex +#else +# define NOVA_SYNC_PTHREAD_RWLOCK_MUTEX_arg +#endif + #ifdef
NOVA_SYNC_HAS_KQUEUE_MUTEX # define NOVA_SYNC_KQUEUE_MUTEX_arg , nova::sync::kqueue_mutex<> # define NOVA_SYNC_FAST_KQUEUE_MUTEX_arg , nova::sync::kqueue_mutex< nova::sync::with_backoff > @@ -75,10 +88,12 @@ // clang-format off #define NOVA_SYNC_MUTEX_TEST_EXTRA_TYPES \ - NOVA_SYNC_HAS_PTHREAD_SPINLOCK_arg \ + NOVA_SYNC_PTHREAD_SPINLOCK_arg \ NOVA_SYNC_PTHREAD_RECURSIVE_MUTEX_arg \ + NOVA_SYNC_PTHREAD_RWLOCK_MUTEX_arg \ NOVA_SYNC_PTHREAD_NONRECURSIVE_MUTEX_arg \ - NOVA_SYNC_HAS_PTHREAD_RT_MUTEX_arg \ + NOVA_SYNC_PTHREAD_RWLOCK_arg \ + NOVA_SYNC_PTHREAD_RT_MUTEX_arg \ NOVA_SYNC_WIN32_CRITICAL_SECTION_MUTEX_arg \ NOVA_SYNC_WIN32_MUTEX_arg \ NOVA_SYNC_WIN32_EVENT_MUTEX_arg \ @@ -143,7 +158,8 @@ using spinlock_mutex_shared_with_backoff = nova::sync::spinlock_mutex< nova:: #define NOVA_SYNC_SHARED_MUTEX_TYPES \ spinlock_mutex_shared, \ - spinlock_mutex_shared_with_backoff + spinlock_mutex_shared_with_backoff \ + NOVA_SYNC_PTHREAD_RWLOCK_arg #define NOVA_SYNC_ASYNC_MUTEX_TEST_TYPES \ NOVA_SYNC_WIN32_EVENT_MUTEX_arg \ From c6407bdae2b681fa1e9b23fc697f19b0d9c37714 Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Sun, 3 May 2026 23:14:17 +0800 Subject: [PATCH 4/6] implement futexes on apple and time backoff --- include/nova/sync/detail/backoff.hpp | 32 +++- include/nova/sync/detail/pause.hpp | 4 +- .../sync/event/parking_auto_reset_event.hpp | 11 +- .../sync/event/parking_manual_reset_event.hpp | 11 +- include/nova/sync/mutex/eventfd_mutex.hpp | 16 +- include/nova/sync/mutex/parking_mutex.hpp | 2 +- .../nova/sync/semaphore/parking_semaphore.hpp | 28 ++-- source/nova/sync/futex/atomic_wait.cpp | 139 ++++++++++++++++++ source/nova/sync/mutex/eventfd_mutex.cpp | 17 +-- source/nova/sync/mutex/kqueue_mutex.cpp | 17 +-- source/nova/sync/mutex/parking_mutex.cpp | 36 ++--- 11 files changed, 241 insertions(+), 72 deletions(-) diff --git a/include/nova/sync/detail/backoff.hpp b/include/nova/sync/detail/backoff.hpp index 7ba68f9..50a77ee 100644 --- 
a/include/nova/sync/detail/backoff.hpp +++ b/include/nova/sync/detail/backoff.hpp @@ -11,8 +11,9 @@ namespace nova::sync::detail { struct exponential_backoff { - int backoff = 8; - static constexpr int spin_limit = 1 << 12; + int backoff = 2; + // Spin up to 16384 cycles before yielding (tuned via benchmarking for typical workloads) + static constexpr int spin_limit = 1 << 14; void run() { @@ -26,4 +27,31 @@ struct exponential_backoff } }; +enum class backoff_result : uint8_t +{ + success, + retry, + retry_without_backoff +}; + +template < typename Functor > + requires( std::is_same_v< std::invoke_result_t< Functor >, backoff_result > ) +[[nodiscard]] bool run_with_exponential_backoff_until( Functor&& func ) +{ + exponential_backoff backoff; + + do { + backoff_result state = func(); + switch ( state ) { + case backoff_result::success: return true; + case backoff_result::retry_without_backoff: continue; + case backoff_result::retry: { + backoff.run(); + break; + } + } + } while ( backoff.backoff < exponential_backoff::spin_limit ); + return false; +} + } // namespace nova::sync::detail diff --git a/include/nova/sync/detail/pause.hpp b/include/nova/sync/detail/pause.hpp index 0007a24..3509534 100644 --- a/include/nova/sync/detail/pause.hpp +++ b/include/nova/sync/detail/pause.hpp @@ -18,7 +18,9 @@ inline void pause() #if defined( __x86_64__ ) || defined( _M_X64 ) || defined( __i386__ ) || defined( _M_IX86 ) _mm_pause(); #elif defined( __arm64__ ) || defined( __aarch64__ ) || defined( __arm__ ) || defined( _M_ARM64 ) || defined( _M_ARM ) -# if defined( _MSC_VER ) +# if __has_builtin( __builtin_arm_isb ) + __builtin_arm_isb( 15 ); +# elif defined( _MSC_VER ) # if defined( _ARM_BARRIER_SY ) # define NOVA_SYNC__ARM_BARRIER_SY _ARM_BARRIER_SY # else diff --git a/include/nova/sync/event/parking_auto_reset_event.hpp b/include/nova/sync/event/parking_auto_reset_event.hpp index 8679737..06905a6 100644 --- a/include/nova/sync/event/parking_auto_reset_event.hpp +++ 
b/include/nova/sync/event/parking_auto_reset_event.hpp @@ -81,12 +81,13 @@ class parking_auto_reset_event void wait() noexcept { if constexpr ( use_backoff ) { - detail::exponential_backoff backoff; - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + bool success = detail::run_with_exponential_backoff_until( [ this ]() -> detail::backoff_result { if ( try_wait() ) - return; - backoff.run(); - } + return detail::backoff_result::success; + return detail::backoff_result::retry; + } ); + if ( success ) + return; } int32_t prev = state_.fetch_sub( 1, std::memory_order_acquire ); diff --git a/include/nova/sync/event/parking_manual_reset_event.hpp b/include/nova/sync/event/parking_manual_reset_event.hpp index 2d4a09a..4d9fd66 100644 --- a/include/nova/sync/event/parking_manual_reset_event.hpp +++ b/include/nova/sync/event/parking_manual_reset_event.hpp @@ -70,12 +70,13 @@ class parking_manual_reset_event void wait() noexcept { if constexpr ( use_backoff ) { - detail::exponential_backoff backoff; - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + bool success = detail::run_with_exponential_backoff_until( [ this ]() -> detail::backoff_result { if ( state_.load( std::memory_order_acquire ) != 0u ) - return; - backoff.run(); - } + return detail::backoff_result::success; + return detail::backoff_result::retry_without_backoff; + } ); + if ( success ) + return; } if ( state_.load( std::memory_order_acquire ) != 0u ) diff --git a/include/nova/sync/mutex/eventfd_mutex.hpp b/include/nova/sync/mutex/eventfd_mutex.hpp index ff1e936..2ed305e 100644 --- a/include/nova/sync/mutex/eventfd_mutex.hpp +++ b/include/nova/sync/mutex/eventfd_mutex.hpp @@ -166,17 +166,17 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fast_eventfd_mutex_impl if constexpr ( std::is_same_v< Clock, std::chrono::system_clock > || std::is_same_v< Clock, std::chrono::steady_clock > ) { // Brief spin phase (same as try_lock_for_ns) - detail::exponential_backoff backoff; - uint32_t 
s = state_.load( std::memory_order_relaxed ); - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + uint32_t s = state_.load( std::memory_order_relaxed ); + bool success = detail::run_with_exponential_backoff_until( [ & ]() -> detail::backoff_result { if ( ( s & 1u ) == 0 ) { if ( state_.compare_exchange_weak( s, s | 1u, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; - continue; + return detail::backoff_result::success; + return detail::backoff_result::retry; } - backoff.run(); - s = state_.load( std::memory_order_relaxed ); - } + return detail::backoff_result::retry; + } ); + if ( success ) + return true; // Register as waiter before calling ppoll_until s = add_async_waiter(); // returns state after +2 diff --git a/include/nova/sync/mutex/parking_mutex.hpp b/include/nova/sync/mutex/parking_mutex.hpp index 2a0b4bb..dd29004 100644 --- a/include/nova/sync/mutex/parking_mutex.hpp +++ b/include/nova/sync/mutex/parking_mutex.hpp @@ -47,7 +47,7 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) parking_mutex_plain { uint32_t prev = state_.fetch_and( ~1u, std::memory_order_release ); if ( prev > 1 ) { -#ifdef __linux__ +#if defined( __linux__ ) || defined( __APPLE__ ) atomic_notify_one( state_ ); #else state_.notify_one(); diff --git a/include/nova/sync/semaphore/parking_semaphore.hpp b/include/nova/sync/semaphore/parking_semaphore.hpp index 9d7f30f..3e00735 100644 --- a/include/nova/sync/semaphore/parking_semaphore.hpp +++ b/include/nova/sync/semaphore/parking_semaphore.hpp @@ -70,15 +70,17 @@ class parking_semaphore void acquire() noexcept { if constexpr ( use_backoff ) { - detail::exponential_backoff backoff; - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { - auto c = count_.load( std::memory_order_relaxed ); + auto c = count_.load( std::memory_order_relaxed ); + bool success = detail::run_with_exponential_backoff_until( [ & ]() -> detail::backoff_result { if ( c > 0 ) { if ( count_.compare_exchange_weak( c, c
- 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; + return detail::backoff_result::success; + return detail::backoff_result::retry_without_backoff; } - backoff.run(); - } + return detail::backoff_result::retry; + } ); + if ( success ) + return; } auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); @@ -156,15 +158,17 @@ class timed_semaphore void acquire() noexcept { if constexpr ( use_backoff ) { - detail::exponential_backoff backoff; - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { - auto c = count_.load( std::memory_order_relaxed ); + auto c = count_.load( std::memory_order_relaxed ); + bool success = detail::run_with_exponential_backoff_until( [ & ]() -> detail::backoff_result { if ( c > 0 ) { if ( count_.compare_exchange_weak( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; + return detail::backoff_result::success; + return detail::backoff_result::retry_without_backoff; } - backoff.run(); - } + return detail::backoff_result::retry; + } ); + if ( success ) + return; } auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); diff --git a/source/nova/sync/futex/atomic_wait.cpp b/source/nova/sync/futex/atomic_wait.cpp index 8513c16..ba35d5a 100644 --- a/source/nova/sync/futex/atomic_wait.cpp +++ b/source/nova/sync/futex/atomic_wait.cpp @@ -29,6 +29,11 @@ # define NOVA_SYNC_FUTEX_WIN32 1 +#elif defined( __APPLE__ ) +# include + +# define NOVA_SYNC_FUTEX_APPLE 1 + #else // Portable fallback: hash-table of mutex + condvar buckets. 
# include @@ -360,6 +365,140 @@ void atomic_notify_all( std::atomic< int32_t >& atom ) noexcept ::WakeByAddressAll( reinterpret_cast< void* >( std::addressof( atom ) ) ); } +// ============================================================================= +// Apple — os_sync_wait_on_address +// ============================================================================= +#elif defined( NOVA_SYNC_FUTEX_APPLE ) + +namespace { + +// Convert system_clock deadline to steady_clock deadline +std::chrono::time_point< std::chrono::steady_clock > +system_clock_to_steady( const std::chrono::time_point< std::chrono::system_clock >& deadline ) noexcept +{ + auto system_now = std::chrono::system_clock::now(); + auto steady_now = std::chrono::steady_clock::now(); + auto offset = deadline - system_now; + return steady_now + offset; +} + +} // namespace + +void atomic_wait( std::atomic< int32_t >& atom, int32_t old, std::memory_order order ) noexcept +{ + { + auto load_order = ( order != std::memory_order_relaxed ) ? std::memory_order_acquire : std::memory_order_relaxed; + if ( atom.load( load_order ) != old ) + return; + } + + // Use blocking wait with no timeout + ::os_sync_wait_on_address( reinterpret_cast< void* >( &atom ), + uint64_t( old ), + sizeof( int32_t ), + OS_SYNC_WAIT_ON_ADDRESS_NONE ); + + if ( order != std::memory_order_relaxed ) + std::atomic_thread_fence( std::memory_order_acquire ); +} + +bool atomic_wait_for( std::atomic< int32_t >& atom, + int32_t old, + std::chrono::nanoseconds rel, + std::memory_order order ) noexcept +{ + if ( rel <= 0ns ) + return atom.load( std::memory_order_relaxed ) != old; + + { + auto load_order = ( order != std::memory_order_relaxed ) ? 
std::memory_order_acquire : std::memory_order_relaxed; + if ( atom.load( load_order ) != old ) + return true; + } + + // Ensure timeout is at least 1ns (API requires non-zero) + uint64_t timeout_ns = std::min( uint64_t( rel.count() ), UINT64_MAX ); + + int rc = ::os_sync_wait_on_address_with_timeout( reinterpret_cast< void* >( &atom ), + uint64_t( old ), + sizeof( int32_t ), + OS_SYNC_WAIT_ON_ADDRESS_NONE, + OS_CLOCK_MACH_ABSOLUTE_TIME, + timeout_ns ); + + (void)rc; // Ignore return value; just check current value + + if ( order != std::memory_order_relaxed ) + std::atomic_thread_fence( std::memory_order_acquire ); + + return atom.load( std::memory_order_relaxed ) != old; +} + +bool atomic_wait_until( std::atomic< int32_t >& atom, + int32_t old, + const std::chrono::time_point< std::chrono::steady_clock >& deadline, + std::memory_order order ) noexcept +{ + { + auto load_order = ( order != std::memory_order_relaxed ) ? std::memory_order_acquire : std::memory_order_relaxed; + if ( atom.load( load_order ) != old ) + return true; + } + + while ( true ) { + auto remaining = deadline - std::chrono::steady_clock::now(); + if ( remaining <= 0ns ) + return atom.load( std::memory_order_relaxed ) != old; + + // Ensure timeout is at least 1ns (API requires non-zero) + uint64_t timeout_ns = std::min( uint64_t( remaining.count() ), UINT64_MAX ); + + int rc = ::os_sync_wait_on_address_with_timeout( reinterpret_cast< void* >( &atom ), + uint64_t( old ), + sizeof( int32_t ), + OS_SYNC_WAIT_ON_ADDRESS_NONE, + OS_CLOCK_MACH_ABSOLUTE_TIME, + timeout_ns ); + + (void)rc; // Ignore return value; just check current value + + if ( order != std::memory_order_relaxed ) + std::atomic_thread_fence( std::memory_order_acquire ); + + if ( atom.load( std::memory_order_relaxed ) != old ) + return true; + + if ( std::chrono::steady_clock::now() >= deadline ) + return false; + } +} + +bool atomic_wait_until( std::atomic< int32_t >& atom, + int32_t old, + const std::chrono::time_point< 
std::chrono::system_clock >& deadline, + std::memory_order order ) noexcept +{ + { + auto load_order = ( order != std::memory_order_relaxed ) ? std::memory_order_acquire : std::memory_order_relaxed; + if ( atom.load( load_order ) != old ) + return true; + } + + // Convert system_clock deadline to steady_clock and use that path + auto steady_deadline = system_clock_to_steady( deadline ); + return atomic_wait_until( atom, old, steady_deadline, order ); +} + +void atomic_notify_one( std::atomic< int32_t >& atom ) noexcept +{ + ::os_sync_wake_by_address_any( reinterpret_cast< void* >( &atom ), sizeof( int32_t ), OS_SYNC_WAKE_BY_ADDRESS_NONE ); +} + +void atomic_notify_all( std::atomic< int32_t >& atom ) noexcept +{ + ::os_sync_wake_by_address_all( reinterpret_cast< void* >( &atom ), sizeof( int32_t ), OS_SYNC_WAKE_BY_ADDRESS_NONE ); +} + // ============================================================================= // Portable fallback — hash table of mutex + condvar buckets // ============================================================================= diff --git a/source/nova/sync/mutex/eventfd_mutex.cpp b/source/nova/sync/mutex/eventfd_mutex.cpp index 577f64c..d23227a 100644 --- a/source/nova/sync/mutex/eventfd_mutex.cpp +++ b/source/nova/sync/mutex/eventfd_mutex.cpp @@ -94,19 +94,18 @@ void fast_eventfd_mutex_impl::consume_lock() const noexcept void fast_eventfd_mutex_impl::lock_slow() noexcept { - detail::exponential_backoff backoff; - - uint32_t s = state_.load( std::memory_order_relaxed ); - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + uint32_t s = state_.load( std::memory_order_relaxed ); + bool success = detail::run_with_exponential_backoff_until( [ & ]() -> detail::backoff_result { if ( ( s & 1u ) == 0 ) { if ( state_.compare_exchange_weak( s, s | 1u, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; - continue; + return detail::backoff_result::success; + return detail::backoff_result::retry; } + return 
detail::backoff_result::failure; + } ); - backoff.run(); - s = state_.load( std::memory_order_relaxed ); - } + if ( success ) + return; s = add_async_waiter(); detail::async_waiter_guard< fast_eventfd_mutex_impl > guard( *this, detail::adopt_async_waiter ); diff --git a/source/nova/sync/mutex/kqueue_mutex.cpp b/source/nova/sync/mutex/kqueue_mutex.cpp index 7db4a4a..8877b1e 100644 --- a/source/nova/sync/mutex/kqueue_mutex.cpp +++ b/source/nova/sync/mutex/kqueue_mutex.cpp @@ -111,19 +111,18 @@ void fast_kqueue_mutex_impl::consume_lock() const noexcept void fast_kqueue_mutex_impl::lock_slow() noexcept { - detail::exponential_backoff backoff; - - uint32_t s = state_.load( std::memory_order_relaxed ); - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + uint32_t s = state_.load( std::memory_order_relaxed ); + bool success = detail::run_with_exponential_backoff_until( [ & ]() -> detail::backoff_result { if ( ( s & 1u ) == 0 ) { if ( state_.compare_exchange_weak( s, s | 1u, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; - continue; + return detail::backoff_result::success; + return detail::backoff_result::retry_without_backoff; } + return detail::backoff_result::retry; + } ); - backoff.run(); - s = state_.load( std::memory_order_relaxed ); - } + if ( success ) + return; s = add_async_waiter(); detail::async_waiter_guard< fast_kqueue_mutex_impl > guard( *this, detail::adopt_async_waiter ); diff --git a/source/nova/sync/mutex/parking_mutex.cpp b/source/nova/sync/mutex/parking_mutex.cpp index 3aa93f1..f0c1bd1 100644 --- a/source/nova/sync/mutex/parking_mutex.cpp +++ b/source/nova/sync/mutex/parking_mutex.cpp @@ -27,7 +27,7 @@ void parking_mutex_plain::lock_slow( uint32_t expected ) noexcept if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) return; } else { -#ifdef __linux__ +#if defined( __linux__ ) || defined( __APPLE__ ) // On Linux, use atomic_wait (futex-based) for 
better performance atomic_wait( state_, expected, std::memory_order_relaxed ); #else @@ -45,20 +45,18 @@ void parking_mutex_plain::lock_slow( uint32_t expected ) noexcept NOVA_SYNC_NOINLINE void parking_mutex_backoff::lock_slow( uint32_t expected ) noexcept { - detail::exponential_backoff backoff; - - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + bool success = detail::run_with_exponential_backoff_until( [ & ]() -> detail::backoff_result { if ( ( expected & 1 ) == 0 ) { if ( state_.compare_exchange_weak( expected, expected | 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; + return detail::backoff_result::success; // CAS failed — expected was updated; re-evaluate immediately without backing off - continue; + return detail::backoff_result::retry_without_backoff; } - - backoff.run(); - expected = state_.load( std::memory_order_relaxed ); - } + return detail::backoff_result::retry; + } ); + if ( success ) + return; // Spinning exhausted — register as waiter and park state_.fetch_add( 2, std::memory_order_relaxed ); @@ -70,7 +68,7 @@ void parking_mutex_backoff::lock_slow( uint32_t expected ) noexcept if ( state_.compare_exchange_weak( expected, desired, std::memory_order_acquire, std::memory_order_relaxed ) ) return; } else { -#ifdef __linux__ +#if defined( __linux__ ) || defined( __APPLE__ ) // On Linux, use atomic_wait (futex-based) for better performance atomic_wait( state_, expected, std::memory_order_relaxed ); #else @@ -109,20 +107,18 @@ void parking_mutex_timed::lock_slow( uint32_t expected ) noexcept NOVA_SYNC_NOINLINE void parking_mutex_timed_backoff::lock_slow( uint32_t expected ) noexcept { - detail::exponential_backoff backoff; - - while ( backoff.backoff < detail::exponential_backoff::spin_limit ) { + bool success = detail::run_with_exponential_backoff_until( [ & ]() -> detail::backoff_result { if ( ( expected & 1 ) == 0 ) { if ( state_.compare_exchange_weak( expected, expected | 1, std::memory_order_acquire, 
std::memory_order_relaxed ) ) - return; + return detail::backoff_result::success; // CAS failed — expected was updated; re-evaluate immediately without backing off - continue; + return detail::backoff_result::retry_without_backoff; } - - backoff.run(); - expected = state_.load( std::memory_order_relaxed ); - } + return detail::backoff_result::retry; + } ); + if ( success ) + return; // Spinning exhausted — register as waiter and park state_.fetch_add( 2, std::memory_order_relaxed ); From 461b79c37329f29d5ad0b919d9ade9a75584276e Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Tue, 5 May 2026 20:45:32 +0800 Subject: [PATCH 5/6] update benchmarks --- Readme.md | 4 +- benchmarks/macos_m4_multi-threaded.svg | 1848 ------------ ...cos_m4_mutex_benchmarks_multi-threaded.svg | 2486 +++++++++++++++++ ...os_m4_mutex_benchmarks_single-threaded.svg | 2405 ++++++++++++++++ benchmarks/macos_m4_single-threaded.svg | 1883 ------------- 5 files changed, 4893 insertions(+), 3733 deletions(-) delete mode 100644 benchmarks/macos_m4_multi-threaded.svg create mode 100644 benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg create mode 100644 benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg delete mode 100644 benchmarks/macos_m4_single-threaded.svg diff --git a/Readme.md b/Readme.md index b2ae113..9276c23 100644 --- a/Readme.md +++ b/Readme.md @@ -231,11 +231,11 @@ Multi-threaded benchmark: Single-threaded benchmark: -![macOS single-threaded](benchmarks/macos_m4_single-threaded.svg) +![macOS single-threaded](benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg) Multi-threaded benchmark: -![macOS multi-threaded](benchmarks/macos_m4_multi-threaded.svg) +![macOS multi-threaded](benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg) #### Windows 11 — Intel i7-14700K diff --git a/benchmarks/macos_m4_multi-threaded.svg b/benchmarks/macos_m4_multi-threaded.svg deleted file mode 100644 index ed12fa8..0000000 --- a/benchmarks/macos_m4_multi-threaded.svg +++ /dev/null @@ 
-1,1848 +0,0 @@ - - - - - - - - 2026-04-25T20:59:40.000778 - image/svg+xml - - - Matplotlib v3.10.8, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg b/benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg new file mode 100644 index 0000000..a4ce8a7 --- /dev/null +++ b/benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg @@ -0,0 +1,2486 @@ + + + + + + + + 2026-05-05T20:15:21.319817 + image/svg+xml + + + Matplotlib v3.10.9, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg b/benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg new file mode 100644 index 0000000..2251aba --- /dev/null +++ b/benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg @@ -0,0 +1,2405 @@ + + + + + + + + 2026-05-05T20:15:21.213891 + image/svg+xml + + + Matplotlib v3.10.9, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/macos_m4_single-threaded.svg b/benchmarks/macos_m4_single-threaded.svg deleted file mode 100644 index 3faba36..0000000 --- a/benchmarks/macos_m4_single-threaded.svg +++ /dev/null @@ -1,1883 +0,0 @@ - - - - - - - - 2026-04-25T20:59:39.933376 - image/svg+xml - - - Matplotlib v3.10.8, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - From 8b1a36c8352c20982bdedd209cf97556832b021d Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Thu, 7 May 2026 21:49:50 +0800 Subject: [PATCH 6/6] updates, cleanups, remove dead code --- CMakeLists.txt | 5 - Readme.md | 201 +- .../linux_intel_14700K_multi-threaded.svg | 2098 ----------- ...14700K_mutex_benchmarks_multi-threaded.svg | 3304 ++++++++++++++++ ...4700K_mutex_benchmarks_single-threaded.svg | 3077 +++++++++++++++ .../linux_intel_14700K_single-threaded.svg | 1948 ---------- ...cos_m4_mutex_benchmarks_multi-threaded.svg | 3230 ++++++++-------- ...os_m4_mutex_benchmarks_single-threaded.svg | 3333 +++++++++-------- .../win32_intel_14700K_multi-threaded.svg | 1912 ---------- ...14700K_mutex_benchmarks_multi-threaded.svg | 2831 ++++++++++++++ ...4700K_mutex_benchmarks_single-threaded.svg | 2632 +++++++++++++ .../win32_intel_14700K_single-threaded.svg | 1816 --------- .../sync/event/parking_auto_reset_event.hpp | 7 - 
.../sync/event/parking_manual_reset_event.hpp | 8 - .../sync/event/timed_auto_reset_event.hpp | 144 - .../sync/event/timed_manual_reset_event.hpp | 160 - include/nova/sync/mutex/eventfd_mutex.hpp | 4 +- .../nova/sync/semaphore/parking_semaphore.hpp | 20 - .../nova/sync/semaphore/posix_semaphore.hpp | 3 +- .../semaphore/timed_counting_semaphore.hpp | 105 - .../sync/event/timed_auto_reset_event.cpp | 55 - .../sync/event/timed_manual_reset_event.cpp | 25 - source/nova/sync/mutex/eventfd_mutex.cpp | 4 +- test/event_benchmarks.cpp | 14 +- test/event_test.cpp | 26 +- test/mutex_benchmarks.cpp | 21 +- test/mutex_types.hpp | 2 +- test/policy_mutex_test.cpp | 8 +- test/semaphore_test.cpp | 4 - test/semaphore_types.hpp | 33 +- tools/bench_plot.py | 2 +- 31 files changed, 15576 insertions(+), 11456 deletions(-) delete mode 100644 benchmarks/linux_intel_14700K_multi-threaded.svg create mode 100644 benchmarks/linux_intel_14700K_mutex_benchmarks_multi-threaded.svg create mode 100644 benchmarks/linux_intel_14700K_mutex_benchmarks_single-threaded.svg delete mode 100644 benchmarks/linux_intel_14700K_single-threaded.svg delete mode 100644 benchmarks/win32_intel_14700K_multi-threaded.svg create mode 100644 benchmarks/win32_intel_14700K_mutex_benchmarks_multi-threaded.svg create mode 100644 benchmarks/win32_intel_14700K_mutex_benchmarks_single-threaded.svg delete mode 100644 benchmarks/win32_intel_14700K_single-threaded.svg delete mode 100644 include/nova/sync/event/timed_auto_reset_event.hpp delete mode 100644 include/nova/sync/event/timed_manual_reset_event.hpp delete mode 100644 include/nova/sync/semaphore/timed_counting_semaphore.hpp delete mode 100644 source/nova/sync/event/timed_auto_reset_event.cpp delete mode 100644 source/nova/sync/event/timed_manual_reset_event.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 696f718..72acf5c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -84,8 +84,6 @@ set(headers include/nova/sync/event/support/boost_asio_support.hpp 
include/nova/sync/event/support/libdispatch_support.hpp include/nova/sync/event/support/qt_support.hpp - include/nova/sync/event/timed_auto_reset_event.hpp - include/nova/sync/event/timed_manual_reset_event.hpp include/nova/sync/semaphore/concepts.hpp include/nova/sync/semaphore/detail/async_support.hpp include/nova/sync/semaphore/dispatch_semaphore.hpp @@ -118,7 +116,6 @@ set(headers include/nova/sync/mutex/pthread_rwlock_mutex.hpp include/nova/sync/mutex/pthread_spinlock_mutex.hpp - include/nova/sync/mutex/spinlock_mutex.hpp include/nova/sync/mutex/support/async_waiter_guard.hpp include/nova/sync/mutex/support/boost_asio_support.hpp @@ -137,8 +134,6 @@ set(sources source/nova/sync/futex/atomic_wait.cpp source/nova/sync/event/native_auto_reset_event.cpp source/nova/sync/event/native_manual_reset_event.cpp - source/nova/sync/event/timed_auto_reset_event.cpp - source/nova/sync/event/timed_manual_reset_event.cpp source/nova/sync/semaphore/dispatch_semaphore.cpp source/nova/sync/semaphore/eventfd_semaphore.cpp source/nova/sync/semaphore/kqueue_semaphore.cpp diff --git a/Readme.md b/Readme.md index 9276c23..ccf3c9d 100644 --- a/Readme.md +++ b/Readme.md @@ -1,84 +1,60 @@ # nova::sync -Synchronization primitives for C++20: specialized mutex and event types optimized for different use cases. +Synchronization primitives for C++20: specialized mutex, semaphore and event types optimized for different use cases. +Most notably this includes variants which can be integrated into native event loops (Boost.Asio, libdispatch, epoll, Qt, etc.), allowing to wait asynchronously for a lock or event without blocking the thread. 
## Mutex Types -| Type | Characteristics | Named Requirement | -|------|-----------------|-------------------| -| `parking_mutex<>` | Futex-based mutex, parks immediately | `Mutex` | -| `parking_mutex` | Futex-based mutex, exponential backoff before parking | `Mutex` | -| `parking_mutex` | Futex-based mutex, no spin, timed waits | `TimedMutex` | -| `parking_mutex` | Futex-based mutex, exponential backoff, timed waits | `TimedMutex` | -| `ticket_mutex<>` | Fair FIFO ticket lock, futex sleep | `TimedMutex` | -| `ticket_mutex` | Fair FIFO ticket lock with exponential backoff | `TimedMutex` | -| `spinlock_mutex<>` | Spinlock, CPU-pause hints | `Mutex` | -| `spinlock_mutex` | Spinlock, exponential backoff | `Mutex` | -| `spinlock_mutex` | Recursive spinlock | `Mutex` | -| `spinlock_mutex` | Recursive spinlock with backoff | `Mutex` | -| `spinlock_mutex` | Shared (reader-writer) spinlock | `SharedMutex` | -| `spinlock_mutex` | Shared spinlock with exponential backoff | `SharedMutex` | -| `pthread_spinlock_mutex` | `pthread_spinlock_t` based spinlock, POSIX only | `Mutex` | -| `pthread_mutex<>` | POSIX `pthread_mutex_t` (default type) | `TimedMutex` | -| `pthread_mutex` | Recursive POSIX mutex | `TimedMutex` | -| `pthread_mutex` | Error-checking POSIX mutex | `TimedMutex` | -| `pthread_mutex` | Adaptive-spin POSIX mutex (Linux) | `TimedMutex` | -| `pthread_mutex` | POSIX mutex, priority inheritance (RT) | `TimedMutex` | -| `pthread_mutex>` | POSIX mutex, priority ceiling N (RT) | `TimedMutex` | -| `win32_critical_section_mutex<>` | Win32 CRITICAL_SECTION, recursive, Windows only | `Mutex` | -| `win32_critical_section_mutex>` | Win32 CRITICAL_SECTION with custom spin count | `Mutex` | -| `win32_mutex` | Win32 kernel mutex, async-capable, Windows only | `TimedMutex` | -| `win32_srw_mutex` | Win32 SRW lock (ultra-lightweight), Windows only | `Mutex` | -| `apple_os_unfair_mutex` | Apple `os_unfair_lock`, macOS/iOS only | `Mutex` | -| `kqueue_mutex<>` | Apple kqueue-based 
async mutex | `TimedMutex` | -| `kqueue_mutex` | kqueue mutex with exponential backoff | `TimedMutex` | -| `eventfd_mutex<>` | Linux eventfd-based async mutex | `TimedMutex` | -| `eventfd_mutex` | eventfd mutex with exponential backoff | `TimedMutex` | -| `native_async_mutex` | Cross-platform alias: `win32_event_mutex` / `kqueue_mutex` / `eventfd_mutex` | `TimedMutex` | - -### Policy parameters - -All policy types live in `nova/sync/mutex/policies.hpp`: - -| Policy | Effect | -|--------|--------| -| `with_backoff` | Exponential backoff with CPU pause hints before blocking | -| `recursive` | Allow re-entrant locking from the owning thread (`spinlock_mutex` only) | -| `shared` | Enable shared (reader-writer) locking via `lock_shared()` (`spinlock_mutex` only; mutually exclusive with `recursive`) | -| `priority_inherit` | PTHREAD_PRIO_INHERIT — owner boosted to highest waiter priority | -| `priority_ceiling` | PTHREAD_PRIO_PROTECT — all holders elevated to ceiling N | -| `pthread_recursive` | PTHREAD_MUTEX_RECURSIVE — re-entrant locking | -| `pthread_errorcheck` | PTHREAD_MUTEX_ERRORCHECK — error on double-lock | -| `pthread_adaptive` | PTHREAD_MUTEX_ADAPTIVE_NP — adaptive spin (Linux only) | -| `win32_spin_count` | Spin count for `InitializeCriticalSectionAndSpinCount` | - -### Convenience aliases - -```cpp -using pthread_default_mutex = pthread_mutex<>; -using pthread_recursive_mutex = pthread_mutex< pthread_recursive >; -using pthread_priority_inherit_mutex = pthread_mutex< priority_inherit >; -template < int N > -using pthread_priority_ceiling_mutex = pthread_mutex< priority_ceiling< N > >; -``` - ### `parking_mutex` - -Futex-based mutex using `std::atomic::wait()`. Fast path acquires in one CAS; slow path parks the calling thread. With `with_backoff`, spins briefly before parking — lower latency under brief contention. Add `timed` to enable `try_lock_for` / `try_lock_until`. +Futex-based mutex with configurable backoff. 
+- **Policies**: + - `with_backoff`: Spins briefly with exponential backoff before parking, resulting in lower latency under brief contention. + - `timed`: Enables timed waits via `try_lock_for` / `try_lock_until`. ### `ticket_mutex` - -FIFO ticket lock guaranteeing strict acquisition order. Prevents starvation under sustained contention. Not suitable for high-throughput low-contention workloads. - -### POSIX mutexes - -`pthread_mutex<>` wraps `pthread_mutex_t`. Priority-protocol variants (`priority_inherit`, `priority_ceiling`) prevent priority inversion in real-time systems — higher overhead; requires RT scheduling for ceiling variant. - -### Platform-specific async mutexes - -`win32_event_mutex`, `kqueue_mutex`, `eventfd_mutex` (and their `` policy variants) -expose native OS handles enabling integration with event loops (Boost.Asio, libdispatch, epoll, Qt, etc.). -The `native_async_mutex` alias resolves to the fastest variant (`with_backoff`) for the current platform. +Fair FIFO ticket lock guaranteeing strict acquisition order. Prevents starvation under sustained contention, but is not suitable for high-throughput low-contention workloads. +- **Policies**: + - `with_backoff`: Spins with exponential backoff before parking. + +### `spinlock_mutex` +Spinlock with different customization options. +- **Policies**: + - `with_backoff`: Enables exponential backoff. + - `recursive`: Allows re-entrant locking from the owning thread. + - `shared`: Enables shared (reader-writer) locking via `lock_shared()` (mutually exclusive with `recursive`). + +### POSIX Mutexes +POSIX wrappers available when targeting POSIX systems. +- **`pthread_mutex`**: Wraps `pthread_mutex_t`. + - **Policies**: + - `pthread_recursive`: Enables re-entrant locking (`PTHREAD_MUTEX_RECURSIVE`). + - `pthread_errorcheck`: Emits an error on double-lock (`PTHREAD_MUTEX_ERRORCHECK`). + - `pthread_adaptive`: Enables adaptive-spin POSIX mutexes (Linux only, `PTHREAD_MUTEX_ADAPTIVE_NP`). 
+ - `priority_inherit`: Priority inheritance protocol where the owner is boosted to the highest waiter priority (`PTHREAD_PRIO_INHERIT`). + - `priority_ceiling`: Priority ceiling protocol where all holders are elevated to ceiling N (`PTHREAD_PRIO_PROTECT`). +- **`pthread_spinlock_mutex`**: Wraps `pthread_spinlock_t`. +- **`pthread_rwlock_mutex`**: Wraps `pthread_rwlock_t` for a POSIX reader-writer lock. + +Note, `priority_inherit` and `priority_ceiling` policies typically require `CAP_SYS_NICE` or equivalent. + +### Windows Mutexes +Mutexes provided by the Win32 runtime. +- **`win32_critical_section_mutex`**: Wraps a Windows `CRITICAL_SECTION` (which is recursive by default). + - **Policies**: `win32_spin_count` sets a custom spin count before falling back to a kernel wait. +- **`win32_srw_mutex`**: Wraps the Slim Reader/Writer (SRW) lock for an ultra-lightweight Windows mutex. + +### macOS / iOS Mutexes +Mutexes provided by Apple's runtime. +- **`apple_os_unfair_mutex`**: Wraps `os_unfair_lock`. + +### Platform-specific Async Mutexes +`win32_event_mutex` (Windows), `kqueue_mutex` (macOS/iOS), and `eventfd_mutex` (Linux). +These expose native OS handles (`native_handle()`) enabling integration with event loops (Boost.Asio, libdispatch, epoll, Qt, etc.). +- **Policies**: + - `with_backoff`: Spins with exponential backoff before falling back to OS waits. +- **Aliases**: + - `native_async_mutex`: Resolves to the pure async mutex for the current platform (e.g. `kqueue_mutex<>`). + - `native_fast_async_mutex`: Resolves to the async mutex with backoff for the current platform (e.g. `kqueue_mutex`). Handlers receive an `expected, std::error_code>` (`std::expected` or `tl::expected`): @@ -132,7 +108,7 @@ std::unique_lock lock = fut.get(); // blocks until acquired; lock.owns_lock() == ### Thread Safety Analysis - All mutex types are annotated for Clang's thread-safety analysis (`-Wthread-safety`). 
Macros in `` map to TSA attributes (e.g., `NOVA_SYNC_GUARDED_BY`, `NOVA_SYNC_REQUIRES`, `NOVA_SYNC_EXCLUDES`, `NOVA_SYNC_ACQUIRE`, `NOVA_SYNC_RELEASE`) on Clang and expand to nothing on other compilers. +All mutex types are annotated for Clang's thread-safety analysis (`-Wthread-safety`). Macros in `` map to TSA attributes (e.g., `NOVA_SYNC_GUARDED_BY`, `NOVA_SYNC_REQUIRES`, `NOVA_SYNC_EXCLUDES`, `NOVA_SYNC_ACQUIRE`, `NOVA_SYNC_RELEASE`) on Clang and expand to nothing on other compilers. **Typical usage:** ```cpp @@ -151,13 +127,7 @@ increment(); // Error: mutex not held ### `locked_object` — Rust-inspired Thread-Safe Value Wrapper -Type-safe RAII wrapper pairing a value `T` with a `Mutex`, enforcing synchronized access at compile time. The value is only accessible through lock guards. Supports exclusive locking (mutual exclusion) and shared locking (read-write patterns with `std::shared_mutex` or compatible). - -**Lock guards:** -- `locked_object_guard`: Exclusive lock with constness determined by T template parameter -- `shared_locked_object_guard`: Shared lock (read-lock) with const access; requires `std::shared_mutex` - -**Key feature:** Const instances can acquire exclusive locks (enabling interior mutability patterns while maintaining thread safety). +Type-safe RAII wrapper pairing a value `T` with a `Mutex`, enforcing synchronized via a smart-pointer style interface. The value is only accessible through lock guards. Supports exclusive locking (mutual exclusion) and shared locking (read-write patterns with `std::shared_mutex` or compatible). ```cpp #include @@ -221,11 +191,11 @@ The following results were recorded on Ubuntu 25.04 on an Intel i7-14700K. 
Single-threaded benchmark: -![Linux single-threaded benchmark](benchmarks/linux_intel_14700K_single-threaded.svg) +![Linux single-threaded benchmark](benchmarks/linux_intel_14700K_mutex_benchmarks_single-threaded.svg) Multi-threaded benchmark: -![Linux multi-threaded benchmark](benchmarks/linux_intel_14700K_multi-threaded.svg) +![Linux multi-threaded benchmark](benchmarks/linux_intel_14700K_mutex_benchmarks_multi-threaded.svg) #### macOS - Apple M4 Pro @@ -241,33 +211,33 @@ Multi-threaded benchmark: Single-threaded benchmark: -![Windows single-threaded benchmark](benchmarks/win32_intel_14700K_single-threaded.svg) +![Windows single-threaded benchmark](benchmarks/win32_intel_14700K_mutex_benchmarks_single-threaded.svg) Multi-threaded benchmark: -![Windows multi-threaded benchmark](benchmarks/win32_intel_14700K_multi-threaded.svg) +![Windows multi-threaded benchmark](benchmarks/win32_intel_14700K_mutex_benchmarks_multi-threaded.svg) ## Semaphore Types -| Type | Timed waits | Native handle | Platform | -|------|-------------|---------------|----------| -| `fast_semaphore` | — | — | Cross-platform | -| `timed_counting_semaphore` | `try_acquire_for` / `try_acquire_until` | — | Cross-platform | -| `posix_semaphore` | `try_acquire_for` / `try_acquire_until` | — | Linux | -| `win32_semaphore` | `try_acquire_for` / `try_acquire_until` | `native_handle()` | Windows | -| `eventfd_semaphore` | `try_acquire_for` / `try_acquire_until` | `native_handle()` | Linux | -| `kqueue_semaphore` | `try_acquire_for` / `try_acquire_until` | `native_handle()` | macOS/iOS | -| `mach_semaphore` | `try_acquire_for` / `try_acquire_until` | — | macOS/iOS | -| `dispatch_semaphore` | `try_acquire_for` / `try_acquire_until` | — | macOS/iOS | -| `native_async_semaphore` | `try_acquire_for` / `try_acquire_until` | `native_handle()` | Platform-specific alias | +### `parking_semaphore` +Cross-platform lock-free counting semaphore based on futex. 
+- **Policies**: + - `with_backoff`: Spins with exponential backoff before parking. -### Platform-specific async semaphores +### `timed_semaphore` +- **`timed_semaphore`**: Timed futex-based semaphore. + - **Policies**: `with_backoff` enables exponential backoff. -`win32_semaphore`, `eventfd_semaphore`, `kqueue_semaphore`, and `mach_semaphore` wrap OS primitives and expose `native_handle()` for integration with event loops (Boost.Asio, libdispatch, epoll, Qt, etc.). +### Platform-specific Async Semaphores +These variants wrap OS primitives and expose `native_handle()` for integration with event loops (Boost.Asio, libdispatch, epoll, Qt, etc.). +- **Windows**: `win32_semaphore`. +- **Linux**: `eventfd_semaphore`. +- **macOS / iOS**: `kqueue_semaphore`. +- **Aliases**: `native_async_semaphore` resolves to the async semaphore with a native handle for the platform. ```cpp -nova::sync::counting_semaphore sem(0); +nova::sync::parking_semaphore sem(0); // Producer thread sem.release(5); // add 5 tokens @@ -296,31 +266,28 @@ auto handle = nova::sync::async_acquire_cancellable(ioc, sem, handle.cancel(); // abort pending wait ``` -## Event Types +### Other Platform-specific Semaphores +- **Linux**: `posix_semaphore` (wrapper around `sem_t`). +- **macOS / iOS**: `mach_semaphore` (wrapper around `semaphore_t`) and `dispatch_semaphore` (wrapper around `dispatch_semaphore_t`). 
-| Type | Timed waits | Reset | Native handle | -|------|-------------|-------|---------------| -| `manual_reset_event` | — | Manual | — | -| `timed_manual_reset_event` | `try_wait_for` / `try_wait_until` | Manual | — | -| `native_manual_reset_event`| `try_wait_for` / `try_wait_until` | Manual | `native_handle()` | -| `auto_reset_event` | — | Automatic | — | -| `timed_auto_reset_event` | `try_wait_for` / `try_wait_until` | Automatic | — | -| `native_auto_reset_event` | `try_wait_for` / `try_wait_until` | Automatic | `native_handle()` | -### Manual-reset events +## Event Types (Auto-reset / Manual-reset) +### Manual-reset Events Once `signal()` is called, all waiters are woken and subsequent `wait()` / `try_wait()` calls return immediately until `reset()` is called. +- **`parking_manual_reset_event`**: Futex-based manual reset event. + - **Policies**: `with_backoff` enables exponential backoff. +- **`native_manual_reset_event`**: Maps to OS primitives (`eventfd` on Linux, `kqueue` on macOS, `SetEvent` on Windows) and exposes `native_handle()` for integration with event loops. -### Auto-reset events - +### Auto-reset Events Each `signal()` delivers exactly one token. A blocked waiter consumes it; otherwise the next `wait()` / `try_wait()` call consumes it. - -### Native events - -The `native_*` variants map to OS primitives (`eventfd` on Linux, `kqueue` on macOS, `SetEvent` on Windows) and expose `native_handle()` for integration with event loops, C++20 coroutines, or C++26 executors. +- **`parking_auto_reset_event`**: Futex-based auto reset event. + - **Policies**: `with_backoff` enables exponential backoff. +- **`timed_auto_reset_event`**: Supports timed waits. +- **`native_auto_reset_event`**: Maps to OS primitives (`eventfd` on Linux, `kqueue` on macOS, `SetEvent` on Windows) and exposes `native_handle()` for integration with event loops. 
```cpp -nova::sync::manual_reset_event ev; +nova::sync::parking_manual_reset_event ev; // Producer thread ev.signal(); // wake all waiters; event stays set @@ -332,7 +299,7 @@ ev.reset(); // clear the event ``` ```cpp -nova::sync::auto_reset_event ev; +nova::sync::parking_auto_reset_event ev; // Producer thread ev.signal(); // deliver one token diff --git a/benchmarks/linux_intel_14700K_multi-threaded.svg b/benchmarks/linux_intel_14700K_multi-threaded.svg deleted file mode 100644 index f35c96c..0000000 --- a/benchmarks/linux_intel_14700K_multi-threaded.svg +++ /dev/null @@ -1,2098 +0,0 @@ - - - - - - - - 2026-04-25T17:11:49.532244 - image/svg+xml - - - Matplotlib v3.10.1+dfsg1, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/benchmarks/linux_intel_14700K_mutex_benchmarks_multi-threaded.svg b/benchmarks/linux_intel_14700K_mutex_benchmarks_multi-threaded.svg new file mode 100644 index 0000000..16a089f --- /dev/null +++ b/benchmarks/linux_intel_14700K_mutex_benchmarks_multi-threaded.svg @@ -0,0 +1,3304 @@ + + + + + + + + 2026-05-08T20:16:54.684927 + image/svg+xml + + + Matplotlib v3.10.7+dfsg1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/linux_intel_14700K_mutex_benchmarks_single-threaded.svg b/benchmarks/linux_intel_14700K_mutex_benchmarks_single-threaded.svg new file mode 100644 index 0000000..abc3475 --- /dev/null +++ b/benchmarks/linux_intel_14700K_mutex_benchmarks_single-threaded.svg @@ -0,0 +1,3077 @@ + + + + + + + + 2026-05-08T20:16:54.536886 + image/svg+xml + + + Matplotlib v3.10.7+dfsg1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/linux_intel_14700K_single-threaded.svg b/benchmarks/linux_intel_14700K_single-threaded.svg deleted file mode 100644 index 04cea37..0000000 --- a/benchmarks/linux_intel_14700K_single-threaded.svg +++ /dev/null @@ -1,1948 +0,0 @@ - - - - - - - - 2026-04-25T17:11:49.449519 - image/svg+xml - - - Matplotlib v3.10.1+dfsg1, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg b/benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg index a4ce8a7..f762d44 100644 --- a/benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg +++ b/benchmarks/macos_m4_mutex_benchmarks_multi-threaded.svg @@ -1,12 +1,12 @@ - + - 2026-05-05T20:15:21.319817 + 2026-05-08T20:35:02.006785 image/svg+xml @@ -21,1079 +21,261 @@ - - - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - - - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - +" clip-path="url(#p32e4e05e7c)" style="fill: #1f77b4"/> - - - + + + + + + - + - - - + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + - - + + - + - + - + + - + + - + - - - + + + + + + @@ -1308,30 +793,255 @@ z - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - + + - + - - - + + + - - - - - + + + + + + + + + + + + + + + - - + + - + - + - + + @@ -1739,47 +1323,48 @@ z - - + + - + - - - + + + - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + - - + + - + - + - + @@ -1807,15 +1392,15 @@ z - - + + - + - + - + @@ -1847,48 +1432,79 @@ z - - + + - + - - - + + + - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - - + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -1918,15 +1534,15 @@ z - - + + - + - + - + @@ -1974,15 +1590,15 @@ z - - + + - + - + - + @@ -2013,15 +1629,15 @@ z - - + + - + - + - + @@ -2041,15 +1657,143 @@ z - - + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -2079,15 +1823,15 @@ z - - + + - + - - - + + + @@ -2100,133 +1844,406 @@ z - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + - - - + + + + - + - - - + + + + + + + + - + + + + - + - - - + + + + + + - + + + + - + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - - - - - - - - - - + + + + + + + + + + - + + + - - - + + + - + + + - - - + + + - + + + - - - - + + + + - + + + - - - - + + + + - + + + - - - - + + + + - + + + - - - + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + - + + + - - - + + + - + + + - - - + + + - + + + - - - + + + - + + + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + + + - - - + + + - + + + - - - + + + - + - + + + - - - + + + - + - + + + - - - - - - - + + + + + + + + + + - - - - - - - + + + + + + + + + + - - - - - - + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + diff --git a/benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg b/benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg index 2251aba..74cacd1 100644 --- a/benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg +++ b/benchmarks/macos_m4_mutex_benchmarks_single-threaded.svg @@ -1,12 +1,12 @@ - + - 2026-05-05T20:15:21.213891 + 2026-05-08T20:35:01.896303 image/svg+xml @@ -22,198 +22,738 @@ - - +" clip-path="url(#p6634d226c5)" style="fill: #1f77b4"/> - +" clip-path="url(#p6634d226c5)" style="fill: #1f77b4"/> - +" clip-path="url(#p6634d226c5)" style="fill: #1f77b4"/> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + +" transform="scale(0.015625)"/> + + + + + + - - + + + + - - - +" transform="scale(0.015625)"/> + + + + + + - - + + + + - - - + - - - - - - + - - - + - - - - - - + - - - + + - - - - - - + - - - + + + - - - - - - + - - - + - - - - - - - - - - - - - - - - + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + - + + + - + + + - + + + + + + - + + + - + + + + + + - + + + - + + + - + + + - + + + + + + - + + + + + + - + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +" clip-path="url(#p35132f84d0)" style="fill: #1f77b4"/> + + + + + + + - + - + - + + + + - + + - + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - + - + - + @@ -912,15 
+1377,48 @@ z - - + + - + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -945,18 +1443,31 @@ z + + + + + + + + + + + + + - - + + - + - + - + @@ -994,54 +1505,77 @@ z - - + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + - - + + - + - + - + @@ -1096,77 +1630,15 @@ z - - - - - - - - - - - - - - - - - - - - - - - - + + - + - + - + @@ -1203,15 +1675,15 @@ z - - + + - + - - - + + + @@ -1236,64 +1708,101 @@ z - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - + - + - + - @@ -1378,15 +1886,160 @@ z - - + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -1400,15 +2053,15 @@ z - - + + - + - + - + @@ -1432,15 +2085,15 @@ z - - + + - + - + - + @@ -1460,15 +2113,79 @@ z - - + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -1498,53 +2215,15 @@ z - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - + - + - + @@ -1598,15 +2277,15 @@ z - - + + - + - - - + + + @@ -1619,316 +2298,265 @@ z - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + - - - - - + + - + - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + - + - - - - - - - - - + + + + + + - - - - + - + - - - - - - + + + + + + - - - - + - + - - - + + + - - - - + + + - - - - + - + - - - - - - + + + + + + + + + - - - - + - + - - - + + + - - - - + + + - - + + + + - + + + + + + + + + + + - + - - - - - - + + + + + + - + - + - + + - - + + + + - - + - - + - - - - - - - - + + + + - - - + - - - - - - - + + + + - - - + - - - - - - - + + + + - - - + - - - - - - - + + 
+ + - - - + - - - - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - + + + + - - - + - - - - - - - - - + + + + + + - - - - - - - - - + + + + + + - - - + + + - - - - - - + + - - - + + + - - - - - - + + - - - - - - - - - - + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + - - + + + + + diff --git a/benchmarks/win32_intel_14700K_multi-threaded.svg b/benchmarks/win32_intel_14700K_multi-threaded.svg deleted file mode 100644 index 8e86c3c..0000000 --- a/benchmarks/win32_intel_14700K_multi-threaded.svg +++ /dev/null @@ -1,1912 +0,0 @@ - - - - - - - - 2026-04-25T20:33:18.576003 - image/svg+xml - - - Matplotlib v3.10.8, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/benchmarks/win32_intel_14700K_mutex_benchmarks_multi-threaded.svg b/benchmarks/win32_intel_14700K_mutex_benchmarks_multi-threaded.svg new file mode 100644 index 0000000..7debf9e --- /dev/null +++ b/benchmarks/win32_intel_14700K_mutex_benchmarks_multi-threaded.svg @@ -0,0 +1,2831 @@ + + + + + + + + 2026-05-09T05:25:59.723914 + image/svg+xml + + + Matplotlib v3.10.8, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/win32_intel_14700K_mutex_benchmarks_single-threaded.svg b/benchmarks/win32_intel_14700K_mutex_benchmarks_single-threaded.svg new file mode 100644 index 0000000..aa54ace --- /dev/null +++ b/benchmarks/win32_intel_14700K_mutex_benchmarks_single-threaded.svg @@ -0,0 +1,2632 @@ + + + + + + + + 2026-05-09T05:25:59.596844 + image/svg+xml + + + Matplotlib v3.10.8, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/win32_intel_14700K_single-threaded.svg b/benchmarks/win32_intel_14700K_single-threaded.svg deleted file mode 100644 index 14e905e..0000000 --- a/benchmarks/win32_intel_14700K_single-threaded.svg +++ /dev/null @@ -1,1816 +0,0 @@ - - - - - - - - 2026-04-25T20:33:18.481191 - image/svg+xml - - - Matplotlib v3.10.8, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/include/nova/sync/event/parking_auto_reset_event.hpp b/include/nova/sync/event/parking_auto_reset_event.hpp index 06905a6..40bd00d 100644 --- a/include/nova/sync/event/parking_auto_reset_event.hpp +++ b/include/nova/sync/event/parking_auto_reset_event.hpp @@ -29,7 +29,6 @@ namespace nova::sync { /// ### Aliases /// - `parking_auto_reset_event<>` — pure park, no spinning. /// - `parking_auto_reset_event` — spin-then-park. -/// - `auto_reset_event` — deprecated alias for `parking_auto_reset_event<>`. template < typename... Policies > requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) class parking_auto_reset_event @@ -141,10 +140,4 @@ class parking_auto_reset_event } }; -//---------------------------------------------------------------------------------------------------------------------- -// Convenience alias - -/// @brief Deprecated alias for `parking_auto_reset_event<>`. 
-using auto_reset_event = parking_auto_reset_event<>; - } // namespace nova::sync diff --git a/include/nova/sync/event/parking_manual_reset_event.hpp b/include/nova/sync/event/parking_manual_reset_event.hpp index 4d9fd66..cb3b017 100644 --- a/include/nova/sync/event/parking_manual_reset_event.hpp +++ b/include/nova/sync/event/parking_manual_reset_event.hpp @@ -22,13 +22,11 @@ namespace nova::sync { /// /// | Policy | Effect | /// |---------------|--------------------------------------------------------| -/// | (no exponential_backoff) | Park immediately when not set (default). | /// | `with_backoff`| Spin with exponential backoff before parking. | /// /// ### Aliases /// - `parking_manual_reset_event<>` — pure park, no spinning. /// - `parking_manual_reset_event` — spin-then-park. -/// - `manual_reset_event` — deprecated alias for `parking_manual_reset_event<>`. template < typename... Policies > requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) class parking_manual_reset_event @@ -111,10 +109,4 @@ class parking_manual_reset_event } }; -//---------------------------------------------------------------------------------------------------------------------- -// Convenience alias - -/// @brief Deprecated alias for `parking_manual_reset_event<>`. -using manual_reset_event = parking_manual_reset_event<>; - } // namespace nova::sync diff --git a/include/nova/sync/event/timed_auto_reset_event.hpp b/include/nova/sync/event/timed_auto_reset_event.hpp deleted file mode 100644 index a5e466c..0000000 --- a/include/nova/sync/event/timed_auto_reset_event.hpp +++ /dev/null @@ -1,144 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include -#include -#include -#include -#include // std::ignore - -#include - -#include "nova/sync/event/parking_auto_reset_event.hpp" - -namespace nova::sync { -namespace impl { - -/// @brief Auto-reset event with timed-wait support. 
-/// -/// Each `signal()` delivers exactly one token. If a thread is blocked in -/// `wait()`, it is woken and the token is consumed. Otherwise the token is -/// stored for the next `wait()` / `try_wait()` call. -class timed_auto_reset_event -{ - // State encoding helpers - // bits 0-15 : waiter_count (number of threads waiting) - // bits 16-31 : post_count (number of unmatched signals) - static constexpr std::ptrdiff_t max_waiters = ( std::ptrdiff_t { 1 } << 16 ) - 2; - static constexpr uint32_t waiter_mask = ( 1u << 16 ) - 1u; - static constexpr uint32_t post_one = 1u << 16; - -public: - /// @brief Constructs the event. - explicit timed_auto_reset_event( bool initially_set = false ) noexcept : - state_( initially_set ? post_one : 0u ) - {} - - ~timed_auto_reset_event() = default; - timed_auto_reset_event( const timed_auto_reset_event& ) = delete; - timed_auto_reset_event& operator=( const timed_auto_reset_event& ) = delete; - - /// @brief Delivers one token, waking exactly one waiter. - void signal() noexcept - { - uint32_t s = state_.load( std::memory_order_relaxed ); - while ( true ) { - const uint32_t post_count = s >> 16; - const uint32_t waiter_count = s & waiter_mask; - - if ( post_count > waiter_count ) - return; - - if ( state_.compare_exchange_weak( s, s + post_one, std::memory_order_release, std::memory_order_relaxed ) ) { - if ( waiter_count > post_count ) - sem_.release( 1 ); - return; - } - } - } - - /// @brief Atomically consumes a token if one is pending. - /// @return true if a token was available and consumed, false otherwise. - [[nodiscard]] bool try_wait() noexcept - { - uint32_t s = state_.load( std::memory_order_relaxed ); - while ( s >= post_one ) { - if ( state_.compare_exchange_weak( s, s - post_one, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; - } - return false; - } - - /// @brief Blocks until a token is available, then consumes it. 
- void wait() noexcept; - - /// @brief Blocks until a token is available or the absolute deadline passes. - /// @return true if a token was consumed, false if the deadline passed. - template < class Clock, class Duration > - [[nodiscard]] bool try_wait_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept - { - if ( try_wait() ) - return true; - - state_.fetch_add( 1u, std::memory_order_relaxed ); - - uint32_t s = state_.load( std::memory_order_acquire ); - while ( s >= post_one ) { - if ( state_.compare_exchange_weak( - s, s - 1u - post_one, std::memory_order_acquire, std::memory_order_relaxed ) ) { - std::ignore = sem_.try_acquire(); - return true; - } - } - - while ( true ) { - if ( !sem_.try_acquire_until( abs_time ) ) - return on_timed_wait_timeout(); - - s = state_.load( std::memory_order_relaxed ); - while ( true ) { - if ( s >= post_one ) { - if ( state_.compare_exchange_weak( - s, s - 1u - post_one, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; - } else { - break; - } - } - } - } - - /// @brief Blocks until a token is available or the timeout expires. - /// @return true if a token was consumed, false if the timeout expired. 
- template < class Rep, class Period > - [[nodiscard]] bool try_wait_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept - { - return try_wait_until( std::chrono::steady_clock::now() + rel_time ); - } - -private: - bool on_timed_wait_timeout() noexcept; - - // ----------------------------------------------------------------------- - // Data - - std::atomic< uint32_t > state_; - std::counting_semaphore< max_waiters > sem_ { 0 }; -}; - -} // namespace impl - -#if defined( __linux__ ) || defined( _WIN32 ) - -using timed_auto_reset_event = auto_reset_event; - -#else - -using timed_auto_reset_event = impl::timed_auto_reset_event; - -#endif - - -} // namespace nova::sync diff --git a/include/nova/sync/event/timed_manual_reset_event.hpp b/include/nova/sync/event/timed_manual_reset_event.hpp deleted file mode 100644 index 75f4f26..0000000 --- a/include/nova/sync/event/timed_manual_reset_event.hpp +++ /dev/null @@ -1,160 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include -#include -#include -#include -#include // std::ignore - -#include - -#include "nova/sync/event/parking_manual_reset_event.hpp" - -namespace nova::sync { - -/// @brief Manual-reset event with timed-wait support. -/// -/// Once `signal()` is called, all waiters are woken and subsequent `wait()` / -/// `try_wait()` calls return immediately until `reset()` is called. - -namespace impl { - -class timed_manual_reset_event -{ - // ----------------------------------------------------------------------- - // State encoding helpers - // state_ = (waiter_count << 1) | flag_bit - static constexpr std::ptrdiff_t max_waiters = ( std::ptrdiff_t { 1 } << 30 ) - 1; - static constexpr uint32_t flag_bit = 1u; - static constexpr uint32_t waiter_one = 2u; // adding one waiter - -public: - /// @brief Constructs the event. - explicit timed_manual_reset_event( bool initially_set = false ) noexcept : - state_( initially_set ? 
flag_bit : 0u ) - {} - - ~timed_manual_reset_event() = default; - timed_manual_reset_event( const timed_manual_reset_event& ) = delete; - timed_manual_reset_event& operator=( const timed_manual_reset_event& ) = delete; - - // ----------------------------------------------------------------------- - // Signalling - - /// @brief Transitions the event to "set", waking all waiters. - void signal() noexcept - { - const uint32_t prev = state_.fetch_or( flag_bit, std::memory_order_release ); - if ( prev & flag_bit ) - return; - - const uint32_t wc = prev >> 1; - if ( wc > 0 ) - sem_.release( std::ptrdiff_t( wc ) ); - } - - /// @brief Transitions the event back to "not set". - void reset() noexcept - { - state_.fetch_and( ~flag_bit, std::memory_order_relaxed ); - } - - // ----------------------------------------------------------------------- - // Waiting - - /// @brief Returns true if the event is currently set, without blocking. - [[nodiscard]] bool try_wait() const noexcept - { - return ( state_.load( std::memory_order_acquire ) & flag_bit ) != 0u; - } - - /// @brief Blocks until the event is set. - void wait() noexcept - { - if ( state_.load( std::memory_order_acquire ) & flag_bit ) - return; // fast path - - state_.fetch_add( waiter_one, std::memory_order_relaxed ); - - if ( state_.load( std::memory_order_acquire ) & flag_bit ) { - state_.fetch_sub( waiter_one, std::memory_order_relaxed ); - std::ignore = sem_.try_acquire(); - return; - } - - while ( true ) { - sem_.acquire(); - if ( state_.load( std::memory_order_acquire ) & flag_bit ) { - state_.fetch_sub( waiter_one, std::memory_order_relaxed ); - return; - } - } - } - - /// @brief Blocks until the event is set or the absolute deadline passes. - /// @return true if the event was set, false if the deadline passed. 
- template < class Clock, class Duration > - [[nodiscard]] bool try_wait_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept - { - if ( state_.load( std::memory_order_acquire ) & flag_bit ) - return true; - - state_.fetch_add( waiter_one, std::memory_order_relaxed ); - - if ( state_.load( std::memory_order_acquire ) & flag_bit ) { - state_.fetch_sub( waiter_one, std::memory_order_relaxed ); - std::ignore = sem_.try_acquire(); - return true; - } - - while ( true ) { - if ( !sem_.try_acquire_until( abs_time ) ) - return on_timed_wait_timeout(); - - if ( state_.load( std::memory_order_acquire ) & flag_bit ) { - state_.fetch_sub( waiter_one, std::memory_order_relaxed ); - return true; - } - } - } - - /// @brief Blocks until the event is set or the timeout expires. - /// @return true if the event was set, false if the timeout expired. - template < class Rep, class Period > - [[nodiscard]] bool try_wait_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept - { - return try_wait_until( std::chrono::steady_clock::now() + rel_time ); - } - -private: - bool on_timed_wait_timeout() noexcept; - - // ----------------------------------------------------------------------- - // Data - - // state: - // bit 0 : signal flag (1 = set, 0 = not set) - // bits 1–31 : waiter count (number of threads registered for semaphore wakeup) - // stored as (count << 1) so that bit 0 is the flag. 
- - std::atomic< uint32_t > state_; - std::counting_semaphore< max_waiters > sem_ { 0 }; -}; - -} // namespace impl - -#if defined( __linux__ ) || defined( _WIN32 ) - -using timed_manual_reset_event = manual_reset_event; - -#else - -using timed_manual_reset_event = impl::timed_manual_reset_event; - -#endif - - -} // namespace nova::sync diff --git a/include/nova/sync/mutex/eventfd_mutex.hpp b/include/nova/sync/mutex/eventfd_mutex.hpp index 2ed305e..c00ba08 100644 --- a/include/nova/sync/mutex/eventfd_mutex.hpp +++ b/include/nova/sync/mutex/eventfd_mutex.hpp @@ -171,9 +171,9 @@ class NOVA_SYNC_CAPABILITY( "mutex" ) fast_eventfd_mutex_impl if ( ( s & 1u ) == 0 ) { if ( state_.compare_exchange_weak( s, s | 1u, std::memory_order_acquire, std::memory_order_relaxed ) ) return detail::backoff_result::success; - return detail::backoff_result::retry; + return detail::backoff_result::retry_without_backoff; } - return detail::backoff_result::failure; + return detail::backoff_result::retry; } ); if ( success ) return true; diff --git a/include/nova/sync/semaphore/parking_semaphore.hpp b/include/nova/sync/semaphore/parking_semaphore.hpp index 3e00735..ca47b53 100644 --- a/include/nova/sync/semaphore/parking_semaphore.hpp +++ b/include/nova/sync/semaphore/parking_semaphore.hpp @@ -26,13 +26,8 @@ namespace nova::sync { /// /// | Policy | Effect | /// |---------------|-------------------------------------------------------| -/// | (no exponential_backoff) | Park immediately when count is negative (default). | /// | `with_backoff`| Spin with exponential backoff before parking. | /// -/// ### Aliases -/// - `parking_semaphore<>` — pure park, no spinning. -/// - `parking_semaphore` — spin-then-park. -/// - `fast_semaphore` — deprecated alias for `parking_semaphore<>`. template < typename... Policies > requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... 
> ) class parking_semaphore @@ -110,17 +105,11 @@ class parking_semaphore /// @brief Timed lock-free counting semaphore with optional exponential backoff. /// -/// Adds `try_acquire_for` / `try_acquire_until` to `parking_semaphore`. /// /// | Policy | Effect | /// |---------------|-------------------------------------------------------| -/// | (absence of `with_backoff`) | Park immediately when count is negative (default). | /// | `with_backoff`| Spin with exponential backoff before parking. | /// -/// ### Aliases -/// - `timed_semaphore<>` — pure park, no spinning. -/// - `timed_semaphore` — spin-then-park. -/// - `fast_timed_semaphore` — deprecated alias for `timed_semaphore<>`. template < typename... Policies > requires( parameter::valid_parameters< detail::backoff_allowed_tags, Policies... > ) class timed_semaphore @@ -232,13 +221,4 @@ class timed_semaphore } }; -//---------------------------------------------------------------------------------------------------------------------- -// Convenience aliases - -/// @brief Deprecated alias for `parking_semaphore<>` (pure park, no backoff). -using fast_semaphore = parking_semaphore<>; - -/// @brief Deprecated alias for `timed_semaphore<>` (pure park, no backoff). -using fast_timed_semaphore = timed_semaphore<>; - } // namespace nova::sync diff --git a/include/nova/sync/semaphore/posix_semaphore.hpp b/include/nova/sync/semaphore/posix_semaphore.hpp index c0056ba..7b16569 100644 --- a/include/nova/sync/semaphore/posix_semaphore.hpp +++ b/include/nova/sync/semaphore/posix_semaphore.hpp @@ -18,8 +18,7 @@ namespace nova::sync { -/// Counting semaphore wrapping POSIX `sem_t`. Linux only (macOS has deprecated sem_init/sem_destroy). -/// Supports timed waits via `sem_timedwait`. +/// Counting semaphore wrapping POSIX `sem_t`. 
class posix_semaphore { public: diff --git a/include/nova/sync/semaphore/timed_counting_semaphore.hpp b/include/nova/sync/semaphore/timed_counting_semaphore.hpp deleted file mode 100644 index c71cbe3..0000000 --- a/include/nova/sync/semaphore/timed_counting_semaphore.hpp +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#pragma once - -#include -#include -#include -#include -#include - -#include - -namespace nova::sync { - -/// Counting semaphore with timed-wait support. Uses `std::counting_semaphore` -/// internally for timed operations. -class timed_counting_semaphore -{ - static constexpr std::ptrdiff_t max_count = ( std::ptrdiff_t { 1 } << 30 ) - 1; - -public: - /// Constructs a semaphore with @p initial tokens (default: 0). - explicit timed_counting_semaphore( std::ptrdiff_t initial = 0 ) noexcept : - count_( int32_t( initial ) ) - { - assert( initial >= 0 && "timed_counting_semaphore: initial count must be non-negative" ); - } - - ~timed_counting_semaphore() = default; - timed_counting_semaphore( const timed_counting_semaphore& ) = delete; - timed_counting_semaphore& operator=( const timed_counting_semaphore& ) = delete; - - /// Adds @p n tokens and wakes up to @p n blocked waiters. - void release( std::ptrdiff_t n = 1 ) noexcept - { - assert( n >= 0 && "timed_counting_semaphore::release: n must be non-negative" ); - auto prev = count_.fetch_add( int32_t( n ), std::memory_order_release ); - if ( prev < 0 ) { - auto to_wake = std::min( int32_t( n ), -prev ); - for ( int32_t i = 0; i < to_wake; ++i ) - sem_.release( 1 ); - } - } - - /// Blocks until a token is available, then consumes one. - void acquire() noexcept - { - auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); - if ( prev > 0 ) - return; - - sem_.acquire(); - } - - /// Consumes a token if available. Returns `true` on success, `false` if none available. 
- [[nodiscard]] bool try_acquire() noexcept - { - auto c = count_.load( std::memory_order_relaxed ); - while ( c > 0 ) { - if ( count_.compare_exchange_weak( c, c - 1, std::memory_order_acquire, std::memory_order_relaxed ) ) - return true; - } - return false; - } - - /// Blocks until a token is available or the deadline passes. - /// Returns `true` if acquired, `false` if timed out. - template < class Clock, class Duration > - [[nodiscard]] bool try_acquire_until( std::chrono::time_point< Clock, Duration > const& abs_time ) noexcept - { - if ( try_acquire() ) - return true; - - auto prev = count_.fetch_sub( 1, std::memory_order_acquire ); - if ( prev > 0 ) - return true; - - while ( true ) { - if ( !sem_.try_acquire_until( abs_time ) ) { - auto c = count_.fetch_add( 1, std::memory_order_relaxed ); - if ( c >= 0 ) { - std::ignore = sem_.try_acquire(); - return true; - } - return false; - } - return true; - } - } - - /// Blocks until a token is available or the timeout expires. - /// Returns `true` if acquired, `false` if timed out. 
- template < class Rep, class Period > - [[nodiscard]] bool try_acquire_for( std::chrono::duration< Rep, Period > const& rel_time ) noexcept - { - return try_acquire_until( std::chrono::steady_clock::now() + rel_time ); - } - -private: - std::atomic< int32_t > count_; - std::counting_semaphore< max_count > sem_ { 0 }; -}; - -} // namespace nova::sync diff --git a/source/nova/sync/event/timed_auto_reset_event.cpp b/source/nova/sync/event/timed_auto_reset_event.cpp deleted file mode 100644 index 4fb6f62..0000000 --- a/source/nova/sync/event/timed_auto_reset_event.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#include "nova/sync/event/timed_auto_reset_event.hpp" - -namespace nova::sync::impl { - -void timed_auto_reset_event::wait() noexcept -{ - if ( try_wait() ) - return; - - state_.fetch_add( 1u, std::memory_order_relaxed ); - - uint32_t s = state_.load( std::memory_order_acquire ); - while ( s >= post_one ) { - if ( state_.compare_exchange_weak( s, s - 1u - post_one, std::memory_order_acquire, std::memory_order_relaxed ) ) { - std::ignore = sem_.try_acquire(); - return; - } - } - - while ( true ) { - sem_.acquire(); - s = state_.load( std::memory_order_relaxed ); - while ( true ) { - if ( s >= post_one ) { - if ( state_.compare_exchange_weak( - s, s - 1u - post_one, std::memory_order_acquire, std::memory_order_relaxed ) ) - return; - } else { - break; - } - } - } -} - -bool timed_auto_reset_event::on_timed_wait_timeout() noexcept -{ - uint32_t s = state_.load( std::memory_order_relaxed ); - while ( true ) { - if ( s >= post_one ) { - if ( state_.compare_exchange_weak( - s, s - 1u - post_one, std::memory_order_acquire, std::memory_order_relaxed ) ) { - sem_.acquire(); - return true; - } - } else { - if ( state_.compare_exchange_weak( s, s - 1u, std::memory_order_relaxed, std::memory_order_relaxed ) ) - return false; - } - } -} - -} // namespace nova::sync::impl diff --git 
a/source/nova/sync/event/timed_manual_reset_event.cpp b/source/nova/sync/event/timed_manual_reset_event.cpp deleted file mode 100644 index d83bd20..0000000 --- a/source/nova/sync/event/timed_manual_reset_event.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: MIT -// SPDX-FileCopyrightText: 2026 Tim Blechmann - -#include "nova/sync/event/timed_manual_reset_event.hpp" - -namespace nova::sync::impl { - -bool timed_manual_reset_event::on_timed_wait_timeout() noexcept -{ - uint32_t s = state_.load( std::memory_order_relaxed ); - while ( true ) { - if ( s & flag_bit ) { - if ( state_.compare_exchange_weak( s, s - waiter_one, std::memory_order_relaxed, std::memory_order_relaxed ) ) { - sem_.acquire(); - std::atomic_thread_fence( std::memory_order_acquire ); - return true; - } - } else { - if ( state_.compare_exchange_weak( s, s - waiter_one, std::memory_order_relaxed, std::memory_order_relaxed ) ) - return false; - } - } -} - -} // namespace nova::sync::impl diff --git a/source/nova/sync/mutex/eventfd_mutex.cpp b/source/nova/sync/mutex/eventfd_mutex.cpp index d23227a..e0a131f 100644 --- a/source/nova/sync/mutex/eventfd_mutex.cpp +++ b/source/nova/sync/mutex/eventfd_mutex.cpp @@ -99,9 +99,9 @@ void fast_eventfd_mutex_impl::lock_slow() noexcept if ( ( s & 1u ) == 0 ) { if ( state_.compare_exchange_weak( s, s | 1u, std::memory_order_acquire, std::memory_order_relaxed ) ) return detail::backoff_result::success; - return detail::backoff_result::retry; + return detail::backoff_result::retry_without_backoff; } - return detail::backoff_result::failure; + return detail::backoff_result::retry; } ); if ( success ) diff --git a/test/event_benchmarks.cpp b/test/event_benchmarks.cpp index 8f4b787..d5e7e7c 100644 --- a/test/event_benchmarks.cpp +++ b/test/event_benchmarks.cpp @@ -7,8 +7,6 @@ #include #include #include -#include -#include #include @@ -21,9 +19,9 @@ TEMPLATE_TEST_CASE( "manual_reset_event benchmarks", "[!benchmark]", - nova::sync::manual_reset_event, - 
nova::sync::native_manual_reset_event, - nova::sync::timed_manual_reset_event ) + nova::sync::parking_manual_reset_event<>, + nova::sync::parking_manual_reset_event< nova::sync::with_backoff >, + nova::sync::native_manual_reset_event ) { using event_t = TestType; @@ -94,9 +92,9 @@ TEMPLATE_TEST_CASE( "manual_reset_event benchmarks", TEMPLATE_TEST_CASE( "auto_reset_event benchmarks", "[!benchmark]", - nova::sync::auto_reset_event, - nova::sync::native_auto_reset_event, - nova::sync::timed_auto_reset_event ) + nova::sync::parking_auto_reset_event<>, + nova::sync::parking_auto_reset_event< nova::sync::with_backoff >, + nova::sync::native_auto_reset_event ) { using event_t = TestType; diff --git a/test/event_test.cpp b/test/event_test.cpp index daccf73..35e40e0 100644 --- a/test/event_test.cpp +++ b/test/event_test.cpp @@ -8,15 +8,13 @@ #include #include #include -#include -#include // Validate concepts for the concrete types we exercise in this test file. -static_assert( nova::sync::concepts::manual_reset_event< nova::sync::manual_reset_event > ); -static_assert( nova::sync::concepts::timed_event< nova::sync::timed_manual_reset_event > ); +static_assert( nova::sync::concepts::manual_reset_event< nova::sync::parking_manual_reset_event<> > ); +static_assert( nova::sync::concepts::timed_event< nova::sync::parking_manual_reset_event<> > ); static_assert( nova::sync::concepts::native_async_event< nova::sync::native_manual_reset_event > ); -static_assert( nova::sync::concepts::auto_reset_event< nova::sync::auto_reset_event > ); -static_assert( nova::sync::concepts::timed_event< nova::sync::timed_auto_reset_event > ); +static_assert( nova::sync::concepts::auto_reset_event< nova::sync::parking_auto_reset_event<> > ); +static_assert( nova::sync::concepts::timed_event< nova::sync::parking_auto_reset_event<> > ); #if defined( _WIN32 ) static_assert( nova::sync::concepts::native_async_event< nova::sync::native_auto_reset_event > ); #else @@ -52,8 +50,8 @@ struct thread_guard 
TEMPLATE_TEST_CASE( "manual_reset_event implementations", "[manual_reset_event]", - nova::sync::manual_reset_event, - nova::sync::timed_manual_reset_event, + nova::sync::parking_manual_reset_event<>, + nova::sync::parking_manual_reset_event< nova::sync::with_backoff >, nova::sync::native_manual_reset_event ) { using event_t = TestType; @@ -377,8 +375,8 @@ TEMPLATE_TEST_CASE( "manual_reset_event implementations", TEMPLATE_TEST_CASE( "manual_reset_event implementations (stress tests)", "[manual_reset_event][stress]", - nova::sync::manual_reset_event, - nova::sync::timed_manual_reset_event, + nova::sync::parking_manual_reset_event<>, + nova::sync::parking_manual_reset_event< nova::sync::with_backoff >, nova::sync::native_manual_reset_event ) { using event_t = TestType; @@ -430,8 +428,8 @@ TEMPLATE_TEST_CASE( "manual_reset_event implementations (stress tests)", TEMPLATE_TEST_CASE( "auto_reset_event implementations", "[auto_reset_event]", - nova::sync::auto_reset_event, - nova::sync::timed_auto_reset_event, + nova::sync::parking_auto_reset_event<>, + nova::sync::parking_auto_reset_event< nova::sync::with_backoff >, nova::sync::native_auto_reset_event ) { using event_t = TestType; @@ -585,8 +583,8 @@ TEMPLATE_TEST_CASE( "auto_reset_event implementations", TEMPLATE_TEST_CASE( "auto_reset_event implementations (stress tests)", "[auto_reset_event][stress]", - nova::sync::auto_reset_event, - nova::sync::timed_auto_reset_event, + nova::sync::parking_auto_reset_event<>, + nova::sync::parking_auto_reset_event< nova::sync::with_backoff >, nova::sync::native_auto_reset_event ) { using event_t = TestType; diff --git a/test/mutex_benchmarks.cpp b/test/mutex_benchmarks.cpp index 6eb32d2..460d346 100644 --- a/test/mutex_benchmarks.cpp +++ b/test/mutex_benchmarks.cpp @@ -1,10 +1,12 @@ // SPDX-License-Identifier: MIT // SPDX-FileCopyrightText: 2026 Tim Blechmann +#include #include #include #include +#include #include #include @@ -12,7 +14,8 @@ #ifdef NOVA_SYNC_HAS_QT # include -# 
define NOVA_SYNC_QT_MUTEX_TYPE , QMutex +# include +# define NOVA_SYNC_QT_MUTEX_TYPE , QMutex, QBasicReadWriteLock, QRecursiveMutex #else # define NOVA_SYNC_QT_MUTEX_TYPE #endif @@ -32,13 +35,15 @@ TEMPLATE_TEST_CASE( "mutex benchmarks", std::timed_mutex, std::recursive_mutex, std::recursive_timed_mutex, + std::shared_mutex, + std::shared_timed_mutex, NOVA_SYNC_ALL_MUTEX_TYPES NOVA_SYNC_QT_MUTEX_TYPE ) { using mutex_t = TestType; SECTION( "single-threaded" ) { - const int ops = 1000000; + const int ops = 100000; BENCHMARK( "single-threaded" ) { @@ -52,18 +57,20 @@ TEMPLATE_TEST_CASE( "mutex benchmarks", }; } - SECTION( "multi-threaded" ) + SECTION( "multi-threaded (8 threads)" ) { - const unsigned threads = std::max( 2u, std::thread::hardware_concurrency() ); - const int ops_per_thread = 10000; // keep total work reasonable + const unsigned threads = 8; + const int ops_per_thread = 5000; // keep total work reasonable BENCHMARK( "multi-threaded" ) { mutex_t m; std::vector< std::thread > ths; ths.reserve( threads ); + std::barrier sync( threads ); for ( unsigned t = 0; t < threads; ++t ) { ths.emplace_back( [ & ] { + sync.arrive_and_wait(); for ( int i = 0; i < ops_per_thread; ++i ) { m.lock(); work(); @@ -80,15 +87,17 @@ TEMPLATE_TEST_CASE( "mutex benchmarks", SECTION( "multi-threaded (high contention)" ) { const unsigned threads = std::max( 2u, std::thread::hardware_concurrency() * 2 ); - const int ops_per_thread = 10000; // keep total work reasonable + const int ops_per_thread = 5000; // keep total work reasonable BENCHMARK( "multi-threaded (high contention)" ) { mutex_t m; std::vector< std::thread > ths; ths.reserve( threads ); + std::barrier sync( threads ); for ( unsigned t = 0; t < threads; ++t ) { ths.emplace_back( [ & ] { + sync.arrive_and_wait(); for ( int i = 0; i < ops_per_thread; ++i ) { m.lock(); work(); diff --git a/test/mutex_types.hpp b/test/mutex_types.hpp index fc5e451..17abd15 100644 --- a/test/mutex_types.hpp +++ b/test/mutex_types.hpp @@ -118,10 
+118,10 @@ using spinlock_mutex_shared = nova::sync::spinlock_mutex< nova:: using spinlock_mutex_shared_with_backoff = nova::sync::spinlock_mutex< nova::sync::shared, nova::sync::with_backoff >; #define NOVA_SYNC_ALL_MUTEX_TYPES \ + parking_timed_mutex_with_backoff, \ nova::sync::parking_mutex<>, \ nova::sync::parking_mutex, \ nova::sync::parking_mutex, \ - parking_timed_mutex_with_backoff, \ nova::sync::ticket_mutex<>, \ nova::sync::ticket_mutex, \ nova::sync::spinlock_mutex<>, \ diff --git a/test/policy_mutex_test.cpp b/test/policy_mutex_test.cpp index c20bf5e..f622b5d 100644 --- a/test/policy_mutex_test.cpp +++ b/test/policy_mutex_test.cpp @@ -50,19 +50,15 @@ static_assert( nova::sync::concepts::counting_semaphore< nova::sync::parking_sem static_assert( nova::sync::concepts::timed_counting_semaphore< nova::sync::timed_semaphore<> > ); static_assert( nova::sync::concepts::timed_counting_semaphore< nova::sync::timed_semaphore< nova::sync::with_backoff > > ); -// Aliases -static_assert( std::is_same_v< nova::sync::fast_semaphore, nova::sync::parking_semaphore<> > ); -static_assert( std::is_same_v< nova::sync::fast_timed_semaphore, nova::sync::timed_semaphore<> > ); - //---------------------------------------------------------------------------------------------------------------------- // Concept checks: events static_assert( nova::sync::concepts::auto_reset_event< nova::sync::parking_auto_reset_event<> > ); static_assert( nova::sync::concepts::auto_reset_event< nova::sync::parking_auto_reset_event< nova::sync::with_backoff > > ); -static_assert( nova::sync::concepts::auto_reset_event< nova::sync::auto_reset_event > ); +static_assert( nova::sync::concepts::auto_reset_event< nova::sync::parking_auto_reset_event<> > ); static_assert( nova::sync::concepts::manual_reset_event< nova::sync::parking_manual_reset_event<> > ); static_assert( nova::sync::concepts::manual_reset_event< nova::sync::parking_manual_reset_event< nova::sync::with_backoff > > ); -static_assert( 
nova::sync::concepts::manual_reset_event< nova::sync::manual_reset_event > ); +static_assert( nova::sync::concepts::manual_reset_event< nova::sync::parking_manual_reset_event<> > ); diff --git a/test/semaphore_test.cpp b/test/semaphore_test.cpp index 5529fbc..748e67a 100644 --- a/test/semaphore_test.cpp +++ b/test/semaphore_test.cpp @@ -15,10 +15,6 @@ using namespace std::chrono_literals; -// Validate concepts for portable types. -static_assert( nova::sync::concepts::counting_semaphore< nova::sync::fast_semaphore > ); -static_assert( nova::sync::concepts::timed_counting_semaphore< nova::sync::fast_timed_semaphore > ); - // ============================================================================= // Shared helpers // ============================================================================= diff --git a/test/semaphore_types.hpp b/test/semaphore_types.hpp index 3c06b1d..4af541f 100644 --- a/test/semaphore_types.hpp +++ b/test/semaphore_types.hpp @@ -59,14 +59,16 @@ // --------------------------------------------------------------------------- // clang-format off -#define NOVA_SYNC_ALL_SEMAPHORE_TYPES \ - nova::sync::fast_semaphore, \ - nova::sync::fast_timed_semaphore \ - NOVA_SYNC_EVENTFD_SEMAPHORE_arg \ - NOVA_SYNC_KQUEUE_SEMAPHORE_arg \ - NOVA_SYNC_WIN32_SEMAPHORE_arg \ - NOVA_SYNC_POSIX_SEMAPHORE_arg \ - NOVA_SYNC_MACH_SEMAPHORE_arg \ +#define NOVA_SYNC_ALL_SEMAPHORE_TYPES \ + nova::sync::parking_semaphore<>, \ + nova::sync::parking_semaphore, \ + nova::sync::timed_semaphore<>, \ + nova::sync::timed_semaphore \ + NOVA_SYNC_EVENTFD_SEMAPHORE_arg \ + NOVA_SYNC_KQUEUE_SEMAPHORE_arg \ + NOVA_SYNC_WIN32_SEMAPHORE_arg \ + NOVA_SYNC_POSIX_SEMAPHORE_arg \ + NOVA_SYNC_MACH_SEMAPHORE_arg \ NOVA_SYNC_DISPATCH_SEMAPHORE_arg // clang-format on @@ -75,13 +77,14 @@ // --------------------------------------------------------------------------- // clang-format off -#define NOVA_SYNC_TIMED_SEMAPHORE_TYPES \ - nova::sync::fast_timed_semaphore \ - 
NOVA_SYNC_EVENTFD_SEMAPHORE_arg \ - NOVA_SYNC_KQUEUE_SEMAPHORE_arg \ - NOVA_SYNC_WIN32_SEMAPHORE_arg \ - NOVA_SYNC_POSIX_SEMAPHORE_arg \ - NOVA_SYNC_MACH_SEMAPHORE_arg \ +#define NOVA_SYNC_TIMED_SEMAPHORE_TYPES \ + nova::sync::timed_semaphore<>, \ + nova::sync::timed_semaphore \ + NOVA_SYNC_EVENTFD_SEMAPHORE_arg \ + NOVA_SYNC_KQUEUE_SEMAPHORE_arg \ + NOVA_SYNC_WIN32_SEMAPHORE_arg \ + NOVA_SYNC_POSIX_SEMAPHORE_arg \ + NOVA_SYNC_MACH_SEMAPHORE_arg \ NOVA_SYNC_DISPATCH_SEMAPHORE_arg // clang-format on diff --git a/tools/bench_plot.py b/tools/bench_plot.py index 3e398ca..12e35b8 100755 --- a/tools/bench_plot.py +++ b/tools/bench_plot.py @@ -236,7 +236,7 @@ def percentile(data, p): 1, sharex=True, gridspec_kw={"height_ratios": heights}, - figsize=(max(8, width), max(3, 2 + k * 1.5)), + figsize=(width, height), dpi=100, layout='constrained', )