diff --git a/thirdparty/stdatomic/nix/atomic_cpp.h b/thirdparty/stdatomic/nix/atomic_cpp.h
index adc36b9c3d..31a5b7d97b 100644
--- a/thirdparty/stdatomic/nix/atomic_cpp.h
+++ b/thirdparty/stdatomic/nix/atomic_cpp.h
@@ -120,6 +120,14 @@ using std::memory_order_consume;
 using std::memory_order_relaxed;
 using std::memory_order_release;
 using std::memory_order_seq_cst;
+
+#define memory_order_relaxed std::memory_order_relaxed
+#define memory_order_consume std::memory_order_consume
+#define memory_order_acquire std::memory_order_acquire
+#define memory_order_release std::memory_order_release
+#define memory_order_acq_rel std::memory_order_acq_rel
+#define memory_order_seq_cst std::memory_order_seq_cst
+
 #else /* unavailable, possibly because this is C, not C++ */
 #include
 #include
@@ -266,6 +274,7 @@ typedef enum
 	memory_order_acq_rel = __ATOMIC_ACQ_REL,
 	memory_order_seq_cst = __ATOMIC_SEQ_CST
 } memory_order;
+
 /*
  * 7.17.4 Fences.
  */
diff --git a/vlib/sync/spinlock_test.v b/vlib/sync/spinlock_test.v
new file mode 100644
index 0000000000..92a214a249
--- /dev/null
+++ b/vlib/sync/spinlock_test.v
@@ -0,0 +1,22 @@
+import sync
+
+fn test_spinlock() {
+	mut counter := 0
+	s := sync.new_spin_lock()
+	num_threads := 10
+	mut wg := sync.new_waitgroup()
+	wg.add(num_threads)
+
+	for _ in 0 .. num_threads {
+		spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int) {
+			defer {
+				s.unlock()
+				wg.done()
+			}
+			s.lock()
+			unsafe { (*counter_ref)++ }
+		}(mut wg, s, &counter)
+	}
+	wg.wait()
+	assert counter == num_threads
+}
diff --git a/vlib/sync/sync.c.v b/vlib/sync/sync.c.v
index 62bc037859..e7bb9fae58 100644
--- a/vlib/sync/sync.c.v
+++ b/vlib/sync/sync.c.v
@@ -1,5 +1,7 @@
 module sync
 
+import time
+
 @[noreturn]
 fn cpanic(res int) {
 	panic(unsafe { tos_clone(&u8(C.strerror(res))) })
@@ -15,3 +17,72 @@ fn should_be_zero(res int) {
 		cpanic(res)
 	}
 }
+
+// SpinLock is a mutual exclusion lock that busy-waits (spins) when contended.
+// When one thread holds the lock, any other thread attempting to acquire it
+// will loop repeatedly until the lock becomes available.
+pub struct SpinLock {
+mut:
+	locked  u8     // Lock state: 0 = unlocked, 1 = locked
+	padding [63]u8 // Cache line padding (fills the struct to 64 bytes total)
+}
+
+// new_spin_lock creates and returns a new SpinLock instance, initialized to the unlocked state.
+pub fn new_spin_lock() &SpinLock {
+	mut the_lock := &SpinLock{
+		locked: 0
+	}
+	// Ensure the initialization is visible to other threads
+	C.atomic_thread_fence(C.memory_order_release)
+	return the_lock
+}
+
+// lock acquires the spin lock. If the lock is currently held by another thread,
+// this function will spin (busy-wait) until the lock becomes available.
+@[inline]
+pub fn (s &SpinLock) lock() {
+	// The expected value starts as unlocked (0)
+	mut expected := u8(0)
+	mut spin_count := 0
+	max_spins := 100
+	base_delay := 100 // nanoseconds
+	max_delay := 10000 // nanoseconds (10μs)
+
+	// Busy-wait until the lock is acquired
+	for {
+		// Attempt an atomic compare-and-swap:
+		// it succeeds if the current value matches expected (0),
+		// and then swaps it to locked (1)
+		if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
+			// Prevent critical section operations from being reordered before the acquisition
+			C.atomic_thread_fence(C.memory_order_acquire)
+			return
+		}
+
+		spin_count++
+		// Exponential backoff after max_spins failed attempts
+		if spin_count > max_spins {
+			// Calculate the delay, capped to the 100ns..10μs range
+			exponent := int_min(spin_count / max_spins, 10)
+			delay := int_min(base_delay * (1 << exponent), max_delay)
+			time.sleep(delay * time.nanosecond)
+		} else {
+			// Reduce power consumption and bus contention while spinning
+			C.cpu_relax()
+		}
+
+		// Reset expected to unlocked (0); a failed CAS overwrites it with the observed (locked) value
+		expected = u8(0)
+	}
+}
+
+// unlock releases the spin lock, making it available to other threads.
+// IMPORTANT: it must only be called by the thread that currently holds the lock.
+@[inline]
+pub fn (s &SpinLock) unlock() {
+	// Ensure the critical section completes before the lock is released
+	C.atomic_thread_fence(C.memory_order_release)
+
+	// Atomically reset to the unlocked state
+	C.atomic_store_byte(&s.locked, 0)
+}
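
Note for reviewers: the snippet below is a minimal, single-threaded sketch of how the SpinLock API added in vlib/sync/sync.c.v is meant to be called; it is illustrative only and not part of this patch.

import sync

fn main() {
	s := sync.new_spin_lock()
	mut value := 0
	s.lock()
	// critical section: only one thread at a time may execute this
	value++
	s.unlock()
	println(value) // prints: 1
}

For concurrent usage, see vlib/sync/spinlock_test.v above, where every spawned thread pairs each lock() with exactly one unlock().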