sync: add SpinLock (#24788)

kbkpbot 2025-06-26 21:34:00 +08:00 committed by GitHub
parent 5cd799e5ac
commit 06c6554480
3 changed files with 102 additions and 0 deletions


@@ -120,6 +120,14 @@ using std::memory_order_consume;
using std::memory_order_relaxed;
using std::memory_order_release;
using std::memory_order_seq_cst;
#define memory_order_relaxed std::memory_order_relaxed
#define memory_order_consume std::memory_order_consume
#define memory_order_acquire std::memory_order_acquire
#define memory_order_release std::memory_order_release
#define memory_order_acq_rel std::memory_order_acq_rel
#define memory_order_seq_cst std::memory_order_seq_cst
#else /* <atomic> unavailable, possibly because this is C, not C++ */
#include <sys/types.h>
#include <stdbool.h>
@@ -266,6 +274,7 @@ typedef enum
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
/*
* 7.17.4 Fences.
*/

vlib/sync/spinlock_test.v (new file, +22)

@@ -0,0 +1,22 @@
import sync

fn test_spinlock() {
    mut counter := 0
    mut s := sync.new_spin_lock()
    num_threads := 10
    mut wg := sync.new_waitgroup()
    wg.add(num_threads)
    for _ in 0 .. num_threads {
        spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int) {
            defer {
                s.unlock()
                wg.done()
            }
            s.lock()
            // dereferencing a raw pointer requires an unsafe block in V
            unsafe {
                (*counter_ref)++
            }
        }(mut wg, s, &counter)
    }
    wg.wait()
    assert counter == num_threads
}


@@ -1,5 +1,7 @@
module sync

import time

@[noreturn]
fn cpanic(res int) {
    panic(unsafe { tos_clone(&u8(C.strerror(res))) })
@@ -15,3 +17,72 @@ fn should_be_zero(res int) {
        cpanic(res)
    }
}

// SpinLock is a mutual exclusion lock that busy-waits (spins) when locked.
// When one thread holds the lock, any other thread attempting to acquire it
// will loop repeatedly until the lock becomes available.
pub struct SpinLock {
mut:
    locked  u8     // Lock state: 0 = unlocked, 1 = locked
    padding [63]u8 // Cache line padding (fills the struct to 64 bytes total)
}

// new_spin_lock creates and returns a new SpinLock instance, initialized to the unlocked state.
pub fn new_spin_lock() &SpinLock {
    mut the_lock := &SpinLock{
        locked: 0
    }
    // Ensure the initialization is visible to other threads
    C.atomic_thread_fence(C.memory_order_release)
    return the_lock
}

// lock acquires the spin lock. If the lock is currently held by another thread,
// this function will spin (busy-wait) until the lock becomes available.
@[inline]
pub fn (s &SpinLock) lock() {
    // The expected value starts as unlocked (0)
    mut expected := u8(0)
    mut spin_count := 0
    max_spins := 100
    base_delay := 100 // nanoseconds
    max_delay := 10000 // nanoseconds (10μs)
    // Busy-wait until the lock is acquired
    for {
        // Attempt an atomic compare-and-swap:
        // it succeeds if the current value matches expected (0),
        // and then swaps it to locked (1)
        if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
            // Prevent reordering of the critical section before the acquire
            C.atomic_thread_fence(C.memory_order_acquire)
            return
        }
        spin_count++
        // Exponential backoff after max_spins failed attempts
        if spin_count > max_spins {
            // Calculate the delay, with a cap: 100ns to 10μs
            exponent := int_min(spin_count / max_spins, 10)
            delay := int_min(base_delay * (1 << exponent), max_delay)
            time.sleep(delay * time.nanosecond)
        } else {
            // Reduce power/bus contention during spinning
            C.cpu_relax()
        }
        // Reset expected to unlocked (0) before the next attempt; a failed
        // CAS overwrites it with the current value, and retrying the CAS with
        // expected == 1 would succeed spuriously while the lock is still held.
        expected = u8(0)
    }
}

// unlock releases the spin lock, making it available to other threads.
// IMPORTANT: it must only be called by the thread that currently holds the lock.
@[inline]
pub fn (s &SpinLock) unlock() {
    // Ensure the critical section completes before the release
    C.atomic_thread_fence(C.memory_order_release)
    // Atomically reset to the unlocked state
    C.atomic_store_byte(&s.locked, 0)
}