// v/vlib/sync/sync.c.v

module sync

import time

// cpanic panics with the string form of the C error code `res`.
@[noreturn]
fn cpanic(res int) {
	panic(unsafe { tos_clone(&u8(C.strerror(res))) })
}

// cpanic_errno panics with the string form of the current C `errno`.
@[noreturn]
fn cpanic_errno() {
	cpanic(C.errno)
}

// should_be_zero panics if `res`, a C style return code, is not 0.
fn should_be_zero(res int) {
	if res != 0 {
		cpanic(res)
	}
}
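
// Illustrative use of `should_be_zero` (a sketch, not part of this file):
// it wraps C calls that return 0 on success and an errno-style code on
// failure, e.g. a pthread call such as:
//
//	should_be_zero(C.pthread_mutex_init(&mutex, C.NULL))
//
// where `mutex` is assumed to be a C.pthread_mutex_t value.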

// SpinLock is a mutual exclusion lock that busy-waits (spins) when locked.
// When one thread holds the lock, any other thread attempting to acquire it
// will loop repeatedly until the lock becomes available.
pub struct SpinLock {
mut:
	locked  u8     // Lock state: 0 = unlocked, 1 = locked
	padding [63]u8 // Cache line padding (fills to 64 bytes total, so the lock does not share a cache line with other data)
}

// new_spin_lock creates and returns a new SpinLock instance, initialized to the unlocked state.
pub fn new_spin_lock() &SpinLock {
	mut the_lock := &SpinLock{
		locked: 0
	}
	// Ensure initialization visibility across threads
	C.atomic_thread_fence(C.memory_order_release)
	return the_lock
}
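
// Sanity check sketch (illustrative; `spinlock_size_example` is a hypothetical
// helper, not part of this file): the 1 byte lock flag plus 63 bytes of
// padding should make the struct exactly one 64 byte cache line.
fn spinlock_size_example() {
	assert sizeof(SpinLock) == 64
}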

// lock acquires the spin lock. If the lock is currently held by another thread,
// this function will spin (busy-wait) until the lock becomes available.
@[inline]
pub fn (s &SpinLock) lock() {
	// The expected value starts as unlocked (0)
	mut expected := u8(0)
	mut spin_count := 0
	max_spins := 100
	base_delay := 100 // nanoseconds
	max_delay := 10000 // nanoseconds (10μs)
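	// Worked example of the backoff schedule below: sleeping starts after
	// 100 spins at 200ns per attempt, doubling every further 100 spins
	// (400ns, 800ns, ...) until the delay reaches the 10μs cap from
	// spin_count = 700 onwards.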
	// Busy-wait until the lock is acquired
	for {
		// Attempt an atomic compare-and-swap:
		// it succeeds if the current value matches `expected` (0),
		// and then swaps the state to locked (1)
		if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
			// Prevent reordering of the critical section before the acquire
			C.atomic_thread_fence(C.memory_order_acquire)
			return
		}
		spin_count++
		// Exponential backoff after max_spins
		if spin_count > max_spins {
			// Calculate the delay, capped between 100ns and 10μs
			exponent := int_min(spin_count / max_spins, 10)
			delay := int_min(base_delay * (1 << exponent), max_delay)
			time.sleep(delay * time.nanosecond)
		} else {
			// Reduce power/bus contention while spinning
			C.cpu_relax()
		}
		// Refresh the observed lock state before the next attempt
		expected = C.atomic_load_byte(&s.locked)
	}
}

// unlock releases the spin lock, making it available to other threads.
// IMPORTANT: it must only be called by the thread that currently holds the lock.
@[inline]
pub fn (s &SpinLock) unlock() {
	// Ensure the critical section completes before the release
	C.atomic_thread_fence(C.memory_order_release)
	// Atomically reset to the unlocked state
	C.atomic_store_byte(&s.locked, 0)
}