sync: fix spin lock, add destroy() and try_lock(), add valgrind annotate support (#24798)

kbkpbot 2025-06-27 21:13:44 +08:00 committed by GitHub
parent 6b45931598
commit d1d43abf5c
3 changed files with 75 additions and 9 deletions
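
For orientation, here is a minimal usage sketch of the API this commit extends, mirroring the updated test below; the snippet is illustrative and not part of the diff. new_spin_lock() allocates the lock, lock()/unlock() guard a critical section, try_lock() is the new non-blocking acquire, and destroy() tears the lock down when it is no longer needed.

import sync

fn main() {
	s := sync.new_spin_lock()
	defer {
		s.destroy()
	}

	s.lock()
	// ... critical section ...
	s.unlock()

	// Non-blocking acquire: returns false instead of spinning if the lock is busy.
	if s.try_lock() {
		println('acquired without spinning')
		s.unlock()
	} else {
		println('lock is busy, doing something else instead of blocking')
	}
}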

View File

@@ -1,22 +1,42 @@
 import sync
+import rand
+import time
 
 fn test_spinlock() {
 	mut counter := 0
 	mut s := sync.new_spin_lock()
+	defer {
+		s.destroy()
+	}
 	num_threads := 10
+	iterations := 10
 	mut wg := sync.new_waitgroup()
 	wg.add(num_threads)
 	for _ in 0 .. num_threads {
-		spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int) {
-			defer {
-				wg.done()
-			}
-			s.lock()
-			(*counter_ref)++
-		}(mut wg, s, &counter)
+		spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int, iterations int) {
+			for _ in 0 .. iterations {
+				s.lock()
+				unsafe {
+					tmp := *counter_ref
+					randval := rand.intn(100) or { 1 }
+					time.sleep(randval * time.nanosecond)
+					(*counter_ref) = tmp + 1
+				}
+				s.unlock()
+			}
+			wg.done()
+		}(mut wg, s, &counter, iterations)
 	}
 	wg.wait()
-	assert counter == num_threads
+	assert counter == num_threads * iterations
+	// test try_lock()
+	s.lock()
+	assert s.try_lock() == false
+	s.unlock()
+	assert s.try_lock() == true
+	assert s.try_lock() == false
 }

View File

@@ -109,3 +109,13 @@ fn C.atomic_fetch_sub_u64(voidptr, u64) u64
 fn C.atomic_thread_fence(int)
 fn C.cpu_relax()
+
+fn C.ANNOTATE_RWLOCK_CREATE(voidptr)
+fn C.ANNOTATE_RWLOCK_ACQUIRED(voidptr, int)
+fn C.ANNOTATE_RWLOCK_RELEASED(voidptr, int)
+fn C.ANNOTATE_RWLOCK_DESTROY(voidptr)
+
+$if valgrind ? {
+	#flag -I/usr/include/valgrind
+	#include <valgrind/helgrind.h>
+}
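
A note on the `$if valgrind ? { ... }` blocks used here and in the spin lock implementation below: they are V compile-time conditionals, so the `#flag`/`#include` lines and the annotation calls are only compiled in when the module is built with `-d valgrind`; a normal build pays nothing for them. The C.ANNOTATE_RWLOCK_* declarations map onto Helgrind's client-request macros from <valgrind/helgrind.h>, which let `valgrind --tool=helgrind` track the spin lock as a write lock. A tiny standalone sketch of the flag mechanism (file name and messages are illustrative):

// ct_flag.v - run with `v run ct_flag.v`, or `v -d valgrind run ct_flag.v`
fn main() {
	$if valgrind ? {
		println('built with -d valgrind: annotation code is compiled in')
	} $else {
		println('normal build: annotation code is compiled out')
	}
}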

View File

@@ -21,6 +21,7 @@ fn should_be_zero(res int) {
 // SpinLock is a mutual exclusion lock that busy-waits (spins) when locked.
 // When one thread holds the lock, any other thread attempting to acquire it
 // will loop repeatedly until the lock becomes available.
+@[noinit]
 pub struct SpinLock {
 mut:
 	locked u8 // Lock state: 0 = unlocked, 1 = locked
@@ -34,6 +35,9 @@ pub fn new_spin_lock() &SpinLock {
 	}
 	// Ensure initialization visibility across threads
 	C.atomic_thread_fence(C.memory_order_release)
+	$if valgrind ? {
+		C.ANNOTATE_RWLOCK_CREATE(&the_lock.locked)
+	}
 	return the_lock
 }
@@ -54,6 +58,9 @@ pub fn (s &SpinLock) lock() {
 		// Succeeds if current value matches expected (0),
 		// then swaps to locked (1)
 		if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
+			$if valgrind ? {
+				C.ANNOTATE_RWLOCK_ACQUIRED(&s.locked, 1) // 1 = write lock
+			}
 			// Prevent critical section reordering
 			C.atomic_thread_fence(C.memory_order_acquire)
 			return
@@ -71,18 +78,47 @@ pub fn (s &SpinLock) lock() {
 			C.cpu_relax()
 		}
 		// Refresh lock state before next attempt
-		expected = C.atomic_load_byte(&s.locked)
+		expected = 0
 	}
 }
 
+// try_lock tries to lock the spin lock instance and returns immediately.
+// If the spin lock was already locked, it returns false.
+@[inline]
+pub fn (s &SpinLock) try_lock() bool {
+	// First do a relaxed load to check if the lock is free, in order to prevent
+	// unnecessary cache misses if someone does while(!try_lock())
+	// TODO: make a `relaxed` load
+	if C.atomic_load_byte(&s.locked) == 0 {
+		mut expected := u8(0)
+		if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
+			$if valgrind ? {
+				C.ANNOTATE_RWLOCK_ACQUIRED(&s.locked, 1)
+			}
+			C.atomic_thread_fence(C.memory_order_acquire)
+			return true
+		}
+	}
+	return false
+}
+
 // unlock releases the spin lock, making it available to other threads.
 // IMPORTANT: Must only be called by the thread that currently holds the lock.
 @[inline]
 pub fn (s &SpinLock) unlock() {
+	$if valgrind ? {
+		C.ANNOTATE_RWLOCK_RELEASED(&s.locked, 1) // 1 = write lock
+	}
 	// Ensure critical section completes before release
 	C.atomic_thread_fence(C.memory_order_release)
 	// Atomically reset to unlocked state
 	C.atomic_store_byte(&s.locked, 0)
 }
+
+// destroy frees the resources associated with the spin lock instance.
+pub fn (s &SpinLock) destroy() {
+	$if valgrind ? {
+		C.ANNOTATE_RWLOCK_DESTROY(&s.locked)
+	}
+}
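
The heart of the "fix spin lock" part of this commit is the one-line change in lock() above: `expected = C.atomic_load_byte(&s.locked)` becomes `expected = 0`. Per C11 semantics, a failed compare-exchange writes the observed value back into `expected`, so reloading the lock byte into `expected` lets a retry "succeed" by exchanging 1 for 1 while another thread still holds the lock; resetting `expected` to 0 means the exchange can only succeed once the holder has stored 0 in unlock(). A plain, single-threaded V sketch of that failure mode (no real atomics; the `cas` helper below is a hypothetical stand-in for `C.atomic_compare_exchange_weak_byte`):

// cas models the compare-and-swap used by lock(): it succeeds only when the
// lock byte equals `expected`, and returns the value that a failed attempt
// would write back into `expected`.
fn cas(locked u8, expected u8, desired u8) (bool, u8) {
	if locked == expected {
		return true, desired
	}
	return false, locked
}

fn main() {
	// Another thread currently holds the lock, so the lock byte is 1.
	locked := u8(1)
	mut expected := u8(0)
	mut ok := false

	// Old retry loop: a failed attempt leaves `expected` equal to the observed
	// value (1), so the next attempt compares 1 == 1 and "acquires" a lock that
	// was never released.
	ok, expected = cas(locked, expected, 1)
	assert !ok && expected == 1
	ok, expected = cas(locked, expected, 1)
	assert ok // broken: lock() would return while the other thread still holds the lock

	// Fixed retry loop: `expected` is reset to 0 before every attempt, so the
	// exchange can only succeed after unlock() has stored 0.
	expected = 0
	ok, expected = cas(locked, expected, 1)
	assert !ok
	expected = 0
	ok, expected = cas(u8(0), expected, 1) // retried after the holder called unlock()
	assert ok
}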