Mirror of https://github.com/vlang/v.git (synced 2025-08-03 09:47:15 -04:00)
sync: fix spin lock, add destroy() and try_lock(), add valgrind annotate support (#24798)
commit d1d43abf5c
parent 6b45931598
@@ -1,22 +1,42 @@
 import sync
+import rand
+import time
 
 fn test_spinlock() {
 	mut counter := 0
 	mut s := sync.new_spin_lock()
+	defer {
+		s.destroy()
+	}
 	num_threads := 10
+	iterations := 10
 	mut wg := sync.new_waitgroup()
 	wg.add(num_threads)
 
 	for _ in 0 .. num_threads {
-		spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int) {
-			defer {
-				wg.done()
-			}
-			s.lock()
-			(*counter_ref)++
-			s.unlock()
-		}(mut wg, s, &counter)
+		spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int, iterations int) {
+			for _ in 0 .. iterations {
+				s.lock()
+
+				unsafe {
+					tmp := *counter_ref
+					randval := rand.intn(100) or { 1 }
+					time.sleep(randval * time.nanosecond)
+
+					(*counter_ref) = tmp + 1
+				}
+				s.unlock()
+			}
+			wg.done()
+		}(mut wg, s, &counter, iterations)
 	}
 	wg.wait()
-	assert counter == num_threads
+	assert counter == num_threads * iterations
+
+	// test try_lock()
+	s.lock()
+	assert s.try_lock() == false
+	s.unlock()
+	assert s.try_lock() == true
+	assert s.try_lock() == false
 }
@@ -109,3 +109,13 @@ fn C.atomic_fetch_sub_u64(voidptr, u64) u64
 
 fn C.atomic_thread_fence(int)
 fn C.cpu_relax()
+
+fn C.ANNOTATE_RWLOCK_CREATE(voidptr)
+fn C.ANNOTATE_RWLOCK_ACQUIRED(voidptr, int)
+fn C.ANNOTATE_RWLOCK_RELEASED(voidptr, int)
+fn C.ANNOTATE_RWLOCK_DESTROY(voidptr)
+
+$if valgrind ? {
+	#flag -I/usr/include/valgrind
+	#include <valgrind/helgrind.h>
+}
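The `C.ANNOTATE_RWLOCK_*` declarations bind to Helgrind's client-request macros from `valgrind/helgrind.h`, which tell Valgrind's thread checker where this custom lock is created, acquired, released, and destroyed, so its internal byte-flipping is not reported as a data race. `valgrind` here is a custom compile-time ident: the `$if valgrind ? {` block (and the header dependency) only exists in builds compiled with `-d valgrind`. A minimal sketch of that flag mechanism, using a hypothetical standalone file flag_demo.v:

	// run with `v -d valgrind run flag_demo.v` to take the first branch;
	// without `-d valgrind` the $if block is compiled out entirely.
	fn main() {
		$if valgrind ? {
			println('valgrind annotations enabled')
		} $else {
			println('valgrind annotations disabled')
		}
	}

The annotated binary can then presumably be checked with `valgrind --tool=helgrind`.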
@@ -21,6 +21,7 @@ fn should_be_zero(res int) {
 // SpinLock is a mutual exclusion lock that busy-waits (spins) when locked.
 // When one thread holds the lock, any other thread attempting to acquire it
 // will loop repeatedly until the lock becomes available.
+@[noinit]
 pub struct SpinLock {
 mut:
 	locked u8 // Lock state: 0 = unlocked, 1 = locked
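The new `@[noinit]` attribute forbids constructing a `SpinLock{}` literal outside the module, so every instance must come from `new_spin_lock()`. That is what guarantees the zero-initialized state, the publishing fence, and (under `-d valgrind`) the `ANNOTATE_RWLOCK_CREATE` call below.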
@@ -34,6 +35,9 @@ pub fn new_spin_lock() &SpinLock {
 	}
 	// Ensure initialization visibility across threads
 	C.atomic_thread_fence(C.memory_order_release)
+	$if valgrind ? {
+		C.ANNOTATE_RWLOCK_CREATE(&the_lock.locked)
+	}
 	return the_lock
 }
 
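The release fence in the constructor is intended to pair with the acquire fences in `lock()` and `try_lock()` below: a thread that later acquires the lock should observe the fully initialized lock state rather than a partially constructed one.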
@@ -54,6 +58,9 @@ pub fn (s &SpinLock) lock() {
 	// Succeeds if current value matches expected (0),
 	// then swaps to locked (1)
 	if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
+		$if valgrind ? {
+			C.ANNOTATE_RWLOCK_ACQUIRED(&s.locked, 1) // 1 = write lock
+		}
 		// Prevent critical section reordering
 		C.atomic_thread_fence(C.memory_order_acquire)
 		return
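`atomic_compare_exchange_weak_byte` is the weak form of CAS, which is allowed to fail spuriously even when the byte equals `expected`; that is fine here because the caller is already in a retry loop. On success, the acquire fence keeps the loads and stores of the critical section from being reordered above the acquisition.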
@@ -71,18 +78,47 @@ pub fn (s &SpinLock) lock() {
 			C.cpu_relax()
 		}
 
 		// Refresh lock state before next attempt
-		expected = C.atomic_load_byte(&s.locked)
+		expected = 0
 	}
 }
 
+// try_lock tries to lock the spin lock instance and returns immediately.
+// If the spin lock was already locked, it will return false.
+@[inline]
+pub fn (s &SpinLock) try_lock() bool {
+	// First do a relaxed load to check if lock is free in order to prevent
+	// unnecessary cache misses if someone does while(!try_lock())
+	// TODO: make a `relaxed` load
+	if C.atomic_load_byte(&s.locked) == 0 {
+		mut expected := u8(0)
+		if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
+			$if valgrind ? {
+				C.ANNOTATE_RWLOCK_ACQUIRED(&s.locked, 1)
+			}
+			C.atomic_thread_fence(C.memory_order_acquire)
+			return true
+		}
+	}
+	return false
+}
+
 // unlock releases the spin lock, making it available to other threads.
+// IMPORTANT: Must only be called by the thread that currently holds the lock.
 @[inline]
 pub fn (s &SpinLock) unlock() {
+	$if valgrind ? {
+		C.ANNOTATE_RWLOCK_RELEASED(&s.locked, 1) // 1 = write lock
+	}
 	// Ensure critical section completes before release
 	C.atomic_thread_fence(C.memory_order_release)
 
 	// Atomically reset to unlocked state
 	C.atomic_store_byte(&s.locked, 0)
 }
+
+// destroy frees the resources associated with the spin lock instance.
+pub fn (s &SpinLock) destroy() {
+	$if valgrind ? {
+		C.ANNOTATE_RWLOCK_DESTROY(&s.locked)
+	}
+}