sync.stdatomic: fix bug with add() and sub() returning the new values, add voidptr support, add swap() and compare_and_swap() (#24685)

This commit is contained in:
kbkpbot 2025-06-10 14:05:11 +08:00 committed by GitHub
parent 174065f16f
commit 0c495d07d7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 321 additions and 91 deletions

View File

@ -693,35 +693,35 @@ extern inline unsigned long long __aarch64_ldeor8_relax(unsigned long long*ptr,
// Since V might be confused with "generic" C functions either we provide special versions
// for gcc/clang, too
// Atomically loads and returns the 64-bit value at *x with seq_cst ordering.
// (Diff-artifact fix: the interleaved old `unsigned long long*` duplicate
// signature/body is removed; the committed `uint64_t` version is kept.)
static inline unsigned long long atomic_load_u64(uint64_t* x) {
	return atomic_load_explicit((_Atomic (uint64_t)*)x, memory_order_seq_cst);
}
// Atomically stores y into *x with seq_cst ordering.
static inline void atomic_store_u64(uint64_t* x, uint64_t y) {
	atomic_store_explicit((_Atomic(uint64_t)*)x, y, memory_order_seq_cst);
}
// Weak CAS: if *x == *expected, stores y into *x and returns nonzero;
// otherwise copies the current *x into *expected and returns 0.
// May fail spuriously (weak) — callers must retry in a loop.
static inline int atomic_compare_exchange_weak_u64(uint64_t* x, uint64_t* expected, uint64_t y) {
	return (int)atomic_compare_exchange_weak_explicit((_Atomic(uint64_t)*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst);
}
// Strong CAS: if *x == *expected, stores y into *x and returns nonzero;
// otherwise copies the current *x into *expected and returns 0. Never
// fails spuriously.
static inline int atomic_compare_exchange_strong_u64(uint64_t* x, uint64_t* expected, uint64_t y) {
	return (int)atomic_compare_exchange_strong_explicit((_Atomic(uint64_t)*)x, expected, y, memory_order_seq_cst, memory_order_seq_cst);
}
// Atomically replaces *x with y and returns the previous value.
static inline unsigned long long atomic_exchange_u64(uint64_t* x, uint64_t y) {
	return atomic_exchange_explicit((_Atomic(uint64_t)*)x, y, memory_order_seq_cst);
}
// Atomically adds y to *x and returns the PREVIOUS value (fetch-then-add).
static inline unsigned long long atomic_fetch_add_u64(uint64_t* x, uint64_t y) {
	return atomic_fetch_add_explicit((_Atomic(uint64_t)*)x, y, memory_order_seq_cst);
}
// Atomically subtracts y from *x and returns the PREVIOUS value.
static inline unsigned long long atomic_fetch_sub_u64(uint64_t* x, uint64_t y) {
	return atomic_fetch_sub_explicit((_Atomic(uint64_t)*)x, y, memory_order_seq_cst);
}
// Atomically ANDs y into *x and returns the PREVIOUS value.
static inline unsigned long long atomic_fetch_and_u64(uint64_t* x, uint64_t y) {
	return atomic_fetch_and_explicit((_Atomic(uint64_t)*)x, y, memory_order_seq_cst);
}
// Atomically ORs y into *x and returns the PREVIOUS value.
static inline unsigned long long atomic_fetch_or_u64(uint64_t* x, uint64_t y) {
	return atomic_fetch_or_explicit((_Atomic(uint64_t)*)x, y, memory_order_seq_cst);
}
// Atomically XORs y into *x and returns the PREVIOUS value.
static inline unsigned long long atomic_fetch_xor_u64(uint64_t* x, uint64_t y) {
	return atomic_fetch_xor_explicit((_Atomic(uint64_t)*)x, y, memory_order_seq_cst);
}

View File

@ -294,8 +294,20 @@ static inline int atomic_compare_exchange_strong_u32(unsigned volatile * object,
#else
#define InterlockedExchange16 ManualInterlockedExchange16
#define InterlockedExchangeAdd16 ManualInterlockedExchangeAdd16
// Fallback for toolchains lacking the InterlockedExchange16 intrinsic:
// atomically swaps *object with `desired` using the x86 XCHG instruction
// (XCHG with a memory operand is implicitly locked, so no explicit `lock`
// prefix is needed). Returns the previous value of *object.
static inline uint16_t ManualInterlockedExchange16(volatile uint16_t* object, uint16_t desired) {
	__asm__ __volatile__ (
		"xchgw %0, %1"
		: "+r" (desired),   // in/out: new value in, previous *object out
		"+m" (*object)      // in/out: the 16-bit memory word being swapped
		:
		: "memory"          // compiler barrier: no reordering of memory ops across the swap
	);
	return desired;         // after XCHG this register holds the old *object
}
static inline unsigned short ManualInterlockedExchangeAdd16(unsigned short volatile* Addend, unsigned short Value) {
__asm__ __volatile__ (
"lock xaddw %w[value], %[mem]"
@ -385,12 +397,23 @@ static inline int atomic_compare_exchange_strong_u16(unsigned short volatile * o
#else
#define InterlockedExchange8 ManualInterlockedExchange8
#define InterlockedCompareExchange8 ManualInterlockedCompareExchange8
#define InterlockedExchangeAdd8 ManualInterlockedExchangeAdd8
#define InterlockedOr8 ManualInterlockedOr8
#define InterlockedXor8 ManualInterlockedXor8
#define InterlockedAnd8 ManualInterlockedAnd8
// Fallback for toolchains lacking the InterlockedExchange8 intrinsic:
// atomically swaps *object with `desired` via x86 XCHG on a byte
// (implicitly locked). Returns the previous value of *object.
// The "q" constraint requests a byte-addressable register (a/b/c/d on i386).
static inline char ManualInterlockedExchange8(char volatile* object, char desired) {
	__asm__ __volatile__ (
		"xchgb %0, %1"
		: "+q" (desired), "+m" (*object)  // new value in / old value out; memory byte swapped
		:
		: "memory"                        // compiler barrier around the atomic swap
	);
	return desired;                       // previous *object
}
static inline unsigned char ManualInterlockedCompareExchange8(unsigned char volatile * dest, unsigned char exchange, unsigned char comparand) {
unsigned char result;

View File

@ -69,8 +69,6 @@ pub struct AtomicVal[T] {
// new_atomic creates a new atomic value of `T` type
@[inline]
pub fn new_atomic[T](val T) &AtomicVal[T] {
// can't use `$if T is $int || T is bool` with $compile_error() now
// see issue #24562
$if T is $int {
return &AtomicVal[T]{
val: val
@ -79,8 +77,12 @@ pub fn new_atomic[T](val T) &AtomicVal[T] {
return &AtomicVal[T]{
val: val
}
} $else $if T is voidptr {
return &AtomicVal[T]{
val: val
}
} $else {
$compile_error('atomic: only support number and bool types')
$compile_error('atomic: only support number, bool, and voidptr types')
}
return unsafe { nil }
}
@ -99,19 +101,24 @@ pub fn (mut a AtomicVal[T]) load() T {
} $else $if T is u64 || T is i64 {
return T(C.atomic_load_u64(voidptr(&a.val)))
} $else $if T is int {
// TODO: remove this test or a compile time support $if sizeof() ==
if sizeof(int) == 4 {
return int(C.atomic_load_u32(voidptr(&a.val)))
} else {
return int(C.atomic_load_u64(voidptr(&a.val)))
}
} $else $if T is isize || T is usize {
// TODO: remove this test or a compile time support $if sizeof() ==
if sizeof(isize) == 4 {
return T(C.atomic_load_u32(voidptr(&a.val)))
} else {
return T(C.atomic_load_u64(voidptr(&a.val)))
}
} $else $if T is voidptr {
// TODO: this should be $if sizeof(T) == 4
$if x32 {
return T(C.atomic_load_u32(voidptr(&a.val)))
} $else {
return T(C.atomic_load_u64(voidptr(&a.val)))
}
}
return a.val
}
@ -120,90 +127,207 @@ pub fn (mut a AtomicVal[T]) load() T {
// store atomically stores `val` into the atomic value.
// Diff-artifact fix: the rendered diff interleaved the old (uncast) and new
// (explicitly cast) call lines; this is the committed version, which casts
// `val` to the exact unsigned width expected by the C helpers and adds the
// voidptr branch.
@[inline]
pub fn (mut a AtomicVal[T]) store(val T) {
	$if T is bool {
		C.atomic_store_byte(voidptr(&a.val), u8(val))
	} $else $if T is u8 || T is i8 {
		C.atomic_store_byte(voidptr(&a.val), u8(val))
	} $else $if T is u16 || T is i16 {
		C.atomic_store_u16(voidptr(&a.val), u16(val))
	} $else $if T is u32 || T is i32 {
		C.atomic_store_u32(voidptr(&a.val), u32(val))
	} $else $if T is u64 || T is i64 {
		C.atomic_store_u64(voidptr(&a.val), u64(val))
	} $else $if T is int {
		// TODO: remove this test or a compile time support $if sizeof() ==
		if sizeof(int) == 4 {
			C.atomic_store_u32(voidptr(&a.val), u32(val))
		} else {
			C.atomic_store_u64(voidptr(&a.val), u64(val))
		}
	} $else $if T is isize || T is usize {
		// TODO: remove this test or a compile time support $if sizeof() ==
		if sizeof(isize) == 4 {
			C.atomic_store_u32(voidptr(&a.val), u32(val))
		} else {
			C.atomic_store_u64(voidptr(&a.val), u64(val))
		}
	} $else $if T is voidptr {
		// TODO: this should be $if sizeof(T) == 4
		$if x32 {
			C.atomic_store_u32(voidptr(&a.val), u32(val))
		} $else {
			C.atomic_store_u64(voidptr(&a.val), u64(val))
		}
	}
}
// add atomically adds `delta` to the atomic value and returns the PREVIOUS
// value (fetch-and-add semantics — this is the bug fix this commit makes:
// previously the helpers' return value was discarded and the caller got a
// racy re-read). bool and voidptr do not support arithmetic and panic.
// Diff-artifact fix: old/new interleaved lines resolved to the committed version.
@[inline]
pub fn (mut a AtomicVal[T]) add(delta T) T {
	$if T is bool {
		panic('atomic: add() not supported for bool type')
	} $else $if T is voidptr {
		panic('atomic: add() not supported for voidptr type')
	} $else $if T is u8 || T is i8 {
		old := C.atomic_fetch_add_byte(voidptr(&a.val), u8(delta))
		return T(old)
	} $else $if T is u16 || T is i16 {
		old := C.atomic_fetch_add_u16(voidptr(&a.val), u16(delta))
		return T(old)
	} $else $if T is u32 || T is i32 {
		old := C.atomic_fetch_add_u32(voidptr(&a.val), u32(delta))
		return T(old)
	} $else $if T is u64 || T is i64 {
		old := C.atomic_fetch_add_u64(voidptr(&a.val), u64(delta))
		return T(old)
	} $else $if T is int {
		// TODO: remove this test or a compile time support $if sizeof() ==
		if sizeof(int) == 4 {
			old := C.atomic_fetch_add_u32(voidptr(&a.val), u32(delta))
			return T(old)
		} else {
			old := C.atomic_fetch_add_u64(voidptr(&a.val), u64(delta))
			return T(old)
		}
	} $else $if T is isize || T is usize {
		// TODO: remove this test or a compile time support $if sizeof() ==
		if sizeof(isize) == 4 {
			old := C.atomic_fetch_add_u32(voidptr(&a.val), u32(delta))
			return T(old)
		} else {
			old := C.atomic_fetch_add_u64(voidptr(&a.val), u64(delta))
			return T(old)
		}
	}
	panic('unreachable')
}
// sub atomically subtracts `delta` from the atomic value and returns the
// PREVIOUS value (fetch-and-sub semantics; mirrors add()).
// bool and voidptr do not support arithmetic and panic.
// Diff-artifact fix: old/new interleaved lines resolved to the committed version.
@[inline]
pub fn (mut a AtomicVal[T]) sub(delta T) T {
	$if T is bool {
		panic('atomic: sub() not supported for bool type')
	} $else $if T is voidptr {
		panic('atomic: sub() not supported for voidptr type')
	} $else $if T is u8 || T is i8 {
		old := C.atomic_fetch_sub_byte(voidptr(&a.val), u8(delta))
		return T(old)
	} $else $if T is u16 || T is i16 {
		old := C.atomic_fetch_sub_u16(voidptr(&a.val), u16(delta))
		return T(old)
	} $else $if T is u32 || T is i32 {
		old := C.atomic_fetch_sub_u32(voidptr(&a.val), u32(delta))
		return T(old)
	} $else $if T is u64 || T is i64 {
		old := C.atomic_fetch_sub_u64(voidptr(&a.val), u64(delta))
		return T(old)
	} $else $if T is int {
		// TODO: remove this test or a compile time support $if sizeof() ==
		if sizeof(int) == 4 {
			old := C.atomic_fetch_sub_u32(voidptr(&a.val), u32(delta))
			return T(old)
		} else {
			old := C.atomic_fetch_sub_u64(voidptr(&a.val), u64(delta))
			return T(old)
		}
	} $else $if T is isize || T is usize {
		// TODO: remove this test or a compile time support $if sizeof() ==
		if sizeof(isize) == 4 {
			old := C.atomic_fetch_sub_u32(voidptr(&a.val), u32(delta))
			return T(old)
		} else {
			old := C.atomic_fetch_sub_u64(voidptr(&a.val), u64(delta))
			return T(old)
		}
	}
	panic('unreachable')
}
// swap atomically replaces the stored value with `new` and returns the
// value that was stored immediately before the exchange.
@[inline]
pub fn (mut a AtomicVal[T]) swap(new T) T {
	$if T is bool {
		// bool is backed by a single byte; convert the raw byte back to bool
		prev := C.atomic_exchange_byte(voidptr(&a.val), u8(new))
		return prev != 0
	} $else $if T is u8 || T is i8 {
		prev := C.atomic_exchange_byte(voidptr(&a.val), u8(new))
		return T(prev)
	} $else $if T is u16 || T is i16 {
		prev := C.atomic_exchange_u16(voidptr(&a.val), u16(new))
		return T(prev)
	} $else $if T is u32 || T is i32 {
		prev := C.atomic_exchange_u32(voidptr(&a.val), u32(new))
		return T(prev)
	} $else $if T is u64 || T is i64 {
		prev := C.atomic_exchange_u64(voidptr(&a.val), u64(new))
		return T(prev)
	} $else $if T is int {
		// `int` width is platform dependent; pick the matching helper at runtime
		if sizeof(int) == 4 {
			prev := C.atomic_exchange_u32(voidptr(&a.val), u32(new))
			return T(prev)
		} else {
			prev := C.atomic_exchange_u64(voidptr(&a.val), u64(new))
			return T(prev)
		}
	} $else $if T is isize || T is usize {
		if sizeof(isize) == 4 {
			prev := C.atomic_exchange_u32(voidptr(&a.val), u32(new))
			return T(prev)
		} else {
			prev := C.atomic_exchange_u64(voidptr(&a.val), u64(new))
			return T(prev)
		}
	} $else $if T is voidptr {
		// TODO: this should be $if sizeof(T) == 4
		$if x32 {
			prev := C.atomic_exchange_u32(voidptr(&a.val), u32(new))
			return T(prev)
		} $else {
			prev := C.atomic_exchange_u64(voidptr(&a.val), u64(new))
			return T(prev)
		}
	}
	panic('unreachable')
}
// compare_and_swap performs an atomic compare-and-swap (CAS):
// when the stored value equals `expected` it is replaced by `new` and
// true is returned; otherwise nothing is written and false is returned.
@[inline]
pub fn (mut a AtomicVal[T]) compare_and_swap(expected T, new T) bool {
	$if T is bool {
		// bool is backed by a single byte
		mut cur := u8(expected)
		return C.atomic_compare_exchange_strong_byte(voidptr(&a.val), &cur, u8(new))
	} $else $if T is u8 || T is i8 {
		mut cur := u8(expected)
		return C.atomic_compare_exchange_strong_byte(voidptr(&a.val), &cur, u8(new))
	} $else $if T is u16 || T is i16 {
		mut cur := u16(expected)
		return C.atomic_compare_exchange_strong_u16(voidptr(&a.val), &cur, u16(new))
	} $else $if T is u32 || T is i32 {
		mut cur := u32(expected)
		return C.atomic_compare_exchange_strong_u32(voidptr(&a.val), &cur, u32(new))
	} $else $if T is u64 || T is i64 {
		mut cur := u64(expected)
		return C.atomic_compare_exchange_strong_u64(voidptr(&a.val), &cur, u64(new))
	} $else $if T is int {
		// `int` width is platform dependent; pick the matching helper at runtime
		if sizeof(int) == 4 {
			mut cur := u32(expected)
			return C.atomic_compare_exchange_strong_u32(voidptr(&a.val), &cur, u32(new))
		} else {
			mut cur := u64(expected)
			return C.atomic_compare_exchange_strong_u64(voidptr(&a.val), &cur, u64(new))
		}
	} $else $if T is isize || T is usize {
		if sizeof(isize) == 4 {
			mut cur := u32(expected)
			return C.atomic_compare_exchange_strong_u32(voidptr(&a.val), &cur, u32(new))
		} else {
			mut cur := u64(expected)
			return C.atomic_compare_exchange_strong_u64(voidptr(&a.val), &cur, u64(new))
		}
	} $else $if T is voidptr {
		// TODO: this should be $if sizeof(T) == 4
		$if x32 {
			mut cur := u32(expected)
			return C.atomic_compare_exchange_strong_u32(voidptr(&a.val), &cur, u32(new))
		} $else {
			mut cur := u64(expected)
			return C.atomic_compare_exchange_strong_u64(voidptr(&a.val), &cur, u64(new))
		}
	}
	panic('unreachable')
}

View File

@ -100,92 +100,175 @@ fn test_atomic_vals() {
assert v_bool.load() == true
v_bool.store(false)
assert v_bool.load() == false
assert v_bool.swap(true) == false
assert v_bool.swap(false) == true
assert v_bool.compare_and_swap(false, true) == true
assert v_bool.load() == true
mut v_i8 := stdatomic.new_atomic(i8(-33))
v_i8.store(-34)
assert v_i8.load() == -34
v_i8.add(10)
assert v_i8.add(10) == -34
assert v_i8.load() == -24
v_i8.sub(7)
assert v_i8.sub(7) == -24
assert v_i8.load() == -31
mut new_i8 := i8(-20)
assert v_i8.swap(new_i8) == -31
assert v_i8.swap(-31) == new_i8
assert v_i8.compare_and_swap(-31, new_i8) == true
assert v_i8.compare_and_swap(new_i8, -32) == true
assert v_i8.load() == -32
mut v_u8 := stdatomic.new_atomic(u8(33))
v_u8.store(34)
assert v_u8.load() == 34
v_u8.add(10)
assert v_u8.add(10) == 34
assert v_u8.load() == 44
v_u8.sub(7)
assert v_u8.sub(7) == 44
assert v_u8.load() == 37
mut new_u8 := u8(20)
assert v_u8.swap(new_u8) == 37
assert v_u8.swap(37) == new_u8
assert v_u8.compare_and_swap(37, new_u8) == true
assert v_u8.compare_and_swap(new_u8, 38) == true
assert v_u8.load() == 38
mut v_i16 := stdatomic.new_atomic(i16(-333))
v_i16.store(-334)
assert v_i16.load() == -334
v_i16.add(10)
assert v_i16.add(10) == -334
assert v_i16.load() == -324
v_i16.sub(7)
assert v_i16.sub(7) == -324
assert v_i16.load() == -331
mut new_i16 := i16(-200)
assert v_i16.swap(new_i16) == -331
assert v_i16.swap(-331) == new_i16
assert v_i16.compare_and_swap(-331, new_i16) == true
assert v_i16.compare_and_swap(new_i16, -332) == true
assert v_i16.load() == -332
mut v_u16 := stdatomic.new_atomic(u16(333))
v_u16.store(334)
assert v_u16.load() == 334
v_u16.add(10)
assert v_u16.add(10) == 334
assert v_u16.load() == 344
v_u16.sub(7)
assert v_u16.sub(7) == 344
assert v_u16.load() == 337
mut new_u16 := u16(200)
assert v_u16.swap(new_u16) == 337
assert v_u16.swap(337) == new_u16
assert v_u16.compare_and_swap(337, new_u16) == true
assert v_u16.compare_and_swap(new_u16, 332) == true
assert v_u16.load() == 332
mut v_i32 := stdatomic.new_atomic(i32(-3333))
v_i32.store(-3334)
assert v_i32.load() == -3334
v_i32.add(10)
assert v_i32.add(10) == -3334
assert v_i32.load() == -3324
v_i32.sub(7)
assert v_i32.sub(7) == -3324
assert v_i32.load() == -3331
mut new_i32 := i32(-2000)
assert v_i32.swap(new_i32) == -3331
assert v_i32.swap(-3331) == new_i32
assert v_i32.compare_and_swap(-3331, new_i32) == true
assert v_i32.compare_and_swap(new_i32, -3332) == true
assert v_i32.load() == -3332
mut v_u32 := stdatomic.new_atomic(u32(3333))
v_u32.store(3334)
assert v_u32.load() == 3334
v_u32.add(10)
assert v_u32.add(10) == 3334
assert v_u32.load() == 3344
v_u32.sub(7)
assert v_u32.sub(7) == 3344
assert v_u32.load() == 3337
mut new_u32 := u32(2000)
assert v_u32.swap(new_u32) == 3337
assert v_u32.swap(3337) == new_u32
assert v_u32.compare_and_swap(3337, new_u32) == true
assert v_u32.compare_and_swap(new_u32, 3338) == true
assert v_u32.load() == 3338
mut v_i64 := stdatomic.new_atomic(i64(-33333))
v_i64.store(-33334)
assert v_i64.load() == -33334
v_i64.add(10)
assert v_i64.add(10) == -33334
assert v_i64.load() == -33324
v_i64.sub(7)
assert v_i64.sub(7) == -33324
assert v_i64.load() == -33331
mut new_i64 := i64(-20000)
assert v_i64.swap(new_i64) == -33331
assert v_i64.swap(-33331) == new_i64
assert v_i64.compare_and_swap(-33331, new_i64) == true
assert v_i64.compare_and_swap(new_i64, -33332) == true
assert v_i64.load() == -33332
mut v_u64 := stdatomic.new_atomic(u64(33333))
v_u64.store(33334)
assert v_u64.load() == 33334
v_u64.add(10)
assert v_u64.add(10) == 33334
assert v_u64.load() == 33344
v_u64.sub(7)
assert v_u64.sub(7) == 33344
assert v_u64.load() == 33337
mut new_u64 := u64(20000)
assert v_u64.swap(new_u64) == 33337
assert v_u64.swap(33337) == new_u64
assert v_u64.compare_and_swap(33337, new_u64) == true
assert v_u64.compare_and_swap(new_u64, 33338) == true
assert v_u64.load() == 33338
mut v_int := stdatomic.new_atomic(int(-44))
v_int.store(-45)
assert v_int.load() == -45
v_int.add(10)
assert v_int.add(10) == -45
assert v_int.load() == -35
v_int.sub(7)
assert v_int.sub(7) == -35
assert v_int.load() == -42
mut new_int := int(-40000)
assert v_int.swap(new_int) == -42
assert v_int.swap(-42) == new_int
assert v_int.compare_and_swap(-42, new_int) == true
assert v_int.compare_and_swap(new_int, -40001) == true
assert v_int.load() == -40001
mut v_isize := stdatomic.new_atomic(isize(-55))
v_isize.store(-56)
assert v_isize.load() == -56
v_isize.add(10)
assert v_isize.add(10) == -56
assert v_isize.load() == -46
v_isize.sub(7)
assert v_isize.sub(7) == -46
assert v_isize.load() == -53
mut new_isize := isize(-50000)
assert v_isize.swap(new_isize) == -53
assert v_isize.swap(-53) == new_isize
assert v_isize.compare_and_swap(-53, new_isize) == true
assert v_isize.compare_and_swap(new_isize, -50001) == true
assert v_isize.load() == -50001
mut v_usize := stdatomic.new_atomic(usize(55))
v_usize.store(56)
assert v_usize.load() == 56
v_usize.add(10)
assert v_usize.add(10) == 56
assert v_usize.load() == 66
v_usize.sub(7)
assert v_usize.sub(7) == 66
assert v_usize.load() == 59
mut new_usize := usize(50000)
assert v_usize.swap(new_usize) == 59
assert v_usize.swap(59) == new_usize
assert v_usize.compare_and_swap(59, new_usize) == true
assert v_usize.compare_and_swap(new_usize, 60) == true
assert v_usize.load() == 60
mut val_1 := int(100)
mut ptr_1 := voidptr(&val_1)
mut val_2 := int(200)
mut ptr_2 := voidptr(&val_2)
mut v_voidptr := stdatomic.new_atomic(ptr_1)
assert v_voidptr.load() == ptr_1
v_voidptr.store(ptr_2)
assert v_voidptr.load() == ptr_2
assert v_voidptr.swap(ptr_1) == ptr_2
assert v_voidptr.swap(ptr_2) == ptr_1
assert v_voidptr.compare_and_swap(ptr_2, ptr_1) == true
assert v_voidptr.load() == ptr_1
}

View File

@ -1,7 +1,7 @@
vlib/sync/stdatomic/atomic.c.v:83:3: error: atomic: only support number and bool types
81 | }
82 | } $else {
83 | $compile_error('atomic: only support number and bool types')
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84 | }
85 | return unsafe { nil }
vlib/sync/stdatomic/atomic.c.v:85:3: error: atomic: only support number, bool, and voidptr types
83 | }
84 | } $else {
85 | $compile_error('atomic: only support number, bool, and voidptr types')
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
86 | }
87 | return unsafe { nil }