Implement mutex assertions only in debug mode.

This avoids side effects in release builds, fixing #317
IntegratedQuantum 2024-04-16 22:15:01 +02:00
parent 4ea9b37b8c
commit 82be61ed6d
10 changed files with 39 additions and 26 deletions
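
Why the old pattern is unsafe outside Debug: `std.debug.assert(!mutex.tryLock())` evaluates `tryLock()` in every build mode. If the caller does not actually hold the mutex, `tryLock()` succeeds and acquires it as a side effect; in release builds the failed assertion does not reliably trap, so the mutex is left locked and the next `lock()` deadlocks. A minimal sketch of the before/after pattern (the function names `updateOld`/`updateNew` are illustrative, not from the codebase; `assertLocked` mirrors the helper this commit adds to utils.zig):

const std = @import("std");
const builtin = @import("builtin");

var mutex: std.Thread.Mutex = .{};

// Old: tryLock() runs even in release builds. When the caller forgot to
// lock, tryLock() succeeds and silently acquires the mutex, which is then
// never released.
fn updateOld() void {
	std.debug.assert(!mutex.tryLock());
}

// New: the probe only exists in Debug builds, so release builds are
// side-effect free.
fn assertLocked(m: *std.Thread.Mutex) void {
	if(builtin.mode == .Debug) {
		std.debug.assert(!m.tryLock());
	}
}

fn updateNew() void {
	assertLocked(&mutex);
}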

View File

@@ -111,7 +111,7 @@ pub const ClientEntityManager = struct {
 	}
 	fn update() void {
-		std.debug.assert(!mutex.tryLock()); // The mutex should be locked when calling this function.
+		main.utils.assertLocked(&mutex);
 		var time: i16 = @truncate(std.time.milliTimestamp());
 		time -%= timeDifference.difference.load(.monotonic);
 		for(entities.items) |*ent| {

View File

@@ -22,7 +22,7 @@ child: GuiComponent = undefined,
 mutex: std.Thread.Mutex = .{},
 pub fn updateInner(self: *MutexComponent, _other: anytype) void {
-	std.debug.assert(!self.mutex.tryLock()); // self.mutex must be locked before calling!
+	main.utils.assertLocked(&self.mutex);
 	var other: GuiComponent = undefined;
 	if(@TypeOf(_other) == GuiComponent) {
 		other = _other;
@@ -35,7 +35,7 @@ pub fn updateInner(self: *MutexComponent, _other: anytype) void {
 }
 pub fn deinit(self: *MutexComponent) void {
-	std.debug.assert(!self.mutex.tryLock()); // self.mutex must be locked before calling!
+	main.utils.assertLocked(&self.mutex);
 	self.child.deinit();
 }

View File

@@ -82,7 +82,7 @@ const GuiCommandQueue = struct {
 	}
 	fn executeOpenWindowCommand(window: *GuiWindow) void {
-		std.debug.assert(!mutex.tryLock()); // mutex must be locked.
+		main.utils.assertLocked(&mutex);
 		defer updateWindowPositions();
 		for(openWindows.items, 0..) |_openWindow, i| {
 			if(_openWindow == window) {
@@ -98,7 +98,7 @@ const GuiCommandQueue = struct {
 	}
 	fn executeCloseWindowCommand(window: *GuiWindow) void {
-		std.debug.assert(!mutex.tryLock()); // mutex must be locked.
+		main.utils.assertLocked(&mutex);
 		defer updateWindowPositions();
 		if(selectedWindow == window) {
 			selectedWindow = null;

View File

@@ -38,7 +38,7 @@ var input: *TextInput = undefined;
 var hideInput: bool = true;
 fn refresh() void {
-	std.debug.assert(!mutexComponent.mutex.tryLock()); // mutex must be locked!
+	main.utils.assertLocked(&mutexComponent.mutex);
 	if(window.rootComponent) |old| {
 		old.mutexComponent.child.verticalList.children.clearRetainingCapacity();
 		old.deinit();

View File

@@ -143,7 +143,7 @@ pub const ItemDropManager = struct {
 	}
 	fn storeSingle(self: *ItemDropManager, allocator: NeverFailingAllocator, i: u16) JsonElement {
-		std.debug.assert(!self.mutex.tryLock()); // Mutex must be locked!
+		main.utils.assertLocked(&self.mutex);
 		const obj = JsonElement.initObject(allocator);
 		const itemDrop = self.list.get(i);
 		obj.put("i", i);
@@ -337,7 +337,7 @@ pub const ItemDropManager = struct {
 	// }
 	fn updateEnt(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
-		std.debug.assert(!self.mutex.tryLock()); // Mutex must be locked!
+		main.utils.assertLocked(&self.mutex);
 		const startedInABlock = self.checkBlocks(chunk, pos);
 		if(startedInABlock) {
 			self.fixStuckInBlock(chunk, pos, vel, deltaTime);
@@ -361,7 +361,7 @@ pub const ItemDropManager = struct {
 	}
 	fn fixStuckInBlock(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
-		std.debug.assert(!self.mutex.tryLock()); // Mutex must be locked!
+		main.utils.assertLocked(&self.mutex);
 		const centeredPos = pos.* - @as(Vec3d, @splat(0.5));
 		const pos0: Vec3i = @intFromFloat(@floor(centeredPos));

View File

@@ -807,7 +807,7 @@ pub const ChunkMesh = struct {
 	}
 	pub fn finishData(self: *ChunkMesh) void {
-		std.debug.assert(!self.mutex.tryLock());
+		main.utils.assertLocked(&self.mutex);
 		self.opaqueMesh.finish(self);
 		self.transparentMesh.finish(self);
 	}

View File

@@ -87,13 +87,14 @@ pub const ChannelChunk = struct {
 	}
 	pub fn getValueHoldingTheLock(self: *ChannelChunk, x: i32, y: i32, z: i32) [3]u8 {
+		main.utils.assertLockedShared(&self.lock);
 		const index = chunk.getIndex(x, y, z);
 		return self.getValueInternal(index);
 	}
 	fn setValueInternal(self: *ChannelChunk, i: usize, val: [3]u8) void {
 		std.debug.assert(self.paletteLength <= self.palette.len);
-		std.debug.assert(!self.lock.tryLock());
+		main.utils.assertLockedShared(&self.lock);
 		var paletteIndex: u32 = 0;
 		while(paletteIndex < self.paletteLength) : (paletteIndex += 1) { // TODO: There got to be a faster way to do this. Either using SIMD or using a cache or hashmap.
 			if(std.meta.eql(self.palette[paletteIndex], val)) {
@@ -125,7 +126,7 @@ pub const ChannelChunk = struct {
 	}
 	fn optimizeLayout(self: *ChannelChunk) void {
-		std.debug.assert(!self.lock.tryLock());
+		main.utils.assertLockedShared(&self.lock);
 		if(std.math.log2_int_ceil(usize, self.palette.len) == std.math.log2_int_ceil(usize, self.activePaletteEntries)) return;
 		var newData = main.utils.DynamicPackedIntArray(chunk.chunkVolume).initCapacity(main.globalAllocator, @intCast(std.math.log2_int_ceil(u32, self.activePaletteEntries)));

View File

@@ -61,7 +61,7 @@ pub const User = struct {
 	}
 	pub fn update(self: *User) void {
-		std.debug.assert(!mutex.tryLock()); // The mutex should be locked when calling this function.
+		main.utils.assertLocked(&mutex);
 		var time = @as(i16, @truncate(std.time.milliTimestamp())) -% main.settings.entityLookback;
 		time -%= self.timeDifference.difference.load(.monotonic);
 		self.interpolation.update(time, self.lastTime);
@@ -222,7 +222,7 @@ pub fn connect(user: *User) void {
 // private Entity[] lastSentEntities = new Entity[0];
 pub fn sendMessage(msg: []const u8) void {
-	std.debug.assert(!mutex.tryLock()); // Mutex must be locked!
+	main.utils.assertLocked(&mutex);
 	std.log.info("Chat: {s}", .{msg}); // TODO use color \033[0;32m
 	for(users.items) |user| {
 		main.network.Protocols.chat.send(user.conn, msg);

View File

@@ -884,7 +884,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		/// Moves an element from a given index down the heap, such that all children are always smaller than their parents.
 		fn siftDown(self: *@This(), _i: usize) void {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			var i = _i;
 			while(2*i + 1 < self.size) {
 				const biggest = if(2*i + 2 < self.size and self.array[2*i + 2].biggerThan(self.array[2*i + 1])) 2*i + 2 else 2*i + 1;
@@ -901,7 +901,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		/// Moves an element from a given index up the heap, such that all children are always smaller than their parents.
 		fn siftUp(self: *@This(), _i: usize) void {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			var i = _i;
 			while(i > 0) {
 				const parentIndex = (i - 1)/2;
@@ -924,7 +924,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		/// Returns the i-th element in the heap. Useless for most applications.
 		pub fn get(self: *@This(), i: usize) ?T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			if(i >= self.size) return null;
 			return self.array[i];
 		}
@@ -945,7 +945,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		}
 		fn removeIndex(self: *@This(), i: usize) void {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			self.size -= 1;
 			self.array[i] = self.array[self.size];
 			self.siftDown(i);
@@ -1213,7 +1213,7 @@ pub fn Cache(comptime T: type, comptime numberOfBuckets: u32, comptime bucketSiz
 		items: [bucketSize]?*T = [_]?*T {null} ** bucketSize,
 		fn find(self: *@This(), compare: anytype) ?*T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex must be locked.
+			assertLocked(&self.mutex);
 			for(self.items, 0..) |item, i| {
 				if(compare.equals(item)) {
 					if(i != 0) {
@@ -1228,7 +1228,7 @@ pub fn Cache(comptime T: type, comptime numberOfBuckets: u32, comptime bucketSiz
 		/// Returns the object that got kicked out of the cache. This must be deinited by the user.
 		fn add(self: *@This(), item: *T) ?*T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex must be locked.
+			assertLocked(&self.mutex);
 			const previous = self.items[bucketSize - 1];
 			std.mem.copyBackwards(?*T, self.items[1..], self.items[0..bucketSize - 1]);
 			self.items[0] = item;
@@ -1236,7 +1236,7 @@ pub fn Cache(comptime T: type, comptime numberOfBuckets: u32, comptime bucketSiz
 		}
 		fn findOrCreate(self: *@This(), compare: anytype, comptime initFunction: fn(@TypeOf(compare)) *T) *T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex must be locked.
+			assertLocked(&self.mutex);
 			if(self.find(compare)) |item| {
 				return item;
 			}
@@ -1482,4 +1482,16 @@ pub const TimeDifference = struct {
 			_ = @atomicRmw(i16, &self.difference.raw, .Add, -1, .monotonic);
 		}
 	}
 };
+
+pub fn assertLocked(mutex: *std.Thread.Mutex) void {
+	if(builtin.mode == .Debug) {
+		std.debug.assert(!mutex.tryLock());
+	}
+}
+
+pub fn assertLockedShared(lock: *std.Thread.RwLock) void {
+	if(builtin.mode == .Debug) {
+		std.debug.assert(!lock.tryLock());
+	}
+}
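
A note on the two helpers above: they assume `builtin` is imported in utils.zig (that import is outside the lines shown here). For the RwLock variant, `tryLock()` attempts an exclusive lock, which fails while any holder is active, so `assertLockedShared` passes whether the caller holds the lock shared or exclusively. A standalone illustration (not part of the commit):

const std = @import("std");

test "exclusive tryLock as a held-lock probe" {
	var lock: std.Thread.RwLock = .{};
	lock.lockShared();
	// An exclusive tryLock() fails while a shared holder exists, so the
	// Debug-mode assertion inside assertLockedShared() would pass here.
	std.debug.assert(!lock.tryLock());
	lock.unlockShared();
}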

View File

@@ -85,7 +85,7 @@ const LinuxImpl = struct {
 	}
 	fn addWatchDescriptorsRecursive(info: *DirectoryInfo, path: []const u8) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		var iterableDir = std.fs.cwd().openDir(path, .{.iterate = true}) catch |err| {
 			std.log.err("Error while opening dirs {s}: {s}", .{path, @errorName(err)});
 			return;
@@ -106,7 +106,7 @@ const LinuxImpl = struct {
 	}
 	fn updateRecursiveCallback(info: *DirectoryInfo) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		for(info.watchDescriptors.items[1..]) |watchDescriptor| {
 			removeWatchDescriptor(watchDescriptor, info.path);
 		}
@@ -154,7 +154,7 @@ const LinuxImpl = struct {
 	}
 	fn addWatchDescriptor(info: *DirectoryInfo, path: [:0]const u8) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		const watchDescriptor = c.inotify_add_watch(fd, path.ptr, c.IN_CLOSE_WRITE | c.IN_DELETE | c.IN_CREATE | c.IN_MOVE | c.IN_ONLYDIR);
 		if(watchDescriptor == -1) {
 			std.log.err("Error while adding watch descriptor for path {s}: {}", .{path, std.posix.errno(watchDescriptor)});
@@ -164,7 +164,7 @@ const LinuxImpl = struct {
 	}
 	fn removeWatchDescriptor(watchDescriptor: c_int, path: []const u8) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		_ = callbacks.remove(watchDescriptor);
 		const result = c.inotify_rm_watch(fd, watchDescriptor);
 		if(result == -1) {