Implement mutex assertions only in debug mode.

This avoids side effects in release builds, fixing #317.
IntegratedQuantum 2024-04-16 22:15:01 +02:00
parent 4ea9b37b8c
commit 82be61ed6d
10 changed files with 39 additions and 26 deletions
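
For context, here is a minimal sketch of the pattern this commit replaces and the helper it introduces. The updateOld/updateNew functions are illustrative only; assertLocked mirrors the helper added to the utils module in the diff below.

const std = @import("std");
const builtin = @import("builtin");

var mutex: std.Thread.Mutex = .{};

// Old pattern: the argument of std.debug.assert() is evaluated in every
// build mode, so tryLock() still runs in release builds. If the caller
// forgot to lock, tryLock() succeeds and silently leaves the mutex locked.
fn updateOld() void {
	std.debug.assert(!mutex.tryLock());
	// ... code that requires `mutex` to be held ...
}

// New pattern: the tryLock() probe is guarded by a comptime-known check,
// so it is compiled out entirely outside of Debug mode.
pub fn assertLocked(m: *std.Thread.Mutex) void {
	if(builtin.mode == .Debug) {
		std.debug.assert(!m.tryLock());
	}
}

fn updateNew() void {
	assertLocked(&mutex);
	// ... code that requires `mutex` to be held ...
}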

View File

@@ -111,7 +111,7 @@ pub const ClientEntityManager = struct {
 	}
 	fn update() void {
-		std.debug.assert(!mutex.tryLock()); // The mutex should be locked when calling this function.
+		main.utils.assertLocked(&mutex);
 		var time: i16 = @truncate(std.time.milliTimestamp());
 		time -%= timeDifference.difference.load(.monotonic);
 		for(entities.items) |*ent| {

View File

@@ -22,7 +22,7 @@ child: GuiComponent = undefined,
 mutex: std.Thread.Mutex = .{},
 pub fn updateInner(self: *MutexComponent, _other: anytype) void {
-	std.debug.assert(!self.mutex.tryLock()); // self.mutex must be locked before calling!
+	main.utils.assertLocked(&self.mutex);
 	var other: GuiComponent = undefined;
 	if(@TypeOf(_other) == GuiComponent) {
 		other = _other;
@@ -35,7 +35,7 @@ pub fn updateInner(self: *MutexComponent, _other: anytype) void {
 }
 pub fn deinit(self: *MutexComponent) void {
-	std.debug.assert(!self.mutex.tryLock()); // self.mutex must be locked before calling!
+	main.utils.assertLocked(&self.mutex);
 	self.child.deinit();
 }

View File

@@ -82,7 +82,7 @@ const GuiCommandQueue = struct {
 	}
 	fn executeOpenWindowCommand(window: *GuiWindow) void {
-		std.debug.assert(!mutex.tryLock()); // mutex must be locked.
+		main.utils.assertLocked(&mutex);
 		defer updateWindowPositions();
 		for(openWindows.items, 0..) |_openWindow, i| {
 			if(_openWindow == window) {
@@ -98,7 +98,7 @@ const GuiCommandQueue = struct {
 	}
 	fn executeCloseWindowCommand(window: *GuiWindow) void {
-		std.debug.assert(!mutex.tryLock()); // mutex must be locked.
+		main.utils.assertLocked(&mutex);
 		defer updateWindowPositions();
 		if(selectedWindow == window) {
 			selectedWindow = null;

View File

@@ -38,7 +38,7 @@ var input: *TextInput = undefined;
 var hideInput: bool = true;
 fn refresh() void {
-	std.debug.assert(!mutexComponent.mutex.tryLock()); // mutex must be locked!
+	main.utils.assertLocked(&mutexComponent.mutex);
 	if(window.rootComponent) |old| {
 		old.mutexComponent.child.verticalList.children.clearRetainingCapacity();
 		old.deinit();

View File

@@ -143,7 +143,7 @@ pub const ItemDropManager = struct {
 	}
 	fn storeSingle(self: *ItemDropManager, allocator: NeverFailingAllocator, i: u16) JsonElement {
-		std.debug.assert(!self.mutex.tryLock()); // Mutex must be locked!
+		main.utils.assertLocked(&self.mutex);
 		const obj = JsonElement.initObject(allocator);
 		const itemDrop = self.list.get(i);
 		obj.put("i", i);
@@ -337,7 +337,7 @@ pub const ItemDropManager = struct {
 	// }
 	fn updateEnt(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
-		std.debug.assert(!self.mutex.tryLock()); // Mutex must be locked!
+		main.utils.assertLocked(&self.mutex);
 		const startedInABlock = self.checkBlocks(chunk, pos);
 		if(startedInABlock) {
 			self.fixStuckInBlock(chunk, pos, vel, deltaTime);
@@ -361,7 +361,7 @@ pub const ItemDropManager = struct {
 	}
 	fn fixStuckInBlock(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
-		std.debug.assert(!self.mutex.tryLock()); // Mutex must be locked!
+		main.utils.assertLocked(&self.mutex);
 		const centeredPos = pos.* - @as(Vec3d, @splat(0.5));
 		const pos0: Vec3i = @intFromFloat(@floor(centeredPos));

View File

@@ -807,7 +807,7 @@ pub const ChunkMesh = struct {
 	}
 	pub fn finishData(self: *ChunkMesh) void {
-		std.debug.assert(!self.mutex.tryLock());
+		main.utils.assertLocked(&self.mutex);
 		self.opaqueMesh.finish(self);
 		self.transparentMesh.finish(self);
 	}

View File

@@ -87,13 +87,14 @@ pub const ChannelChunk = struct {
 	}
 	pub fn getValueHoldingTheLock(self: *ChannelChunk, x: i32, y: i32, z: i32) [3]u8 {
+		main.utils.assertLockedShared(&self.lock);
 		const index = chunk.getIndex(x, y, z);
 		return self.getValueInternal(index);
 	}
 	fn setValueInternal(self: *ChannelChunk, i: usize, val: [3]u8) void {
 		std.debug.assert(self.paletteLength <= self.palette.len);
-		std.debug.assert(!self.lock.tryLock());
+		main.utils.assertLockedShared(&self.lock);
 		var paletteIndex: u32 = 0;
 		while(paletteIndex < self.paletteLength) : (paletteIndex += 1) { // TODO: There got to be a faster way to do this. Either using SIMD or using a cache or hashmap.
 			if(std.meta.eql(self.palette[paletteIndex], val)) {
@@ -125,7 +126,7 @@ pub const ChannelChunk = struct {
 	}
 	fn optimizeLayout(self: *ChannelChunk) void {
-		std.debug.assert(!self.lock.tryLock());
+		main.utils.assertLockedShared(&self.lock);
 		if(std.math.log2_int_ceil(usize, self.palette.len) == std.math.log2_int_ceil(usize, self.activePaletteEntries)) return;
 		var newData = main.utils.DynamicPackedIntArray(chunk.chunkVolume).initCapacity(main.globalAllocator, @intCast(std.math.log2_int_ceil(u32, self.activePaletteEntries)));

View File

@@ -61,7 +61,7 @@ pub const User = struct {
 	}
 	pub fn update(self: *User) void {
-		std.debug.assert(!mutex.tryLock()); // The mutex should be locked when calling this function.
+		main.utils.assertLocked(&mutex);
 		var time = @as(i16, @truncate(std.time.milliTimestamp())) -% main.settings.entityLookback;
 		time -%= self.timeDifference.difference.load(.monotonic);
 		self.interpolation.update(time, self.lastTime);
@@ -222,7 +222,7 @@ pub fn connect(user: *User) void {
 // private Entity[] lastSentEntities = new Entity[0];
 pub fn sendMessage(msg: []const u8) void {
-	std.debug.assert(!mutex.tryLock()); // Mutex must be locked!
+	main.utils.assertLocked(&mutex);
 	std.log.info("Chat: {s}", .{msg}); // TODO use color \033[0;32m
 	for(users.items) |user| {
 		main.network.Protocols.chat.send(user.conn, msg);

View File

@@ -884,7 +884,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		/// Moves an element from a given index down the heap, such that all children are always smaller than their parents.
 		fn siftDown(self: *@This(), _i: usize) void {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			var i = _i;
 			while(2*i + 1 < self.size) {
 				const biggest = if(2*i + 2 < self.size and self.array[2*i + 2].biggerThan(self.array[2*i + 1])) 2*i + 2 else 2*i + 1;
@@ -901,7 +901,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		/// Moves an element from a given index up the heap, such that all children are always smaller than their parents.
 		fn siftUp(self: *@This(), _i: usize) void {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			var i = _i;
 			while(i > 0) {
 				const parentIndex = (i - 1)/2;
@@ -924,7 +924,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		/// Returns the i-th element in the heap. Useless for most applications.
 		pub fn get(self: *@This(), i: usize) ?T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			if(i >= self.size) return null;
 			return self.array[i];
 		}
@@ -945,7 +945,7 @@ pub fn BlockingMaxHeap(comptime T: type) type {
 		}
 		fn removeIndex(self: *@This(), i: usize) void {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex should be locked when calling this function.
+			assertLocked(&self.mutex);
 			self.size -= 1;
 			self.array[i] = self.array[self.size];
 			self.siftDown(i);
@@ -1213,7 +1213,7 @@ pub fn Cache(comptime T: type, comptime numberOfBuckets: u32, comptime bucketSiz
 		items: [bucketSize]?*T = [_]?*T {null} ** bucketSize,
 		fn find(self: *@This(), compare: anytype) ?*T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex must be locked.
+			assertLocked(&self.mutex);
 			for(self.items, 0..) |item, i| {
 				if(compare.equals(item)) {
 					if(i != 0) {
@@ -1228,7 +1228,7 @@ pub fn Cache(comptime T: type, comptime numberOfBuckets: u32, comptime bucketSiz
 		/// Returns the object that got kicked out of the cache. This must be deinited by the user.
 		fn add(self: *@This(), item: *T) ?*T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex must be locked.
+			assertLocked(&self.mutex);
 			const previous = self.items[bucketSize - 1];
 			std.mem.copyBackwards(?*T, self.items[1..], self.items[0..bucketSize - 1]);
 			self.items[0] = item;
@@ -1236,7 +1236,7 @@ pub fn Cache(comptime T: type, comptime numberOfBuckets: u32, comptime bucketSiz
 		}
 		fn findOrCreate(self: *@This(), compare: anytype, comptime initFunction: fn(@TypeOf(compare)) *T) *T {
-			std.debug.assert(!self.mutex.tryLock()); // The mutex must be locked.
+			assertLocked(&self.mutex);
 			if(self.find(compare)) |item| {
 				return item;
 			}
@@ -1482,4 +1482,16 @@ pub const TimeDifference = struct {
 			_ = @atomicRmw(i16, &self.difference.raw, .Add, -1, .monotonic);
 		}
 	}
 };
+
+pub fn assertLocked(mutex: *std.Thread.Mutex) void {
+	if(builtin.mode == .Debug) {
+		std.debug.assert(!mutex.tryLock());
+	}
+}
+
+pub fn assertLockedShared(lock: *std.Thread.RwLock) void {
+	if(builtin.mode == .Debug) {
+		std.debug.assert(!lock.tryLock());
+	}
+}
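
A note on the two helpers above (an inference, not stated in the commit message): because builtin.mode is known at compile time, the whole if-body is compiled out of release builds, so the tryLock() probe and its side effect disappear entirely. assertLockedShared probes with the exclusive tryLock() rather than tryLockShared(), presumably because an exclusive acquisition fails whenever the lock is already held in either shared or exclusive mode, which is exactly the condition being asserted.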

View File

@@ -85,7 +85,7 @@ const LinuxImpl = struct {
 	}
 	fn addWatchDescriptorsRecursive(info: *DirectoryInfo, path: []const u8) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		var iterableDir = std.fs.cwd().openDir(path, .{.iterate = true}) catch |err| {
 			std.log.err("Error while opening dirs {s}: {s}", .{path, @errorName(err)});
 			return;
@@ -106,7 +106,7 @@ const LinuxImpl = struct {
 	}
 	fn updateRecursiveCallback(info: *DirectoryInfo) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		for(info.watchDescriptors.items[1..]) |watchDescriptor| {
 			removeWatchDescriptor(watchDescriptor, info.path);
 		}
@@ -154,7 +154,7 @@ const LinuxImpl = struct {
 	}
 	fn addWatchDescriptor(info: *DirectoryInfo, path: [:0]const u8) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		const watchDescriptor = c.inotify_add_watch(fd, path.ptr, c.IN_CLOSE_WRITE | c.IN_DELETE | c.IN_CREATE | c.IN_MOVE | c.IN_ONLYDIR);
 		if(watchDescriptor == -1) {
 			std.log.err("Error while adding watch descriptor for path {s}: {}", .{path, std.posix.errno(watchDescriptor)});
@@ -164,7 +164,7 @@ const LinuxImpl = struct {
 	}
 	fn removeWatchDescriptor(watchDescriptor: c_int, path: []const u8) void {
-		std.debug.assert(!mutex.tryLock());
+		main.utils.assertLocked(&mutex);
 		_ = callbacks.remove(watchDescriptor);
 		const result = c.inotify_rm_watch(fd, watchDescriptor);
 		if(result == -1) {