Introduce our own MemoryPool type for easier locking, more diagnostics, and safety.

IntegratedQuantum 2025-03-09 17:31:35 +01:00
parent f73a36855b
commit 50de67fca1
4 changed files with 96 additions and 32 deletions
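
In short, every per-type std.heap.MemoryPool paired with a hand-rolled std.Thread.Mutex is replaced by a single main.heap.MemoryPool that locks internally and cannot fail. The call-site change, as it appears in Chunk.init in the diff below:

	// Before: every caller locks a separate mutex and "handles" an allocation error that is never recoverable.
	memoryPoolMutex.lock();
	const self = memoryPool.create() catch unreachable;
	memoryPoolMutex.unlock();

	// After: the pool locks internally and is backed by a NeverFailingAllocator, so create() cannot fail.
	const self = memoryPool.create();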

File 1 of 4

@@ -128,14 +128,12 @@ fn extractZFromIndex(index: usize) i32 {
 	return @intCast(index & chunkMask);
 }
 
-var memoryPool: std.heap.MemoryPoolAligned(Chunk, @alignOf(Chunk)) = undefined;
-var memoryPoolMutex: std.Thread.Mutex = .{};
-var serverPool: std.heap.MemoryPoolAligned(ServerChunk, @alignOf(ServerChunk)) = undefined;
-var serverPoolMutex: std.Thread.Mutex = .{};
+var memoryPool: main.heap.MemoryPool(Chunk) = undefined;
+var serverPool: main.heap.MemoryPool(ServerChunk) = undefined;
 
 pub fn init() void {
-	memoryPool = .init(main.globalAllocator.allocator);
-	serverPool = .init(main.globalAllocator.allocator);
+	memoryPool = .init(main.globalAllocator);
+	serverPool = .init(main.globalAllocator);
 }
 
 pub fn deinit() void {
@@ -226,9 +224,7 @@ pub const Chunk = struct { // MARK: Chunk
 	widthShift: u5,
 
 	pub fn init(pos: ChunkPosition) *Chunk {
-		memoryPoolMutex.lock();
-		const self = memoryPool.create() catch unreachable;
-		memoryPoolMutex.unlock();
+		const self = memoryPool.create();
 		std.debug.assert((pos.voxelSize - 1 & pos.voxelSize) == 0);
 		std.debug.assert(@mod(pos.wx, pos.voxelSize) == 0 and @mod(pos.wy, pos.voxelSize) == 0 and @mod(pos.wz, pos.voxelSize) == 0);
 		const voxelSizeShift: u5 = @intCast(std.math.log2_int(u31, pos.voxelSize));
@@ -245,9 +241,7 @@ pub const Chunk = struct { // MARK: Chunk
 
 	pub fn deinit(self: *Chunk) void {
 		self.data.deinit();
-		memoryPoolMutex.lock();
 		memoryPool.destroy(@alignCast(self));
-		memoryPoolMutex.unlock();
 	}
 
 	/// Updates a block if it is inside this chunk.
@@ -282,9 +276,7 @@ pub const ServerChunk = struct { // MARK: ServerChunk
 	refCount: std.atomic.Value(u16),
 
 	pub fn initAndIncreaseRefCount(pos: ChunkPosition) *ServerChunk {
-		serverPoolMutex.lock();
-		const self = serverPool.create() catch unreachable;
-		serverPoolMutex.unlock();
+		const self = serverPool.create();
 		std.debug.assert((pos.voxelSize - 1 & pos.voxelSize) == 0);
 		std.debug.assert(@mod(pos.wx, pos.voxelSize) == 0 and @mod(pos.wy, pos.voxelSize) == 0 and @mod(pos.wz, pos.voxelSize) == 0);
 		const voxelSizeShift: u5 = @intCast(std.math.log2_int(u31, pos.voxelSize));
@@ -308,9 +300,7 @@ pub const ServerChunk = struct { // MARK: ServerChunk
 			self.save(main.server.world.?);
 		}
 		self.super.data.deinit();
-		serverPoolMutex.lock();
 		serverPool.destroy(@alignCast(self));
-		serverPoolMutex.unlock();
 	}
 
 	pub fn setChanged(self: *ServerChunk) void {

File 2 of 4

@@ -7,11 +7,10 @@ const chunk = main.chunk;
 const chunk_meshing = @import("chunk_meshing.zig");
 const mesh_storage = @import("mesh_storage.zig");
 
-var memoryPool: std.heap.MemoryPool(ChannelChunk) = undefined;
-var memoryPoolMutex: std.Thread.Mutex = .{};
+var memoryPool: main.heap.MemoryPool(ChannelChunk) = undefined;
 
 pub fn init() void {
-	memoryPool = .init(main.globalAllocator.allocator);
+	memoryPool = .init(main.globalAllocator);
 }
 
 pub fn deinit() void {
@@ -33,9 +32,7 @@ pub const ChannelChunk = struct {
 	isSun: bool,
 
 	pub fn init(ch: *chunk.Chunk, isSun: bool) *ChannelChunk {
-		memoryPoolMutex.lock();
-		const self = memoryPool.create() catch unreachable;
-		memoryPoolMutex.unlock();
+		const self = memoryPool.create();
 		self.lock = .{};
 		self.ch = ch;
 		self.isSun = isSun;
@@ -45,9 +42,7 @@ pub const ChannelChunk = struct {
 
 	pub fn deinit(self: *ChannelChunk) void {
 		self.data.deinit();
-		memoryPoolMutex.lock();
 		memoryPool.destroy(self);
-		memoryPoolMutex.unlock();
 	}
 
 	const Entry = struct {

File 3 of 4

@@ -51,13 +51,12 @@ const BlockUpdate = struct {
 };
 
 var blockUpdateList: main.utils.ConcurrentQueue(BlockUpdate) = undefined;
-var meshMemoryPool: std.heap.MemoryPoolAligned(chunk_meshing.ChunkMesh, @alignOf(chunk_meshing.ChunkMesh)) = undefined;
-var meshMemoryPoolMutex: std.Thread.Mutex = .{};
+var meshMemoryPool: main.heap.MemoryPool(chunk_meshing.ChunkMesh) = undefined;
 
 pub fn init() void { // MARK: init()
 	lastRD = 0;
 	blockUpdateList = .init(main.globalAllocator, 16);
-	meshMemoryPool = .init(main.globalAllocator.allocator);
+	meshMemoryPool = .init(main.globalAllocator);
 	for(&storageLists) |*storageList| {
 		storageList.* = main.globalAllocator.create([storageSize*storageSize*storageSize]ChunkMeshNode);
 		for(storageList.*) |*val| {
@@ -759,9 +758,7 @@ pub fn updateMeshes(targetTime: i64) void { // MARK: updateMeshes()
 	defer mutex.unlock();
 	for(clearList.items) |mesh| {
 		mesh.deinit();
-		meshMemoryPoolMutex.lock();
 		meshMemoryPool.destroy(mesh);
-		meshMemoryPoolMutex.unlock();
 	}
 	clearList.clearRetainingCapacity();
 	while(priorityMeshUpdateList.dequeue()) |mesh| {
@@ -928,9 +925,7 @@ pub const MeshGenerationTask = struct { // MARK: MeshGenerationTask
 	pub fn run(self: *MeshGenerationTask) void {
 		defer main.globalAllocator.destroy(self);
 		const pos = self.mesh.pos;
-		meshMemoryPoolMutex.lock();
-		const mesh = meshMemoryPool.create() catch unreachable;
-		meshMemoryPoolMutex.unlock();
+		const mesh = meshMemoryPool.create();
 		mesh.init(pos, self.mesh);
 		defer mesh.decreaseRefCount();
 		mesh.generateLightingData() catch return;

File 4 of 4

@@ -488,3 +488,87 @@ pub const NeverFailingArenaAllocator = struct { // MARK: NeverFailingArena
 		}
 	}
 };
+
+/// Basically a copy of std.heap.MemoryPool, except that it is thread-safe and adds some extra diagnostics.
+pub fn MemoryPool(Item: type) type {
+	return struct {
+		const Pool = @This();
+
+		/// Size of the memory pool items. This is not necessarily the same
+		/// as `@sizeOf(Item)` as the pool also uses the items for internal means.
+		pub const item_size = @max(@sizeOf(Node), @sizeOf(Item));
+
+		// This needs to be kept in sync with Node.
+		const node_alignment = @alignOf(*anyopaque);
+
+		/// Alignment of the memory pool items. This is not necessarily the same
+		/// as `@alignOf(Item)` as the pool also uses the items for internal means.
+		pub const item_alignment = @max(node_alignment, @alignOf(Item));
+
+		const Node = struct {
+			next: ?*align(item_alignment) @This(),
+		};
+		const NodePtr = *align(item_alignment) Node;
+		const ItemPtr = *align(item_alignment) Item;
+
+		arena: NeverFailingArenaAllocator,
+		free_list: ?NodePtr = null,
+		freeAllocations: usize = 0,
+		totalAllocations: usize = 0,
+		mutex: std.Thread.Mutex = .{},
+
+		/// Creates a new memory pool.
+		pub fn init(allocator: NeverFailingAllocator) Pool {
+			return .{.arena = NeverFailingArenaAllocator.init(allocator)};
+		}
+
+		/// Destroys the memory pool and frees all allocated memory.
+		pub fn deinit(pool: *Pool) void {
+			if(pool.freeAllocations != pool.totalAllocations) {
+				std.log.err("Memory pool of type {s} leaked {} elements", .{@typeName(Item), pool.totalAllocations - pool.freeAllocations});
+			} else {
+				std.log.info("Memory pool of type {s} contained a total of {} MiB ({} elements)", .{@typeName(Item), pool.totalAllocations*item_size >> 20, pool.totalAllocations});
+			}
+			pool.arena.deinit();
+			pool.* = undefined;
+		}
+
+		/// Creates a new item and adds it to the memory pool.
+		pub fn create(pool: *Pool) ItemPtr {
+			pool.mutex.lock();
+			defer pool.mutex.unlock();
+			// Reuse a slot from the free list if possible, otherwise allocate a fresh one from the arena.
+			const node = if(pool.free_list) |item| blk: {
+				pool.free_list = item.next;
+				break :blk item;
+			} else @as(NodePtr, @ptrCast(pool.allocNew()));
+			pool.freeAllocations -= 1;
+			const ptr = @as(ItemPtr, @ptrCast(node));
+			ptr.* = undefined;
+			return ptr;
+		}
+
+		/// Destroys a previously created item.
+		/// Only pass items to `ptr` that were previously created with `create()` of the same memory pool!
+		pub fn destroy(pool: *Pool, ptr: ItemPtr) void {
+			pool.mutex.lock();
+			defer pool.mutex.unlock();
+			ptr.* = undefined;
+			const node = @as(NodePtr, @ptrCast(ptr));
+			node.* = Node{
+				.next = pool.free_list,
+			};
+			pool.free_list = node;
+			pool.freeAllocations += 1;
+		}
+
+		fn allocNew(pool: *Pool) *align(item_alignment) [item_size]u8 {
+			main.utils.assertLocked(&pool.mutex);
+			pool.totalAllocations += 1;
+			// The new slot is counted as free here; `create()` immediately decrements this again.
+			pool.freeAllocations += 1;
+			const mem = pool.arena.allocator().alignedAlloc(u8, item_alignment, item_size);
+			return mem[0..item_size]; // coerce slice to array pointer
+		}
+	};
+}
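
For reference, a minimal usage sketch of the new type (the item struct Foo is hypothetical; init, create, destroy, and deinit are the functions added above, referred to as main.heap.MemoryPool at the call sites in this diff):

	const Foo = struct {x: u32};
	var pool: main.heap.MemoryPool(Foo) = .init(main.globalAllocator);
	const a = pool.create(); // thread-safe; reuses a free-list slot or allocates from the arena
	a.* = .{.x = 42}; // create() leaves the item undefined, so the caller initializes it
	pool.destroy(a); // returns the slot to the free list for reuse
	pool.deinit(); // frees the arena in one go and logs an error if any items leaked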