Add an allocator for the palette compressed chunk data.

IntegratedQuantum 2025-03-09 20:05:06 +01:00
parent 50de67fca1
commit 2452aef769
3 changed files with 156 additions and 21 deletions


@@ -576,6 +576,9 @@ pub fn main() void { // MARK: main()
audio.init() catch std.log.err("Failed to initialize audio. Continuing the game without sounds.", .{});
defer audio.deinit();
+utils.initDynamicIntArrayStorage();
+defer utils.deinitDynamicIntArrayStorage();
chunk.init();
defer chunk.deinit();

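Order matters in these four lines: defers run LIFO, so the pool is created before the chunk system and destroyed after it, meaning every palette array is freed back to the pool before the pool itself disappears. Anywhere between these calls, chunk code can now create palette storage without threading an allocator through. A hypothetical sketch (assuming chunkVolume = 32768):

var blocks = utils.DynamicPackedIntArray(32768).initCapacity(4); // takes one 16 KiB block from the pool
defer blocks.deinit(); // pushes the block back onto its bucket's free list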

@@ -777,25 +777,36 @@ pub const ThreadPool = struct { // MARK: ThreadPool
}
};
+var dynamicIntArrayAllocator: main.heap.PowerOfTwoPoolAllocator(main.chunk.chunkVolume/@bitSizeOf(u8), main.chunk.chunkVolume*@sizeOf(u16), 64) = undefined;
+pub fn initDynamicIntArrayStorage() void {
+dynamicIntArrayAllocator = .init(main.globalAllocator);
+}
+pub fn deinitDynamicIntArrayStorage() void {
+dynamicIntArrayAllocator.deinit();
+}
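// For scale, assuming the usual 32*32*32 = 32768-voxel chunks: the smallest bucket is
// chunkVolume/@bitSizeOf(u8) = 4096 bytes (bitSize 1, one bit per voxel), the largest is
// chunkVolume*@sizeOf(u16) = 65536 bytes (bitSize 16, two bytes per voxel), giving five
// power-of-two buckets in total.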
/// A packed array of integers with dynamic bit size.
/// The bit size can be changed using the `resize` function.
pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIntArray
std.debug.assert(std.math.isPowerOfTwo(size));
return struct {
data: []align(64) u32 = &.{},
bitSize: u5 = 0,
const Self = @This();
-pub fn initCapacity(allocator: main.heap.NeverFailingAllocator, bitSize: u5) Self {
+pub fn initCapacity(bitSize: u5) Self {
std.debug.assert(bitSize == 0 or bitSize & bitSize - 1 == 0); // Must be 0 or a power of 2
return .{
-.data = allocator.alignedAlloc(u32, 64, @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize),
+.data = dynamicIntArrayAllocator.allocator().alignedAlloc(u32, 64, @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize),
.bitSize = bitSize,
};
}
-pub fn deinit(self: *Self, allocator: main.heap.NeverFailingAllocator) void {
-allocator.free(self.data);
+pub fn deinit(self: *Self) void {
+dynamicIntArrayAllocator.allocator().free(self.data);
self.* = .{};
}
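Every allocation this type makes is @divExact(size, @bitSizeOf(u32))*bitSize u32 words, i.e. size*bitSize/8 bytes. With size and bitSize both powers of two, each request is itself a power of two, which is exactly the contract the pool's alloc() asserts further down. A hypothetical sanity check, assuming size = 32768 and bitSize = 4:

comptime {
std.debug.assert(@divExact(32768, @bitSizeOf(u32))*4*@sizeOf(u32) == 16384); // the 16 KiB bucket
}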
@@ -808,9 +819,9 @@ pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIn
return result;
}
-pub fn resizeOnce(self: *Self, allocator: main.heap.NeverFailingAllocator) void {
+pub fn resizeOnce(self: *Self) void {
const newBitSize = if(self.bitSize != 0) self.bitSize*2 else 1;
-var newSelf = Self.initCapacity(allocator, newBitSize);
+var newSelf = Self.initCapacity(newBitSize);
switch(self.bitSize) {
0 => @memset(newSelf.data, 0),
@@ -823,7 +834,7 @@ pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIn
},
else => unreachable,
}
-allocator.free(self.data);
+dynamicIntArrayAllocator.allocator().free(self.data);
self.* = newSelf;
}
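Since resizeOnce doubles the bit size (0 → 1 → 2 → 4 → 8 → 16), each step frees one power-of-two block and takes the next larger one; the pool's five buckets line up with this progression exactly. A hypothetical sketch, assuming the storage was initialized and size = 32768:

var arr = DynamicPackedIntArray(32768).initCapacity(1); // 4 KiB block from the smallest bucket
defer arr.deinit();
arr.resizeOnce(); // bitSize 2: the 4 KiB block goes back, an 8 KiB one is taken
arr.resizeOnce(); // bitSize 4: 16 KiB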
@@ -889,13 +900,10 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
}
pub fn initCopy(self: *Self, template: *const Self) void {
-const dataDupe = main.globalAllocator.alignedAlloc(u32, 64, template.data.data.len);
-@memcpy(dataDupe, template.data.data);
+const dataDupe = DynamicPackedIntArray(size).initCapacity(template.data.bitSize);
+@memcpy(dataDupe.data, template.data.data);
self.* = .{
-.data = .{
-.data = dataDupe,
-.bitSize = template.data.bitSize,
-},
+.data = dataDupe,
.palette = main.globalAllocator.dupe(T, template.palette),
.paletteOccupancy = main.globalAllocator.dupe(u32, template.paletteOccupancy),
.paletteLength = template.paletteLength,
@@ -908,7 +916,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
const bitSize: u5 = getTargetBitSize(paletteLength);
const bufferLength = @as(u32, 1) << bitSize;
self.* = .{
-.data = DynamicPackedIntArray(size).initCapacity(main.globalAllocator, bitSize),
+.data = DynamicPackedIntArray(size).initCapacity(bitSize),
.palette = main.globalAllocator.alloc(T, bufferLength),
.paletteOccupancy = main.globalAllocator.alloc(u32, bufferLength),
.paletteLength = paletteLength,
@@ -919,7 +927,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
}
pub fn deinit(self: *Self) void {
-self.data.deinit(main.globalAllocator);
+self.data.deinit();
main.globalAllocator.free(self.palette);
main.globalAllocator.free(self.paletteOccupancy);
}
@@ -945,7 +953,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
}
if(paletteIndex == self.paletteLength) {
if(self.paletteLength == self.palette.len) {
-self.data.resizeOnce(main.globalAllocator);
+self.data.resizeOnce();
self.palette = main.globalAllocator.realloc(self.palette, @as(usize, 1) << self.data.bitSize);
const oldLen = self.paletteOccupancy.len;
self.paletteOccupancy = main.globalAllocator.realloc(self.paletteOccupancy, @as(usize, 1) << self.data.bitSize);
@@ -1007,7 +1015,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
const newBitSize = getTargetBitSize(@intCast(self.activePaletteEntries));
if(self.data.bitSize == newBitSize) return;
-var newData = main.utils.DynamicPackedIntArray(size).initCapacity(main.globalAllocator, newBitSize);
+var newData = main.utils.DynamicPackedIntArray(size).initCapacity(newBitSize);
const paletteMap: []u32 = main.stackAllocator.alloc(u32, self.paletteLength);
defer main.stackAllocator.free(paletteMap);
{
@@ -1031,7 +1039,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
for(0..size) |i| {
newData.setValue(i, paletteMap[self.data.getValue(i)]);
}
-self.data.deinit(main.globalAllocator);
+self.data.deinit();
self.data = newData;
self.paletteLength = self.activePaletteEntries;
self.palette = main.globalAllocator.realloc(self.palette, @as(usize, 1) << self.data.bitSize);

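This shrink path is the mirror image of resizeOnce: a fresh, smaller array is taken from the pool, every stored value is remapped through paletteMap, and the old block is pushed back onto its larger bucket's free list. In the hypothetical 32768-entry case, compacting from bitSize 8 down to 4 trades a 32 KiB block for a 16 KiB one.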

@@ -490,7 +490,7 @@ pub const NeverFailingArenaAllocator = struct { // MARK: NeverFailingArena
};
/// basically a copy of std.heap.MemoryPool, except it's thread-safe and has some more diagnostics.
-pub fn MemoryPool(Item: type) type {
+pub fn MemoryPool(Item: type) type { // MARK: MemoryPool
return struct {
const Pool = @This();
@@ -526,8 +526,8 @@ pub fn MemoryPool(Item: type) type {
pub fn deinit(pool: *Pool) void {
if(pool.freeAllocations != pool.totalAllocations) {
std.log.err("Memory pool of type {s} leaked {} elements", .{@typeName(Item), pool.totalAllocations - pool.freeAllocations});
-} else {
-std.log.info("Memory pool of type {s} contained a total of {} MiB ({} elements)", .{@typeName(Item), pool.totalAllocations*item_size >> 20, pool.totalAllocations});
+} else if(pool.totalAllocations != 0) {
+std.log.info("{} MiB ({} elements) in {s} Memory pool", .{pool.totalAllocations*item_size >> 20, pool.totalAllocations, @typeName(Item)});
}
pool.arena.deinit();
pool.* = undefined;
@@ -572,3 +572,127 @@ pub fn MemoryPool(Item: type) type {
}
};
}
pub fn PowerOfTwoPoolAllocator(minSize: comptime_int, maxSize: comptime_int, maxAlignment: comptime_int) type { // MARK: PowerOfTwoPoolAllocator
std.debug.assert(std.math.isPowerOfTwo(minSize));
std.debug.assert(std.math.isPowerOfTwo(maxSize));
std.debug.assert(maxSize > minSize);
std.debug.assert(minSize >= maxAlignment);
std.debug.assert(minSize >= @sizeOf(usize));
const alignment = @max(maxAlignment, @sizeOf(usize));
const baseShift = std.math.log2_int(usize, minSize);
const bucketCount = std.math.log2_int(usize, maxSize) - baseShift + 1;
return struct {
const Self = @This();
const Node = struct {
next: ?*align(alignment) @This(),
};
const NodePtr = *align(alignment) Node;
const Bucket = struct {
freeLists: ?*align(alignment) Node = null,
freeAllocations: usize = 0,
totalAllocations: usize = 0,
pub fn deinit(self: *Bucket, size: usize) void {
if(self.freeAllocations != self.totalAllocations) {
std.log.err("PowerOfTwoPoolAllocator bucket of size {} leaked {} elements", .{size, self.totalAllocations - self.freeAllocations});
} else if(self.totalAllocations != 0) {
std.log.info("{} MiB ({} elements) in size {} PowerOfTwoPoolAllocator bucket", .{self.totalAllocations*size >> 20, self.totalAllocations, size});
}
self.* = undefined;
}
/// Creates a new item and adds it to the memory pool.
pub fn create(self: *Bucket, arena: NeverFailingAllocator, size: usize) [*]u8 {
const node = if(self.freeLists) |item| blk: {
self.freeLists = item.next;
break :blk item;
} else @as(NodePtr, @ptrCast(self.allocNew(arena, size)));
self.freeAllocations -= 1;
return @ptrCast(node);
}
/// Destroys a previously created item.
/// Only pass items to `ptr` that were previously created with `create()` of the same memory pool!
pub fn destroy(self: *Bucket, ptr: [*]u8) void {
const node = @as(NodePtr, @ptrCast(@alignCast(ptr)));
node.* = Node{
.next = self.freeLists,
};
self.freeLists = node;
self.freeAllocations += 1;
}
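// The free list is intrusive: a freed block stores the Node that links it into the
// bucket's list, so the bookkeeping costs no extra memory. This is only sound because
// every block is at least @sizeOf(usize) bytes and at least pointer-aligned, which the
// asserts at the top of PowerOfTwoPoolAllocator guarantee.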
fn allocNew(self: *Bucket, arena: NeverFailingAllocator, size: usize) [*]align(alignment) u8 {
self.totalAllocations += 1;
self.freeAllocations += 1;
return arena.alignedAlloc(u8, alignment, size).ptr;
}
};
arena: NeverFailingArenaAllocator,
buckets: [bucketCount]Bucket = @splat(.{}),
mutex: std.Thread.Mutex = .{},
pub fn init(backingAllocator: NeverFailingAllocator) Self {
return .{.arena = .init(backingAllocator)};
}
pub fn deinit(self: *Self) void {
for(&self.buckets, 0..) |*bucket, i| {
bucket.deinit(@as(usize, minSize) << @intCast(i));
}
self.arena.deinit();
}
pub fn allocator(self: *Self) NeverFailingAllocator {
return .{
.allocator = .{
.vtable = &.{
.alloc = &alloc,
.resize = &resize,
.remap = &remap,
.free = &free,
},
.ptr = self,
},
.IAssertThatTheProvidedAllocatorCantFail = {},
};
}
fn alloc(ctx: *anyopaque, len: usize, _alignment: std.mem.Alignment, _: usize) ?[*]u8 {
std.debug.assert(@as(usize, 1) << @intFromEnum(_alignment) <= maxAlignment);
std.debug.assert(std.math.isPowerOfTwo(len));
std.debug.assert(len >= minSize);
std.debug.assert(len <= maxSize);
const self: *Self = @ptrCast(@alignCast(ctx));
const bucket = @ctz(len) - baseShift;
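// len is a power of two, so @ctz(len) == log2(len); subtracting baseShift maps minSize
// to bucket 0, 2*minSize to bucket 1, and so on. E.g. with minSize = 4096 (baseShift = 12),
// a 16384-byte request lands in bucket @ctz(16384) - 12 == 2.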
self.mutex.lock();
defer self.mutex.unlock();
return self.buckets[bucket].create(self.arena.allocator(), len);
}
fn resize(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize, _: usize) bool {
return false;
}
fn remap(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize, _: usize) ?[*]u8 {
return null;
}
fn free(ctx: *anyopaque, memory: []u8, _alignment: std.mem.Alignment, _: usize) void {
std.debug.assert(@as(usize, 1) << @intFromEnum(_alignment) <= maxAlignment);
std.debug.assert(std.math.isPowerOfTwo(memory.len));
const self: *Self = @ptrCast(@alignCast(ctx));
const bucket = @ctz(memory.len) - baseShift;
self.mutex.lock();
defer self.mutex.unlock();
self.buckets[bucket].destroy(memory.ptr);
}
};
}
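To make the contract concrete, a minimal usage sketch (hypothetical parameters; they match what the chunk-data instance above works out to under the 32³-chunk assumption). Every request must be a power-of-two size between minSize and maxSize, and resize/remap always fail, so callers reallocate by freeing and allocating fresh:

var pool: PowerOfTwoPoolAllocator(4096, 65536, 64) = .init(main.globalAllocator);
defer pool.deinit();

const buf = pool.allocator().alignedAlloc(u32, 64, 1024); // 1024 u32s = 4096 bytes: smallest bucket
defer pool.allocator().free(buf); // goes onto that bucket's free list, not back to the arena

The mutex makes this safe to use from multiple threads, and because freed blocks are recycled through the per-bucket free lists, the backing arena only ever grows to each bucket's peak usage and is released wholesale in deinit().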