Mirror of https://github.com/PixelGuys/Cubyz.git
Add an allocator for the palette compressed chunk data.
commit 2452aef769
parent 50de67fca1
@@ -576,6 +576,9 @@ pub fn main() void { // MARK: main()
     audio.init() catch std.log.err("Failed to initialize audio. Continuing the game without sounds.", .{});
     defer audio.deinit();
 
+    utils.initDynamicIntArrayStorage();
+    defer utils.deinitDynamicIntArrayStorage();
+
     chunk.init();
     defer chunk.deinit();
 
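A note on the hunk above: Zig runs `defer` statements in LIFO order, so initializing the int-array storage before `chunk.init()` guarantees the pool is torn down only after every chunk has released its buffers back into it. A minimal standalone sketch of that ordering (the print labels are illustrative, not Cubyz code):

const std = @import("std");

pub fn main() void {
    std.debug.print("pool init\n", .{});
    defer std.debug.print("pool deinit (runs last)\n", .{});

    std.debug.print("chunk init\n", .{});
    defer std.debug.print("chunk deinit (runs first)\n", .{});
}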
@@ -777,25 +777,36 @@ pub const ThreadPool = struct { // MARK: ThreadPool
     }
 };
 
+var dynamicIntArrayAllocator: main.heap.PowerOfTwoPoolAllocator(main.chunk.chunkVolume/@bitSizeOf(u8), main.chunk.chunkVolume*@sizeOf(u16), 64) = undefined;
+
+pub fn initDynamicIntArrayStorage() void {
+    dynamicIntArrayAllocator = .init(main.globalAllocator);
+}
+
+pub fn deinitDynamicIntArrayStorage() void {
+    dynamicIntArrayAllocator.deinit();
+}
+
 /// An packed array of integers with dynamic bit size.
 /// The bit size can be changed using the `resize` function.
 pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIntArray
+    std.debug.assert(std.math.isPowerOfTwo(size));
     return struct {
         data: []align(64) u32 = &.{},
         bitSize: u5 = 0,
 
         const Self = @This();
 
-        pub fn initCapacity(allocator: main.heap.NeverFailingAllocator, bitSize: u5) Self {
+        pub fn initCapacity(bitSize: u5) Self {
             std.debug.assert(bitSize == 0 or bitSize & bitSize - 1 == 0); // Must be a power of 2
             return .{
-                .data = allocator.alignedAlloc(u32, 64, @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize),
+                .data = dynamicIntArrayAllocator.allocator().alignedAlloc(u32, 64, @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize),
                 .bitSize = bitSize,
             };
         }
 
-        pub fn deinit(self: *Self, allocator: main.heap.NeverFailingAllocator) void {
-            allocator.free(self.data);
+        pub fn deinit(self: *Self) void {
+            dynamicIntArrayAllocator.allocator().free(self.data);
             self.* = .{};
         }
 
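Why a power-of-two pool fits this data: `initCapacity` requests `size/32 * bitSize` u32 words. Assuming `chunkVolume = 32*32*32 = 32768` (the usual Cubyz chunk size; an assumption here), that is `chunkVolume/8 * bitSize` bytes, running from 4096 B at `bitSize = 1` to 65536 B at `bitSize = 16` - exactly the `main.chunk.chunkVolume/@bitSizeOf(u8)` and `main.chunk.chunkVolume*@sizeOf(u16)` bounds passed to `PowerOfTwoPoolAllocator` above, with every intermediate size a power of two. A standalone sketch checking that arithmetic:

const std = @import("std");

const chunkVolume = 32*32*32; // assumed chunk size
const minSize = chunkVolume/@bitSizeOf(u8); // 4096, the bitSize == 1 buffer
const maxSize = chunkVolume*@sizeOf(u16); // 65536, the bitSize == 16 buffer

pub fn main() void {
    var bitSize: usize = 1;
    while(bitSize <= 16) : (bitSize *= 2) {
        // Byte size of the u32 buffer that initCapacity would request:
        const bytes = @divExact(chunkVolume, @bitSizeOf(u32))*bitSize*@sizeOf(u32);
        std.debug.assert(std.math.isPowerOfTwo(bytes));
        std.debug.assert(bytes >= minSize and bytes <= maxSize);
        std.debug.print("bitSize {d:>2} -> {d} bytes\n", .{bitSize, bytes});
    }
}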
@@ -808,9 +819,9 @@ pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIntArray
             return result;
         }
 
-        pub fn resizeOnce(self: *Self, allocator: main.heap.NeverFailingAllocator) void {
+        pub fn resizeOnce(self: *Self) void {
             const newBitSize = if(self.bitSize != 0) self.bitSize*2 else 1;
-            var newSelf = Self.initCapacity(allocator, newBitSize);
+            var newSelf = Self.initCapacity(newBitSize);
 
             switch(self.bitSize) {
                 0 => @memset(newSelf.data, 0),
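Because `resizeOnce` only ever doubles (1 -> 2 -> 4 -> 8 -> 16 bits), a grown buffer is again a power of two and the freed one drops straight back onto a pool free list for reuse. The switch body elided between this hunk and the next presumably widens each packed entry in place; a hypothetical standalone version of that widening step, shown for 4 -> 8 bits on a single u32 word:

const std = @import("std");

pub fn main() void {
    const old: u32 = 0x87654321; // eight 4-bit entries, 0x1 in the lowest slot
    var lo: u32 = 0; // entries 0..3, rewritten as 8-bit slots
    var hi: u32 = 0; // entries 4..7, rewritten as 8-bit slots
    var i: u5 = 0;
    while(i < 4) : (i += 1) {
        lo |= ((old >> (4*i)) & 0xf) << (8*i);
        hi |= ((old >> (4*(i + 4))) & 0xf) << (8*i);
    }
    std.debug.print("0x{x} -> 0x{x:0>8} 0x{x:0>8}\n", .{old, lo, hi});
}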
@@ -823,7 +834,7 @@ pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIntArray
                 },
                 else => unreachable,
             }
-            allocator.free(self.data);
+            dynamicIntArrayAllocator.allocator().free(self.data);
             self.* = newSelf;
         }
 
@@ -889,13 +900,10 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: PaletteCompressedRegion
         }
 
         pub fn initCopy(self: *Self, template: *const Self) void {
-            const dataDupe = main.globalAllocator.alignedAlloc(u32, 64, template.data.data.len);
-            @memcpy(dataDupe, template.data.data);
+            const dataDupe = DynamicPackedIntArray(size).initCapacity(template.data.bitSize);
+            @memcpy(dataDupe.data, template.data.data);
             self.* = .{
-                .data = .{
-                    .data = dataDupe,
-                    .bitSize = template.data.bitSize,
-                },
+                .data = dataDupe,
                 .palette = main.globalAllocator.dupe(T, template.palette),
                 .paletteOccupancy = main.globalAllocator.dupe(u32, template.paletteOccupancy),
                 .paletteLength = template.paletteLength,
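The `initCopy` rewrite keeps allocation and deallocation paired: previously the buffer was duplicated with `main.globalAllocator`, but after this commit `deinit` returns buffers to `dynamicIntArrayAllocator`, so the copy has to come from the same pool, which routing it through `initCapacity` ensures. A minimal sketch of the invariant, assuming nothing from Cubyz (`dupeWith` is a hypothetical stand-in for initCapacity plus @memcpy):

const std = @import("std");

fn dupeWith(allocator: std.mem.Allocator, src: []const u32) ![]u32 {
    const copy = try allocator.alloc(u32, src.len);
    @memcpy(copy, src);
    return copy;
}

pub fn main() !void {
    var gpa: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    const copy = try dupeWith(allocator, &[_]u32{1, 2, 3});
    defer allocator.free(copy); // freed by the allocator that created it
    std.debug.print("{any}\n", .{copy});
}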
@@ -908,7 +916,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: PaletteCompressedRegion
             const bitSize: u5 = getTargetBitSize(paletteLength);
             const bufferLength = @as(u32, 1) << bitSize;
             self.* = .{
-                .data = DynamicPackedIntArray(size).initCapacity(main.globalAllocator, bitSize),
+                .data = DynamicPackedIntArray(size).initCapacity(bitSize),
                 .palette = main.globalAllocator.alloc(T, bufferLength),
                 .paletteOccupancy = main.globalAllocator.alloc(u32, bufferLength),
                 .paletteLength = paletteLength,
@@ -919,7 +927,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: PaletteCompressedRegion
         }
 
         pub fn deinit(self: *Self) void {
-            self.data.deinit(main.globalAllocator);
+            self.data.deinit();
             main.globalAllocator.free(self.palette);
             main.globalAllocator.free(self.paletteOccupancy);
         }
@@ -945,7 +953,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: PaletteCompressedRegion
             }
             if(paletteIndex == self.paletteLength) {
                 if(self.paletteLength == self.palette.len) {
-                    self.data.resizeOnce(main.globalAllocator);
+                    self.data.resizeOnce();
                     self.palette = main.globalAllocator.realloc(self.palette, @as(usize, 1) << self.data.bitSize);
                     const oldLen = self.paletteOccupancy.len;
                     self.paletteOccupancy = main.globalAllocator.realloc(self.paletteOccupancy, @as(usize, 1) << self.data.bitSize);
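Capacity bookkeeping in the hunk above: once every palette slot is in use, a single `resizeOnce` doubles `bitSize`, and both `palette` and `paletteOccupancy` are realloc'ed to `1 << bitSize` entries, so capacity squares (2, 4, 16, 256, 65536) rather than doubling. A sketch of that progression:

const std = @import("std");

pub fn main() void {
    var bitSize: usize = 1;
    while(bitSize <= 16) : (bitSize *= 2) {
        std.debug.print("bitSize {d:>2} -> palette capacity {d}\n", .{bitSize, @as(u32, 1) << @intCast(bitSize)});
    }
}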
@@ -1007,7 +1015,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: PaletteCompressedRegion
             const newBitSize = getTargetBitSize(@intCast(self.activePaletteEntries));
             if(self.data.bitSize == newBitSize) return;
 
-            var newData = main.utils.DynamicPackedIntArray(size).initCapacity(main.globalAllocator, newBitSize);
+            var newData = main.utils.DynamicPackedIntArray(size).initCapacity(newBitSize);
             const paletteMap: []u32 = main.stackAllocator.alloc(u32, self.paletteLength);
             defer main.stackAllocator.free(paletteMap);
             {
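`optimizeLayout` compacts the palette: `paletteMap` translates each old palette index to its post-compaction position, and the next hunk rewrites every stored value through it. A standalone sketch of that mapping (the occupancy counts and data values are made up for illustration):

const std = @import("std");

pub fn main() void {
    const occupancy = [_]u32{5, 0, 3, 0, 1}; // entries 1 and 3 are unused
    var paletteMap: [occupancy.len]u32 = undefined;
    var newIndex: u32 = 0;
    for(occupancy, 0..) |count, oldIndex| {
        paletteMap[oldIndex] = newIndex;
        if(count != 0) newIndex += 1;
    }
    // Mirrors the next hunk's newData.setValue(i, paletteMap[self.data.getValue(i)]):
    for([_]u32{0, 2, 4, 2, 0}) |oldValue| {
        std.debug.print("{d} -> {d}\n", .{oldValue, paletteMap[oldValue]});
    }
}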
@@ -1031,7 +1039,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: PaletteCompressedRegion
             for(0..size) |i| {
                 newData.setValue(i, paletteMap[self.data.getValue(i)]);
             }
-            self.data.deinit(main.globalAllocator);
+            self.data.deinit();
             self.data = newData;
             self.paletteLength = self.activePaletteEntries;
             self.palette = main.globalAllocator.realloc(self.palette, @as(usize, 1) << self.data.bitSize);
@@ -490,7 +490,7 @@ pub const NeverFailingArenaAllocator = struct { // MARK: NeverFailingArena
 };
 
 /// basically a copy of std.heap.MemoryPool, except it's thread-safe and has some more diagnostics.
-pub fn MemoryPool(Item: type) type {
+pub fn MemoryPool(Item: type) type { // MARK: MemoryPool
     return struct {
         const Pool = @This();
 
@@ -526,8 +526,8 @@ pub fn MemoryPool(Item: type) type {
         pub fn deinit(pool: *Pool) void {
             if(pool.freeAllocations != pool.totalAllocations) {
                 std.log.err("Memory pool of type {s} leaked {} elements", .{@typeName(Item), pool.totalAllocations - pool.freeAllocations});
-            } else {
-                std.log.info("Memory pool of type {s} contained a total of {} MiB ({} elements)", .{@typeName(Item), pool.totalAllocations*item_size >> 20, pool.totalAllocations});
+            } else if(pool.totalAllocations != 0) {
+                std.log.info("{} MiB ({} elements) in {s} Memory pool", .{pool.totalAllocations*item_size >> 20, pool.totalAllocations, @typeName(Item)});
             }
             pool.arena.deinit();
             pool.* = undefined;
@@ -572,3 +572,127 @@ pub fn MemoryPool(Item: type) type {
         }
     };
 }
+
+pub fn PowerOfTwoPoolAllocator(minSize: comptime_int, maxSize: comptime_int, maxAlignment: comptime_int) type { // MARK: PowerOfTwoPoolAllocator
+    std.debug.assert(std.math.isPowerOfTwo(minSize));
+    std.debug.assert(std.math.isPowerOfTwo(maxSize));
+    std.debug.assert(maxSize > minSize);
+    std.debug.assert(minSize >= maxAlignment);
+    std.debug.assert(minSize >= @sizeOf(usize));
+
+    const alignment = @max(maxAlignment, @sizeOf(usize));
+
+    const baseShift = std.math.log2_int(usize, minSize);
+    const bucketCount = std.math.log2_int(usize, maxSize) - baseShift + 1;
+    return struct {
+        const Self = @This();
+
+        const Node = struct {
+            next: ?*align(alignment) @This(),
+        };
+        const NodePtr = *align(alignment) Node;
+
+        const Bucket = struct {
+            freeLists: ?*align(alignment) Node = null,
+            freeAllocations: usize = 0,
+            totalAllocations: usize = 0,
+
+            pub fn deinit(self: *Bucket, size: usize) void {
+                if(self.freeAllocations != self.totalAllocations) {
+                    std.log.err("PowerOfTwoPoolAllocator bucket of size {} leaked {} elements", .{size, self.totalAllocations - self.freeAllocations});
+                } else if(self.totalAllocations != 0) {
+                    std.log.info("{} MiB ({} elements) in size {} PowerOfTwoPoolAllocator bucket", .{self.totalAllocations*size >> 20, self.totalAllocations, size});
+                }
+                self.* = undefined;
+            }
+
+            /// Creates a new item and adds it to the memory pool.
+            pub fn create(self: *Bucket, arena: NeverFailingAllocator, size: usize) [*]u8 {
+                const node = if(self.freeLists) |item| blk: {
+                    self.freeLists = item.next;
+                    break :blk item;
+                } else @as(NodePtr, @ptrCast(self.allocNew(arena, size)));
+
+                self.freeAllocations -= 1;
+                return @ptrCast(node);
+            }
+
+            /// Destroys a previously created item.
+            /// Only pass items to `ptr` that were previously created with `create()` of the same memory pool!
+            pub fn destroy(self: *Bucket, ptr: [*]u8) void {
+                const node = @as(NodePtr, @ptrCast(@alignCast(ptr)));
+                node.* = Node{
+                    .next = self.freeLists,
+                };
+                self.freeLists = node;
+                self.freeAllocations += 1;
+            }
+
+            fn allocNew(self: *Bucket, arena: NeverFailingAllocator, size: usize) [*]align(alignment) u8 {
+                self.totalAllocations += 1;
+                self.freeAllocations += 1;
+                return arena.alignedAlloc(u8, alignment, size).ptr;
+            }
+        };
+
+        arena: NeverFailingArenaAllocator,
+        buckets: [bucketCount]Bucket = @splat(.{}),
+        mutex: std.Thread.Mutex = .{},
+
+        pub fn init(backingAllocator: NeverFailingAllocator) Self {
+            return .{.arena = .init(backingAllocator)};
+        }
+
+        pub fn deinit(self: *Self) void {
+            for(&self.buckets, 0..) |*bucket, i| {
+                bucket.deinit(@as(usize, minSize) << @intCast(i));
+            }
+            self.arena.deinit();
+        }
+
+        pub fn allocator(self: *Self) NeverFailingAllocator {
+            return .{
+                .allocator = .{
+                    .vtable = &.{
+                        .alloc = &alloc,
+                        .resize = &resize,
+                        .remap = &remap,
+                        .free = &free,
+                    },
+                    .ptr = self,
+                },
+                .IAssertThatTheProvidedAllocatorCantFail = {},
+            };
+        }
+
+        fn alloc(ctx: *anyopaque, len: usize, _alignment: std.mem.Alignment, _: usize) ?[*]u8 {
+            std.debug.assert(@as(usize, 1) << @intFromEnum(_alignment) <= maxAlignment);
+            std.debug.assert(std.math.isPowerOfTwo(len));
+            std.debug.assert(len >= minSize);
+            std.debug.assert(len <= maxSize);
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            const bucket = @ctz(len) - baseShift;
+            self.mutex.lock();
+            defer self.mutex.unlock();
+            return self.buckets[bucket].create(self.arena.allocator(), len);
+        }
+
+        fn resize(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize, _: usize) bool {
+            return false;
+        }
+
+        fn remap(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize, _: usize) ?[*]u8 {
+            return null;
+        }
+
+        fn free(ctx: *anyopaque, memory: []u8, _alignment: std.mem.Alignment, _: usize) void {
+            std.debug.assert(@as(usize, 1) << @intFromEnum(_alignment) <= maxAlignment);
+            std.debug.assert(std.math.isPowerOfTwo(memory.len));
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            const bucket = @ctz(memory.len) - baseShift;
+            self.mutex.lock();
+            defer self.mutex.unlock();
+            self.buckets[bucket].destroy(memory.ptr);
+        }
+    };
+}
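How the new allocator finds a bucket: `baseShift = log2(minSize)`, and since `alloc` asserts every length is a power of two, `@ctz(len)` equals `log2(len)`, so `@ctz(len) - baseShift` indexes the bucket array directly. Note also that `resize` and `remap` always refuse, so buffers only ever round-trip through `alloc`/`free` and stay power-of-two sized, and freed blocks are never returned to the backing arena, only to the per-bucket free lists. A standalone sketch of the bucket math, using the 4096/65536 bounds that follow from the assumed 32^3 chunk volume discussed earlier:

const std = @import("std");

const minSize: usize = 4096;
const maxSize: usize = 65536;
const baseShift = std.math.log2_int(usize, minSize); // 12
const bucketCount = std.math.log2_int(usize, maxSize) - baseShift + 1; // 5 buckets

pub fn main() void {
    std.debug.print("bucketCount = {d}\n", .{bucketCount});
    var len: usize = minSize;
    while(len <= maxSize) : (len *= 2) {
        std.debug.print("len {d:>5} -> bucket {d}\n", .{len, @ctz(len) - baseShift});
    }
}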