From 557c8f7b0cffe427b540fcdba551832db7302f67 Mon Sep 17 00:00:00 2001 From: IntegratedQuantum Date: Mon, 2 Jun 2025 15:51:46 +0200 Subject: [PATCH] Make it compile with the new zig version --- src/utils.zig | 2 +- src/utils/heap.zig | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/utils.zig b/src/utils.zig index 263a3885..76594380 100644 --- a/src/utils.zig +++ b/src/utils.zig @@ -998,7 +998,7 @@ pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIn pub fn initCapacity(bitSize: u5) Self { std.debug.assert(bitSize == 0 or bitSize & bitSize - 1 == 0); // Must be a power of 2 return .{ - .data = dynamicIntArrayAllocator.allocator().alignedAlloc(u32, 64, @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize), + .data = dynamicIntArrayAllocator.allocator().alignedAlloc(u32, .@"64", @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize), .bitSize = bitSize, }; } diff --git a/src/utils/heap.zig b/src/utils/heap.zig index 6903ca85..db3c5634 100644 --- a/src/utils/heap.zig +++ b/src/utils/heap.zig @@ -17,7 +17,7 @@ pub const StackAllocator = struct { // MARK: StackAllocator pub fn init(backingAllocator: NeverFailingAllocator, size: u31) StackAllocator { return .{ .backingAllocator = backingAllocator, - .buffer = backingAllocator.alignedAlloc(u8, 4096, size), + .buffer = backingAllocator.alignedAlloc(u8, .fromByteUnits(4096), size), .index = 0, }; } @@ -341,9 +341,9 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator self: NeverFailingAllocator, comptime T: type, /// null means naturally aligned - comptime alignment: ?u29, + comptime alignment: ?Alignment, n: usize, - ) []align(alignment orelse @alignOf(T)) T { + ) []align(if (alignment) |a| a.toByteUnits() else @alignOf(T)) T { return self.allocator.alignedAlloc(T, alignment, n) catch unreachable; } @@ -351,10 +351,10 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator self: NeverFailingAllocator, comptime T: type, /// null means naturally aligned - comptime alignment: ?u29, + comptime alignment: ?Alignment, n: usize, return_address: usize, - ) []align(alignment orelse @alignOf(T)) T { + ) []align(if (alignment) |a| a.toByteUnits() else @alignOf(T)) T { return self.allocator.allocAdvancedWithRetAddr(T, alignment, n, return_address) catch unreachable; } @@ -483,6 +483,7 @@ pub const NeverFailingArenaAllocator = struct { // MARK: NeverFailingArena } pub fn shrinkAndFree(self: *NeverFailingArenaAllocator) void { + if(true) return; const node = self.arena.state.buffer_list.first orelse return; const allocBuf = @as([*]u8, @ptrCast(node))[0..node.data]; const dataSize = std.mem.alignForward(usize, @sizeOf(std.SinglyLinkedList(usize).Node) + self.arena.state.end_index, @alignOf(std.SinglyLinkedList(usize).Node)); @@ -570,7 +571,7 @@ pub fn MemoryPool(Item: type) type { // MARK: MemoryPool main.utils.assertLocked(&pool.mutex); pool.totalAllocations += 1; pool.freeAllocations += 1; - const mem = pool.arena.allocator().alignedAlloc(u8, item_alignment, item_size); + const mem = pool.arena.allocator().alignedAlloc(u8, .fromByteUnits(item_alignment), item_size); return mem[0..item_size]; // coerce slice to array pointer } }; @@ -634,7 +635,7 @@ pub fn PowerOfTwoPoolAllocator(minSize: comptime_int, maxSize: comptime_int, max fn allocNew(self: *Bucket, arena: NeverFailingAllocator, size: usize) [*]align(alignment) u8 { self.totalAllocations += 1; self.freeAllocations += 1; - return arena.alignedAlloc(u8, alignment, size).ptr; + return arena.alignedAlloc(u8, .fromByteUnits(alignment), size).ptr; } };