Mirror of https://github.com/PixelGuys/Cubyz.git (synced 2025-08-03 11:17:05 -04:00)

Make it compile with the new zig version

This commit is contained in:
parent a8b36f3ca4
commit 557c8f7b0c
@@ -998,7 +998,7 @@ pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIn
 	pub fn initCapacity(bitSize: u5) Self {
 		std.debug.assert(bitSize == 0 or bitSize & bitSize - 1 == 0); // Must be a power of 2
 		return .{
-			.data = dynamicIntArrayAllocator.allocator().alignedAlloc(u32, 64, @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize),
+			.data = dynamicIntArrayAllocator.allocator().alignedAlloc(u32, .@"64", @as(usize, @divExact(size, @bitSizeOf(u32)))*bitSize),
 			.bitSize = bitSize,
 		};
 	}
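Context on the hunk above (an aside, not part of the commit): in the new Zig std, the alignment parameter of alignedAlloc is a std.mem.Alignment enum whose integer tag is the log2 of the alignment in bytes, rather than a bare integer like 64. `.@"64"` is the enum literal naming the 64-byte tag. A minimal sketch of the representation, assuming Zig 0.14's std.mem.Alignment:

const std = @import("std");
const Alignment = std.mem.Alignment;

test "Alignment stores the log2 of the byte alignment" {
    const a: Alignment = .@"64";
    // The tag value is log2(64) = 6; toByteUnits converts back to bytes.
    try std.testing.expectEqual(6, @intFromEnum(a));
    try std.testing.expectEqual(@as(usize, 64), a.toByteUnits());
    // fromByteUnits is the inverse for power-of-two byte counts.
    try std.testing.expectEqual(a, Alignment.fromByteUnits(64));
}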
@@ -17,7 +17,7 @@ pub const StackAllocator = struct { // MARK: StackAllocator
 	pub fn init(backingAllocator: NeverFailingAllocator, size: u31) StackAllocator {
 		return .{
 			.backingAllocator = backingAllocator,
-			.buffer = backingAllocator.alignedAlloc(u8, 4096, size),
+			.buffer = backingAllocator.alignedAlloc(u8, .fromByteUnits(4096), size),
 			.index = 0,
 		};
 	}
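Note on `.fromByteUnits(4096)` (an aside): this is a decl literal, resolved against the expected std.mem.Alignment parameter type. As far as I can tell, Alignment only declares named tags for small powers of two, so a page-sized alignment goes through the conversion helper rather than an enum literal. A self-contained sketch against the plain std.mem.Allocator API (the allocPage helper is hypothetical, for illustration only):

const std = @import("std");

// Hypothetical helper: allocates one 4096-byte-aligned page.
fn allocPage(gpa: std.mem.Allocator) ![]align(4096) u8 {
    // Pre-change spelling would have been: gpa.alignedAlloc(u8, 4096, 4096)
    return gpa.alignedAlloc(u8, .fromByteUnits(4096), 4096);
}

test allocPage {
    const page = try allocPage(std.testing.allocator);
    defer std.testing.allocator.free(page);
    try std.testing.expect(@intFromPtr(page.ptr) % 4096 == 0);
}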
@@ -341,9 +341,9 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator
 		self: NeverFailingAllocator,
 		comptime T: type,
 		/// null means naturally aligned
-		comptime alignment: ?u29,
+		comptime alignment: ?Alignment,
 		n: usize,
-	) []align(alignment orelse @alignOf(T)) T {
+	) []align(if (alignment) |a| a.toByteUnits() else @alignOf(T)) T {
 		return self.allocator.alignedAlloc(T, alignment, n) catch unreachable;
 	}
 
@@ -351,10 +351,10 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator
 		self: NeverFailingAllocator,
 		comptime T: type,
 		/// null means naturally aligned
-		comptime alignment: ?u29,
+		comptime alignment: ?Alignment,
 		n: usize,
 		return_address: usize,
-	) []align(alignment orelse @alignOf(T)) T {
+	) []align(if (alignment) |a| a.toByteUnits() else @alignOf(T)) T {
 		return self.allocator.allocAdvancedWithRetAddr(T, alignment, n, return_address) catch unreachable;
 	}
 
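Aside on the two hunks above: because `alignment` stays comptime, the slice's align() attribute can still be computed from it; the only change is converting the enum back to byte units with toByteUnits(). Callers pass null for natural alignment or an Alignment value otherwise. A runnable sketch against the underlying std.mem.Allocator (which, judging by the wrapper passing `alignment` straight through, takes the same ?Alignment parameter):

const std = @import("std");

test "null means naturally aligned, an enum value means explicit" {
    const gpa = std.testing.allocator;
    // null: the result type is []align(@alignOf(u64)) u64.
    const natural = try gpa.alignedAlloc(u64, null, 16);
    defer gpa.free(natural);
    // .@"64": the result type is []align(64) u64.
    const wide = try gpa.alignedAlloc(u64, .@"64", 16);
    defer gpa.free(wide);
    try std.testing.expect(@intFromPtr(wide.ptr) % 64 == 0);
}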
@@ -483,6 +483,7 @@ pub const NeverFailingArenaAllocator = struct { // MARK: NeverFailingArena
 	}
 
 	pub fn shrinkAndFree(self: *NeverFailingArenaAllocator) void {
+		if(true) return;
 		const node = self.arena.state.buffer_list.first orelse return;
 		const allocBuf = @as([*]u8, @ptrCast(node))[0..node.data];
 		const dataSize = std.mem.alignForward(usize, @sizeOf(std.SinglyLinkedList(usize).Node) + self.arena.state.end_index, @alignOf(std.SinglyLinkedList(usize).Node));
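Aside on the added `if(true) return;`: it stubs out the body of shrinkAndFree, presumably until the pointer arithmetic over std.SinglyLinkedList internals is ported to the new std. The wrapper matters: Zig rejects statements that follow a bare `return;` at the same block level ("unreachable code"), but that check is syntactic, so a comptime-true branch keeps the legacy body compiling while skipping it at runtime. A minimal illustration (doLegacyWork is a made-up stand-in):

fn stubbedOut() void {
    if(true) return; // a bare `return;` here would make the next line a compile error
    doLegacyWork(); // legacy body kept in the source, never executed
}

fn doLegacyWork() void {}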
@@ -570,7 +571,7 @@ pub fn MemoryPool(Item: type) type { // MARK: MemoryPool
 			main.utils.assertLocked(&pool.mutex);
 			pool.totalAllocations += 1;
 			pool.freeAllocations += 1;
-			const mem = pool.arena.allocator().alignedAlloc(u8, item_alignment, item_size);
+			const mem = pool.arena.allocator().alignedAlloc(u8, .fromByteUnits(item_alignment), item_size);
 			return mem[0..item_size]; // coerce slice to array pointer
 		}
 	};
@@ -634,7 +635,7 @@ pub fn PowerOfTwoPoolAllocator(minSize: comptime_int, maxSize: comptime_int, max
 		fn allocNew(self: *Bucket, arena: NeverFailingAllocator, size: usize) [*]align(alignment) u8 {
 			self.totalAllocations += 1;
 			self.freeAllocations += 1;
-			return arena.alignedAlloc(u8, alignment, size).ptr;
+			return arena.alignedAlloc(u8, .fromByteUnits(alignment), size).ptr;
 		}
 	};
 
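Aside on the last two hunks: item_alignment and alignment are comptime integer constants in these pools, and Alignment.fromByteUnits folds at comptime, so the conversion costs nothing and the surrounding types (such as the `[*]align(alignment) u8` return type) are untouched. A sketch of the round trip, with `alignment` standing in for the pools' constants:

const std = @import("std");
const Alignment = std.mem.Alignment;

const alignment = 64; // stand-in for the pools' comptime alignment constant

comptime {
    const a: Alignment = .fromByteUnits(alignment);
    // The conversion round-trips for powers of two.
    std.debug.assert(a.toByteUnits() == alignment);
}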
|