Update Zig to 0.14.0 (#1158)

* Update Zig to 0.14.0

* Update ci.yml

* Copy the zig fmt source code

* Update ci.yml

* Fix formatting issues

* Update CONTRIBUTING.md with a new formatting command
This commit is contained in:
IntegratedQuantum 2025-03-05 21:41:02 +01:00 committed by GitHub
parent 43b1b977f4
commit 03769c2cda
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 569 additions and 100 deletions

View File

@ -16,14 +16,14 @@ jobs:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: mlugg/setup-zig@v1 - uses: mlugg/setup-zig@v1
with: with:
version: 0.14.0-dev.1550+4fba7336a version: 0.14.0
- run: sudo apt install libgl-dev libasound2-dev libx11-dev - run: sudo apt install libgl-dev libasound2-dev libx11-dev
- run: wget -O /opt/hostedtoolcache/zig/0.14.0-dev.1550/x64/lib/std/zig/render.zig https://github.com/PixelGuys/Cubyz-std-lib/releases/download/0.14.0-dev.1550+4fba7336a/render.zig - run: wget -O /opt/hostedtoolcache/zig/0.14.0/x64/lib/std/zig/render.zig https://github.com/PixelGuys/Cubyz-std-lib/releases/download/0.14.0/render.zig
- run: zig build - run: zig build
- run: zig build -Dtarget=x86_64-windows-gnu - run: zig build -Dtarget=x86_64-windows-gnu
- run: zig build format --summary none - run: zig build format --summary none
- run: | - run: |
if zig fmt *.zig src/*.zig src/*/*.zig src/*/*/*.zig src/*/*/*/*.zig src/*/*/*/*/*.zig src/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*/*/*/*.zig | grep -q . ; then if zig build fmt -- *.zig src/*.zig src/*/*.zig src/*/*/*.zig src/*/*/*/*.zig src/*/*/*/*/*.zig src/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*/*/*.zig src/*/*/*/*/*/*/*/*/*/*.zig | grep -q . ; then
git diff --color=always; git diff --color=always;
exit 1; exit 1;
fi fi

View File

@ -1 +1 @@
0.14.0-dev.1550+4fba7336a 0.14.0

View File

@ -19,9 +19,9 @@ If you are new to Zig it can also be very helpful to ask questions. For example,
Cubyz uses a slightly modified version of `zig fmt` which uses tabs and behaves slightly different in regards to spacing. Cubyz uses a slightly modified version of `zig fmt` which uses tabs and behaves slightly different in regards to spacing.
Because of that, if you use the default Zig formatter it will reformat all your files. Because of that, if you use the default Zig formatter it will reformat all your files.
To fix this you can either disable zig's formatter (In VSCode you can disable this in the Zig extension settings), or make sure it uses the zig executable that comes with the game (in ./compiler/zig) which does contain a fixed version of the formatter. To fix this you need to disable zig's formatter (In VSCode you can disable this in the Zig extension settings).
To run the formatter locally on a specific file, you can use `./compiler/zig/zig fmt fileName.zig`. To run the formatter locally on a specific file, you can use `./compiler/zig/zig build fmt -- fileName.zig`.
# Select something to work on # Select something to work on

View File

@ -131,4 +131,22 @@ pub fn build(b: *std.Build) !void {
const formatter_step = b.step("format", "Check the formatting of the code"); const formatter_step = b.step("format", "Check the formatting of the code");
formatter_step.dependOn(&formatter_cmd.step); formatter_step.dependOn(&formatter_cmd.step);
const zig_fmt = b.addExecutable(.{
.name = "zig_fmt",
.root_source_file = b.path("src/formatter/fmt.zig"),
.target = target,
.optimize = optimize,
});
const zig_fmt_install = b.addInstallArtifact(zig_fmt, .{});
const zig_fmt_cmd = b.addRunArtifact(zig_fmt);
zig_fmt_cmd.step.dependOn(&zig_fmt_install.step);
if(b.args) |args| {
zig_fmt_cmd.addArgs(args);
}
const zig_fmt_step = b.step("fmt", "Run the (modified) zig fmt on the code");
zig_fmt_step.dependOn(&zig_fmt_cmd.step);
} }

View File

@ -1,5 +1,6 @@
.{ .{
.name = "Cubyzig", .name = .Cubyzig,
.fingerprint = 0x782a2e402c0ee887,
.version = "0.0.0", .version = "0.0.0",
.paths = .{""}, .paths = .{""},
.dependencies = .{ .dependencies = .{

396
src/formatter/fmt.zig Normal file
View File

@ -0,0 +1,396 @@
/// CLI entry point: sets up the allocators and forwards the command-line
/// arguments (minus argv[0]) to `run`.
pub fn main() !void {
	var gpa = std.heap.DebugAllocator(.{}).init;
	// Run the leak check at exit; without this the DebugAllocator's whole
	// purpose (leak/use-after-free detection) is lost.
	defer _ = gpa.deinit();
	var arena = std.heap.ArenaAllocator.init(gpa.allocator());
	defer arena.deinit();
	const args = try process.argsAlloc(arena.allocator());
	try run(gpa.allocator(), arena.allocator(), args[1..]);
}
// zig fmt: off
// NOTE(review): the rest of this file mirrors the zig compiler's fmt.zig;
// keep local edits minimal so it stays easy to diff against upstream.
/// Everything below is a direct copy of fmt.zig from the zig compiler
// Help text printed for `-h`/`--help`.
const usage_fmt =
\\Usage: zig fmt [file]...
\\
\\   Formats the input files and modifies them in-place.
\\   Arguments can be files or directories, which are searched
\\   recursively.
\\
\\Options:
\\   -h, --help             Print this help and exit
\\   --color [auto|off|on]  Enable or disable colored error messages
\\   --stdin                Format code from stdin; output to stdout
\\   --check                List non-conforming files and exit with an error
\\                          if the list is non-empty
\\   --ast-check            Run zig ast-check on every file
\\   --exclude [file]       Exclude file or directory from formatting
\\   --zon                  Treat all input files as ZON, regardless of file extension
\\
\\
;
/// Shared state for one formatting run over many files/directories.
const Fmt = struct {
seen: SeenMap, // inodes already processed; pre-seeded with --exclude paths
any_error: bool, // set when any file failed; the run exits with code 1
check_ast: bool, // --ast-check: also run AstGen/ZonGen per file
force_zon: bool, // --zon: parse every input as ZON regardless of extension
color: Color,
gpa: Allocator,
arena: Allocator,
out_buffer: std.ArrayList(u8), // render target, reused across files
const SeenMap = std.AutoHashMap(fs.File.INode, void);
};
/// Parse `zig fmt`-style arguments and format the given files/directories
/// in place, or stdin to stdout with `--stdin`.
///
/// Exits the process directly in several paths: `fatal` on usage errors,
/// code 2 on parse/AST errors in `--stdin` mode, code 1 when any file
/// failed, and the `--stdin --check` exit code described at the bottom.
pub fn run(
gpa: Allocator,
arena: Allocator,
args: []const []const u8,
) !void {
var color: Color = .auto;
var stdin_flag = false;
var check_flag = false;
var check_ast_flag = false;
var force_zon = false;
var input_files = std.ArrayList([]const u8).init(gpa);
defer input_files.deinit();
var excluded_files = std.ArrayList([]const u8).init(gpa);
defer excluded_files.deinit();
// Argument parsing: anything starting with '-' is an option, everything
// else is an input path.
{
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
const stdout = std.io.getStdOut().writer();
try stdout.writeAll(usage_fmt);
return process.cleanExit();
} else if (mem.eql(u8, arg, "--color")) {
if (i + 1 >= args.len) {
fatal("expected [auto|on|off] after --color", .{});
}
i += 1;
const next_arg = args[i];
color = std.meta.stringToEnum(Color, next_arg) orelse {
fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg});
};
} else if (mem.eql(u8, arg, "--stdin")) {
stdin_flag = true;
} else if (mem.eql(u8, arg, "--check")) {
check_flag = true;
} else if (mem.eql(u8, arg, "--ast-check")) {
check_ast_flag = true;
} else if (mem.eql(u8, arg, "--exclude")) {
if (i + 1 >= args.len) {
fatal("expected parameter after --exclude", .{});
}
i += 1;
const next_arg = args[i];
try excluded_files.append(next_arg);
} else if (mem.eql(u8, arg, "--zon")) {
force_zon = true;
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
}
} else {
try input_files.append(arg);
}
}
}
// --stdin mode: read all of stdin, format it, write result to stdout.
if (stdin_flag) {
if (input_files.items.len != 0) {
fatal("cannot use --stdin with positional arguments", .{});
}
const stdin = std.io.getStdIn();
const source_code = std.zig.readSourceFileToEndAlloc(gpa, stdin, null) catch |err| {
fatal("unable to read stdin: {}", .{err});
};
defer gpa.free(source_code);
var tree = std.zig.Ast.parse(gpa, source_code, if (force_zon) .zon else .zig) catch |err| {
fatal("error parsing stdin: {}", .{err});
};
defer tree.deinit(gpa);
if (check_ast_flag) {
if (!force_zon) {
var zir = try std.zig.AstGen.generate(gpa, tree);
defer zir.deinit(gpa);
if (zir.hasCompileErrors()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
try wip_errors.init(gpa);
defer wip_errors.deinit();
try wip_errors.addZirErrorMessages(zir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(color.renderOptions());
process.exit(2);
}
} else {
const zoir = try std.zig.ZonGen.generate(gpa, tree, .{});
defer zoir.deinit(gpa);
if (zoir.hasCompileErrors()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
try wip_errors.init(gpa);
defer wip_errors.deinit();
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(color.renderOptions());
process.exit(2);
}
}
} else if (tree.errors.len != 0) {
try std.zig.printAstErrorsToStderr(gpa, tree, "<stdin>", color);
process.exit(2);
}
const formatted = try tree.render(gpa);
defer gpa.free(formatted);
if (check_flag) {
// Exit nonzero only when the input is NOT already formatted, matching
// the usage text ("exit with an error if the list is non-empty").
// The previous code was missing the negation, so a conforming stdin
// exited with code 1 and a non-conforming one with code 0.
const code: u8 = @intFromBool(!mem.eql(u8, formatted, source_code));
process.exit(code);
}
return std.io.getStdOut().writeAll(formatted);
}
if (input_files.items.len == 0) {
fatal("expected at least one source file argument", .{});
}
var fmt: Fmt = .{
.gpa = gpa,
.arena = arena,
.seen = .init(gpa),
.any_error = false,
.check_ast = check_ast_flag,
.force_zon = force_zon,
.color = color,
.out_buffer = std.ArrayList(u8).init(gpa),
};
defer fmt.seen.deinit();
defer fmt.out_buffer.deinit();
// Mark any excluded files/directories as already seen,
// so that they are skipped later during actual processing
for (excluded_files.items) |file_path| {
const stat = fs.cwd().statFile(file_path) catch |err| switch (err) {
error.FileNotFound => continue,
// On Windows, statFile does not work for directories
error.IsDir => dir: {
var dir = try fs.cwd().openDir(file_path, .{});
defer dir.close();
break :dir try dir.stat();
},
else => |e| return e,
};
try fmt.seen.put(stat.inode, {});
}
for (input_files.items) |file_path| {
try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path);
}
if (fmt.any_error) {
process.exit(1);
}
}
/// Error set for the formatting path: everything the file/directory
/// operations below can report, plus `fs.File.OpenError`.
const FmtError = error{
SystemResources,
OperationAborted,
IoPending,
BrokenPipe,
Unexpected,
WouldBlock,
Canceled,
FileClosed,
DestinationAddressRequired,
DiskQuota,
FileTooBig,
InputOutput,
NoSpaceLeft,
AccessDenied,
OutOfMemory,
RenameAcrossMountPoints,
ReadOnlyFileSystem,
LinkQuotaExceeded,
FileBusy,
EndOfStream,
Unseekable,
NotOpenForWriting,
UnsupportedEncoding,
InvalidEncoding,
ConnectionResetByPeer,
SocketNotConnected,
LockViolation,
NetNameDeleted,
InvalidArgument,
ProcessNotFound,
} || fs.File.OpenError;
/// Format one user-supplied path. Tries it as a regular file first and
/// falls back to a directory walk when opening it reports IsDir (or
/// AccessDenied, which some platforms return for directories). Any other
/// failure is logged and recorded in `fmt.any_error` instead of aborting
/// the whole run.
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) {
error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path),
else => {
std.log.err("unable to format '{s}': {s}", .{ file_path, @errorName(err) });
fmt.any_error = true;
return;
},
};
}
/// Recursively format every `.zig`/`.zon` file inside a directory.
/// Hidden entries (leading '.') are skipped, and each directory's inode is
/// recorded in `fmt.seen` so duplicates/excluded paths are visited at most
/// once.
fn fmtPathDir(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
parent_dir: fs.Dir,
parent_sub_path: []const u8,
) FmtError!void {
var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
defer dir.close();
const stat = try dir.stat();
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
const is_dir = entry.kind == .directory;
// Skip hidden files and directories.
if (mem.startsWith(u8, entry.name, ".")) continue;
if (is_dir or entry.kind == .file and (mem.endsWith(u8, entry.name, ".zig") or mem.endsWith(u8, entry.name, ".zon"))) {
// `full_path` is only needed for error reporting and recursion;
// the actual file operations go through `dir` + `entry.name`.
const full_path = try fs.path.join(fmt.gpa, &[_][]const u8{ file_path, entry.name });
defer fmt.gpa.free(full_path);
if (is_dir) {
try fmtPathDir(fmt, full_path, check_mode, dir, entry.name);
} else {
fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| {
std.log.err("unable to format '{s}': {s}", .{ full_path, @errorName(err) });
fmt.any_error = true;
return;
};
}
}
}
}
/// Format a single file in place (or just report its path in check mode).
/// Returns `error.IsDir` for directories so `fmtPath` can retry the path as
/// a directory walk.
fn fmtPathFile(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
dir: fs.Dir,
sub_path: []const u8,
) FmtError!void {
const source_file = try dir.openFile(sub_path, .{});
var file_closed = false;
errdefer if (!file_closed) source_file.close();
const stat = try source_file.stat();
if (stat.kind == .directory)
return error.IsDir;
const gpa = fmt.gpa;
const source_code = try std.zig.readSourceFileToEndAlloc(
gpa,
source_file,
std.math.cast(usize, stat.size) orelse return error.FileTooBig,
);
defer gpa.free(source_code);
source_file.close();
file_closed = true;
// Add to set after no longer possible to get error.IsDir.
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
// Pick the parse mode: --zon forces ZON, otherwise go by file extension.
const mode: std.zig.Ast.Mode = mode: {
if (fmt.force_zon) break :mode .zon;
if (mem.endsWith(u8, sub_path, ".zon")) break :mode .zon;
break :mode .zig;
};
var tree = try std.zig.Ast.parse(gpa, source_code, mode);
defer tree.deinit(gpa);
if (tree.errors.len != 0) {
try std.zig.printAstErrorsToStderr(gpa, tree, file_path, fmt.color);
fmt.any_error = true;
return;
}
// --ast-check: additionally run AstGen/ZonGen and report compile errors,
// still continuing with the formatting afterwards.
if (fmt.check_ast) {
if (stat.size > std.zig.max_src_size)
return error.FileTooBig;
switch (mode) {
.zig => {
var zir = try std.zig.AstGen.generate(gpa, tree);
defer zir.deinit(gpa);
if (zir.hasCompileErrors()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
try wip_errors.init(gpa);
defer wip_errors.deinit();
try wip_errors.addZirErrorMessages(zir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(fmt.color.renderOptions());
fmt.any_error = true;
}
},
.zon => {
var zoir = try std.zig.ZonGen.generate(gpa, tree, .{});
defer zoir.deinit(gpa);
if (zoir.hasCompileErrors()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
try wip_errors.init(gpa);
defer wip_errors.deinit();
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(fmt.color.renderOptions());
fmt.any_error = true;
}
},
}
}
// As a heuristic, we make enough capacity for the same as the input source.
fmt.out_buffer.shrinkRetainingCapacity(0);
try fmt.out_buffer.ensureTotalCapacity(source_code.len);
try tree.renderToArrayList(&fmt.out_buffer, .{});
// Already formatted: nothing to write, nothing to report.
if (mem.eql(u8, fmt.out_buffer.items, source_code))
return;
if (check_mode) {
const stdout = std.io.getStdOut().writer();
try stdout.print("{s}\n", .{file_path});
fmt.any_error = true;
} else {
// Write through an atomic temp file so an interrupted run never leaves
// a half-written source file behind.
var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode });
defer af.deinit();
try af.file.writeAll(fmt.out_buffer.items);
try af.finish();
const stdout = std.io.getStdOut().writer();
try stdout.print("{s}\n", .{file_path});
}
}
const std = @import("std");
const mem = std.mem;
const fs = std.fs;
const process = std.process;
const Allocator = std.mem.Allocator;
const Color = std.zig.Color;
const fatal = std.process.fatal;

View File

@ -129,7 +129,7 @@ pub const std_options: std.Options = .{ // MARK: std_options
types = types ++ &[_]type{i64}; types = types ++ &[_]type{i64};
} else if(@TypeOf(args[i_1]) == comptime_float) { } else if(@TypeOf(args[i_1]) == comptime_float) {
types = types ++ &[_]type{f64}; types = types ++ &[_]type{f64};
} else if(TI == .pointer and TI.pointer.size == .Slice and TI.pointer.child == u8) { } else if(TI == .pointer and TI.pointer.size == .slice and TI.pointer.child == u8) {
types = types ++ &[_]type{[]const u8}; types = types ++ &[_]type{[]const u8};
} else if(TI == .int and TI.int.bits <= 64) { } else if(TI == .int and TI.int.bits <= 64) {
if(TI.int.signedness == .signed) { if(TI.int.signedness == .signed) {

View File

@ -104,7 +104,7 @@ pub const ChannelChunk = struct {
} }
fn propagateDirect(self: *ChannelChunk, lightQueue: *main.utils.CircularBufferQueue(Entry), lightRefreshList: *main.List(*chunk_meshing.ChunkMesh)) void { fn propagateDirect(self: *ChannelChunk, lightQueue: *main.utils.CircularBufferQueue(Entry), lightRefreshList: *main.List(*chunk_meshing.ChunkMesh)) void {
var neighborLists: [6]main.ListUnmanaged(Entry) = .{.{}} ** 6; var neighborLists: [6]main.ListUnmanaged(Entry) = @splat(.{});
defer { defer {
for(&neighborLists) |*list| { for(&neighborLists) |*list| {
list.deinit(main.stackAllocator); list.deinit(main.stackAllocator);
@ -166,7 +166,7 @@ pub const ChannelChunk = struct {
} }
fn propagateDestructive(self: *ChannelChunk, lightQueue: *main.utils.CircularBufferQueue(Entry), constructiveEntries: *main.ListUnmanaged(ChunkEntries), isFirstBlock: bool, lightRefreshList: *main.List(*chunk_meshing.ChunkMesh)) main.ListUnmanaged(PositionEntry) { fn propagateDestructive(self: *ChannelChunk, lightQueue: *main.utils.CircularBufferQueue(Entry), constructiveEntries: *main.ListUnmanaged(ChunkEntries), isFirstBlock: bool, lightRefreshList: *main.List(*chunk_meshing.ChunkMesh)) main.ListUnmanaged(PositionEntry) {
var neighborLists: [6]main.ListUnmanaged(Entry) = .{.{}} ** 6; var neighborLists: [6]main.ListUnmanaged(Entry) = @splat(.{});
var constructiveList: main.ListUnmanaged(PositionEntry) = .{}; var constructiveList: main.ListUnmanaged(PositionEntry) = .{};
defer { defer {
for(&neighborLists) |*list| { for(&neighborLists) |*list| {

View File

@ -157,13 +157,13 @@ fn hashGeneric(input: anytype) u64 {
}, },
.optional => if(input) |_input| hashGeneric(_input) else 0, .optional => if(input) |_input| hashGeneric(_input) else 0,
.pointer => switch(@typeInfo(T).pointer.size) { .pointer => switch(@typeInfo(T).pointer.size) {
.One => blk: { .one => blk: {
if(@typeInfo(@typeInfo(T).pointer.child) == .@"fn") break :blk 0; if(@typeInfo(@typeInfo(T).pointer.child) == .@"fn") break :blk 0;
if(@typeInfo(T).pointer.child == Biome) return hashGeneric(input.id); if(@typeInfo(T).pointer.child == Biome) return hashGeneric(input.id);
if(@typeInfo(T).pointer.child == anyopaque) break :blk 0; if(@typeInfo(T).pointer.child == anyopaque) break :blk 0;
break :blk hashGeneric(input.*); break :blk hashGeneric(input.*);
}, },
.Slice => blk: { .slice => blk: {
var result: u64 = 0; var result: u64 = 0;
for(input) |val| { for(input) |val| {
result = result*%33 +% hashGeneric(val); result = result*%33 +% hashGeneric(val);

View File

@ -80,7 +80,7 @@ pub fn init() void {
} }
@field(@This(), decl.name) = zon.get(declType, decl.name, @field(@This(), decl.name)); @field(@This(), decl.name) = zon.get(declType, decl.name, @field(@This(), decl.name));
if(@typeInfo(declType) == .pointer) { if(@typeInfo(declType) == .pointer) {
if(@typeInfo(declType).pointer.size == .Slice) { if(@typeInfo(declType).pointer.size == .slice) {
@field(@This(), decl.name) = main.globalAllocator.dupe(@typeInfo(declType).pointer.child, @field(@This(), decl.name)); @field(@This(), decl.name) = main.globalAllocator.dupe(@typeInfo(declType).pointer.child, @field(@This(), decl.name));
} else { } else {
@compileError("Not implemented yet."); @compileError("Not implemented yet.");
@ -111,7 +111,7 @@ pub fn deinit() void {
@compileError("Not implemented yet."); @compileError("Not implemented yet.");
} }
if(@typeInfo(declType) == .pointer) { if(@typeInfo(declType) == .pointer) {
if(@typeInfo(declType).pointer.size == .Slice) { if(@typeInfo(declType).pointer.size == .slice) {
main.globalAllocator.free(@field(@This(), decl.name)); main.globalAllocator.free(@field(@This(), decl.name));
} else { } else {
@compileError("Not implemented yet."); @compileError("Not implemented yet.");

View File

@ -453,6 +453,7 @@ pub const StackAllocator = struct { // MARK: StackAllocator
.vtable = &.{ .vtable = &.{
.alloc = &alloc, .alloc = &alloc,
.resize = &resize, .resize = &resize,
.remap = &remap,
.free = &free, .free = &free,
}, },
.ptr = self, .ptr = self,
@ -484,42 +485,22 @@ pub const StackAllocator = struct { // MARK: StackAllocator
return @ptrCast(@alignCast(self.buffer[trailerStart..].ptr)); return @ptrCast(@alignCast(self.buffer[trailerStart..].ptr));
} }
/// Attempt to allocate exactly `len` bytes aligned to `1 << ptr_align`. fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 {
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
const self: *StackAllocator = @ptrCast(@alignCast(ctx)); const self: *StackAllocator = @ptrCast(@alignCast(ctx));
const start = std.mem.alignForward(usize, self.index, @as(usize, 1) << @intCast(ptr_align)); const start = std.mem.alignForward(usize, self.index, @as(usize, 1) << @intCast(@intFromEnum(alignment)));
const end = getTrueAllocationEnd(start, len); const end = getTrueAllocationEnd(start, len);
if(end >= self.buffer.len) return self.backingAllocator.rawAlloc(len, ptr_align, ret_addr); if(end >= self.buffer.len) return self.backingAllocator.rawAlloc(len, alignment, ret_addr);
const trailer = self.getTrailerBefore(end); const trailer = self.getTrailerBefore(end);
trailer.* = .{.wasFreed = false, .previousAllocationTrailer = @intCast(self.index)}; trailer.* = .{.wasFreed = false, .previousAllocationTrailer = @intCast(self.index)};
self.index = end; self.index = end;
return self.buffer.ptr + start; return self.buffer.ptr + start;
} }
/// Attempt to expand or shrink memory in place. `buf.len` must equal the fn resize(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool {
/// length requested from the most recent successful call to `alloc` or
/// `resize`. `buf_align` must equal the same value that was passed as the
/// `ptr_align` parameter to the original `alloc` call.
///
/// A result of `true` indicates the resize was successful and the
/// allocation now has the same address but a size of `new_len`. `false`
/// indicates the resize could not be completed without moving the
/// allocation to a different address.
///
/// `new_len` must be greater than zero.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool {
const self: *StackAllocator = @ptrCast(@alignCast(ctx)); const self: *StackAllocator = @ptrCast(@alignCast(ctx));
if(self.isInsideBuffer(buf)) { if(self.isInsideBuffer(memory)) {
const start = self.indexInBuffer(buf); const start = self.indexInBuffer(memory);
const end = getTrueAllocationEnd(start, buf.len); const end = getTrueAllocationEnd(start, memory.len);
if(end != self.index) return false; if(end != self.index) return false;
const newEnd = getTrueAllocationEnd(start, new_len); const newEnd = getTrueAllocationEnd(start, new_len);
if(newEnd >= self.buffer.len) return false; if(newEnd >= self.buffer.len) return false;
@ -532,26 +513,20 @@ pub const StackAllocator = struct { // MARK: StackAllocator
self.index = newEnd; self.index = newEnd;
return true; return true;
} else { } else {
return self.backingAllocator.rawResize(buf, buf_align, new_len, ret_addr); return self.backingAllocator.rawResize(memory, alignment, new_len, ret_addr);
} }
} }
/// Free and invalidate a buffer. fn remap(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
/// if(resize(ctx, memory, alignment, new_len, ret_addr)) return memory.ptr;
/// `buf.len` must equal the most recent length returned by `alloc` or return null;
/// given to a successful `resize` call. }
///
/// `buf_align` must equal the same value that was passed as the fn free(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
/// `ptr_align` parameter to the original `alloc` call.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
const self: *StackAllocator = @ptrCast(@alignCast(ctx)); const self: *StackAllocator = @ptrCast(@alignCast(ctx));
if(self.isInsideBuffer(buf)) { if(self.isInsideBuffer(memory)) {
const start = self.indexInBuffer(buf); const start = self.indexInBuffer(memory);
const end = getTrueAllocationEnd(start, buf.len); const end = getTrueAllocationEnd(start, memory.len);
const trailer = self.getTrailerBefore(end); const trailer = self.getTrailerBefore(end);
std.debug.assert(!trailer.wasFreed); // Double Free std.debug.assert(!trailer.wasFreed); // Double Free
@ -569,7 +544,7 @@ pub const StackAllocator = struct { // MARK: StackAllocator
trailer.wasFreed = true; trailer.wasFreed = true;
} }
} else { } else {
self.backingAllocator.rawFree(buf, buf_align, ret_addr); self.backingAllocator.rawFree(memory, alignment, ret_addr);
} }
} }
}; };
@ -590,6 +565,7 @@ pub const ErrorHandlingAllocator = struct { // MARK: ErrorHandlingAllocator
.vtable = &.{ .vtable = &.{
.alloc = &alloc, .alloc = &alloc,
.resize = &resize, .resize = &resize,
.remap = &remap,
.free = &free, .free = &free,
}, },
.ptr = self, .ptr = self,
@ -602,20 +578,23 @@ pub const ErrorHandlingAllocator = struct { // MARK: ErrorHandlingAllocator
@panic("Out Of Memory. Please download more RAM, reduce the render distance, or close some of your 100 browser tabs."); @panic("Out Of Memory. Please download more RAM, reduce the render distance, or close some of your 100 browser tabs.");
} }
/// Attempt to allocate exactly `len` bytes aligned to `1 << ptr_align`. /// Return a pointer to `len` bytes with specified `alignment`, or return
/// `null` indicating the allocation failed.
/// ///
/// `ret_addr` is optionally provided as the first return address of the /// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address /// allocation call stack. If the value is `0` it means no return address
/// has been provided. /// has been provided.
fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 {
const self: *ErrorHandlingAllocator = @ptrCast(@alignCast(ctx)); const self: *ErrorHandlingAllocator = @ptrCast(@alignCast(ctx));
return self.backingAllocator.rawAlloc(len, ptr_align, ret_addr) orelse handleError(); return self.backingAllocator.rawAlloc(len, alignment, ret_addr) orelse handleError();
} }
/// Attempt to expand or shrink memory in place. `buf.len` must equal the /// Attempt to expand or shrink memory in place.
/// length requested from the most recent successful call to `alloc` or ///
/// `resize`. `buf_align` must equal the same value that was passed as the /// `memory.len` must equal the length requested from the most recent
/// `ptr_align` parameter to the original `alloc` call. /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
/// equal the same value that was passed as the `alignment` parameter to
/// the original `alloc` call.
/// ///
/// A result of `true` indicates the resize was successful and the /// A result of `true` indicates the resize was successful and the
/// allocation now has the same address but a size of `new_len`. `false` /// allocation now has the same address but a size of `new_len`. `false`
@ -627,25 +606,48 @@ pub const ErrorHandlingAllocator = struct { // MARK: ErrorHandlingAllocator
/// `ret_addr` is optionally provided as the first return address of the /// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address /// allocation call stack. If the value is `0` it means no return address
/// has been provided. /// has been provided.
fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool { fn resize(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool {
const self: *ErrorHandlingAllocator = @ptrCast(@alignCast(ctx)); const self: *ErrorHandlingAllocator = @ptrCast(@alignCast(ctx));
return self.backingAllocator.rawResize(buf, buf_align, new_len, ret_addr); return self.backingAllocator.rawResize(memory, alignment, new_len, ret_addr);
} }
/// Free and invalidate a buffer. /// Attempt to expand or shrink memory, allowing relocation.
/// ///
/// `buf.len` must equal the most recent length returned by `alloc` or /// `memory.len` must equal the length requested from the most recent
/// given to a successful `resize` call. /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
/// equal the same value that was passed as the `alignment` parameter to
/// the original `alloc` call.
/// ///
/// `buf_align` must equal the same value that was passed as the /// A non-`null` return value indicates the resize was successful. The
/// `ptr_align` parameter to the original `alloc` call. /// allocation may have same address, or may have been relocated. In either
/// case, the allocation now has size of `new_len`. A `null` return value
/// indicates that the resize would be equivalent to allocating new memory,
/// copying the bytes from the old memory, and then freeing the old memory.
/// In such case, it is more efficient for the caller to perform the copy.
///
/// `new_len` must be greater than zero.
/// ///
/// `ret_addr` is optionally provided as the first return address of the /// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address /// allocation call stack. If the value is `0` it means no return address
/// has been provided. /// has been provided.
fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void { fn remap(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
const self: *ErrorHandlingAllocator = @ptrCast(@alignCast(ctx)); const self: *ErrorHandlingAllocator = @ptrCast(@alignCast(ctx));
self.backingAllocator.rawFree(buf, buf_align, ret_addr); return self.backingAllocator.rawRemap(memory, alignment, new_len, ret_addr);
}
/// Free and invalidate a region of memory.
///
/// `memory.len` must equal the length requested from the most recent
/// successful call to `alloc`, `resize`, or `remap`. `alignment` must
/// equal the same value that was passed as the `alignment` parameter to
/// the original `alloc` call.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
fn free(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
const self: *ErrorHandlingAllocator = @ptrCast(@alignCast(ctx));
self.backingAllocator.rawFree(memory, alignment, ret_addr);
} }
}; };
@ -654,22 +656,31 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator
allocator: Allocator, allocator: Allocator,
IAssertThatTheProvidedAllocatorCantFail: void, IAssertThatTheProvidedAllocatorCantFail: void,
const Alignment = std.mem.Alignment;
const math = std.math;
/// This function is not intended to be called except from within the /// This function is not intended to be called except from within the
/// implementation of an Allocator /// implementation of an `Allocator`.
pub inline fn rawAlloc(self: NeverFailingAllocator, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { pub inline fn rawAlloc(a: NeverFailingAllocator, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 {
return self.allocator.vtable.alloc(self.allocator.ptr, len, ptr_align, ret_addr); return a.allocator.vtable.alloc(a.allocator.ptr, len, alignment, ret_addr);
} }
/// This function is not intended to be called except from within the /// This function is not intended to be called except from within the
/// implementation of an Allocator /// implementation of an `Allocator`.
pub inline fn rawResize(self: NeverFailingAllocator, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { pub inline fn rawResize(a: NeverFailingAllocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
return self.allocator.vtable.resize(self.allocator.ptr, buf, log2_buf_align, new_len, ret_addr); return a.allocator.vtable.resize(a.allocator.ptr, memory, alignment, new_len, ret_addr);
} }
/// This function is not intended to be called except from within the /// This function is not intended to be called except from within the
/// implementation of an Allocator /// implementation of an `Allocator`.
pub inline fn rawFree(self: NeverFailingAllocator, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { pub inline fn rawRemap(a: NeverFailingAllocator, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
return self.allocator.vtable.free(self.allocator.ptr, buf, log2_buf_align, ret_addr); return a.allocator.vtable.remap(a.allocator.ptr, memory, alignment, new_len, ret_addr);
}
/// This function is not intended to be called except from within the
/// implementation of an `Allocator`.
pub inline fn rawFree(a: NeverFailingAllocator, memory: []u8, alignment: Alignment, ret_addr: usize) void {
return a.allocator.vtable.free(a.allocator.ptr, memory, alignment, ret_addr);
} }
/// Returns a pointer to undefined memory. /// Returns a pointer to undefined memory.
@ -681,7 +692,7 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator
/// `ptr` should be the return value of `create`, or otherwise /// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property. /// have the same address and alignment property.
pub fn destroy(self: NeverFailingAllocator, ptr: anytype) void { pub fn destroy(self: NeverFailingAllocator, ptr: anytype) void {
return self.allocator.destroy(ptr); self.allocator.destroy(ptr);
} }
/// Allocates an array of `n` items of type `T` and sets all the /// Allocates an array of `n` items of type `T` and sets all the
@ -765,16 +776,58 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator
return self.allocator.allocAdvancedWithRetAddr(T, alignment, n, return_address) catch unreachable; return self.allocator.allocAdvancedWithRetAddr(T, alignment, n, return_address) catch unreachable;
} }
/// Requests to modify the size of an allocation. It is guaranteed to not move fn allocWithSizeAndAlignment(self: NeverFailingAllocator, comptime size: usize, comptime alignment: u29, n: usize, return_address: usize) [*]align(alignment) u8 {
/// the pointer, however the allocator implementation may refuse the resize return self.allocator.allocWithSizeAndAlignment(alignment, size, alignment, n, return_address) catch unreachable;
/// request by returning `false`. }
pub fn resize(self: NeverFailingAllocator, old_mem: anytype, new_n: usize) bool {
return self.allocator.resize(old_mem, new_n); fn allocBytesWithAlignment(self: NeverFailingAllocator, comptime alignment: u29, byte_count: usize, return_address: usize) [*]align(alignment) u8 {
return self.allocator.allocBytesWithAlignment(alignment, byte_count, return_address) catch unreachable;
}
/// Request to modify the size of an allocation.
///
/// It is guaranteed to not move the pointer, however the allocator
/// implementation may refuse the resize request by returning `false`.
///
/// `allocation` may be an empty slice, in which case a new allocation is made.
///
/// `new_len` may be zero, in which case the allocation is freed.
pub fn resize(self: NeverFailingAllocator, allocation: anytype, new_len: usize) bool {
return self.allocator.resize(allocation, new_len);
}
/// Request to modify the size of an allocation, allowing relocation.
///
/// A non-`null` return value indicates the resize was successful. The
/// allocation may have same address, or may have been relocated. In either
/// case, the allocation now has size of `new_len`. A `null` return value
/// indicates that the resize would be equivalent to allocating new memory,
/// copying the bytes from the old memory, and then freeing the old memory.
/// In such case, it is more efficient for the caller to perform those
/// operations.
///
/// `allocation` may be an empty slice, in which case a new allocation is made.
///
/// `new_len` may be zero, in which case the allocation is freed.
pub fn remap(self: NeverFailingAllocator, allocation: anytype, new_len: usize) t: {
const Slice = @typeInfo(@TypeOf(allocation)).pointer;
break :t ?[]align(Slice.alignment) Slice.child;
} {
return self.allocator.remap(allocation, new_len);
} }
/// This function requests a new byte size for an existing allocation, which /// This function requests a new byte size for an existing allocation, which
/// can be larger, smaller, or the same size as the old memory allocation. /// can be larger, smaller, or the same size as the old memory allocation.
///
/// If `new_n` is 0, this is the same as `free` and it always succeeds. /// If `new_n` is 0, this is the same as `free` and it always succeeds.
///
/// `old_mem` may have length zero, which makes a new allocation.
///
/// This function only fails on out-of-memory conditions, unlike:
/// * `remap` which returns `null` when the `Allocator` implementation cannot
/// do the realloc more efficiently than the caller
/// * `resize` which returns `false` when the `Allocator` implementation cannot
/// change the size without relocating the allocation.
pub fn realloc(self: NeverFailingAllocator, old_mem: anytype, new_n: usize) t: { pub fn realloc(self: NeverFailingAllocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).pointer; const Slice = @typeInfo(@TypeOf(old_mem)).pointer;
break :t []align(Slice.alignment) Slice.child; break :t []align(Slice.alignment) Slice.child;
@ -794,8 +847,9 @@ pub const NeverFailingAllocator = struct { // MARK: NeverFailingAllocator
return self.allocator.reallocAdvanced(old_mem, new_n, return_address) catch unreachable; return self.allocator.reallocAdvanced(old_mem, new_n, return_address) catch unreachable;
} }
/// Free an array allocated with `alloc`. To free a single item, /// Free an array allocated with `alloc`.
/// see `destroy`. /// If memory has length 0, free is a no-op.
/// To free a single item, see `destroy`.
pub fn free(self: NeverFailingAllocator, memory: anytype) void { pub fn free(self: NeverFailingAllocator, memory: anytype) void {
self.allocator.free(memory); self.allocator.free(memory);
} }
@ -850,7 +904,7 @@ pub const NeverFailingArenaAllocator = struct { // MARK: NeverFailingArena
const node = self.arena.state.buffer_list.first orelse return; const node = self.arena.state.buffer_list.first orelse return;
const allocBuf = @as([*]u8, @ptrCast(node))[0..node.data]; const allocBuf = @as([*]u8, @ptrCast(node))[0..node.data];
const dataSize = std.mem.alignForward(usize, @sizeOf(std.SinglyLinkedList(usize).Node) + self.arena.state.end_index, @alignOf(std.SinglyLinkedList(usize).Node)); const dataSize = std.mem.alignForward(usize, @sizeOf(std.SinglyLinkedList(usize).Node) + self.arena.state.end_index, @alignOf(std.SinglyLinkedList(usize).Node));
if(self.arena.child_allocator.rawResize(allocBuf, std.math.log2(@alignOf(std.SinglyLinkedList(usize).Node)), dataSize, @returnAddress())) { if(self.arena.child_allocator.rawResize(allocBuf, @enumFromInt(std.math.log2(@alignOf(std.SinglyLinkedList(usize).Node))), dataSize, @returnAddress())) {
node.data = dataSize; node.data = dataSize;
} }
} }

View File

@ -213,11 +213,11 @@ pub const ZonElement = union(enum) { // MARK: Zon
} }
}, },
.pointer => |ptr| { .pointer => |ptr| {
if(ptr.child == u8 and ptr.size == .Slice) { if(ptr.child == u8 and ptr.size == .slice) {
return .{.string = value}; return .{.string = value};
} else { } else {
const childInfo = @typeInfo(ptr.child); const childInfo = @typeInfo(ptr.child);
if(ptr.size == .One and childInfo == .array and childInfo.array.child == u8) { if(ptr.size == .one and childInfo == .array and childInfo.array.child == u8) {
return .{.string = value}; return .{.string = value};
} else { } else {
@compileError("Unknown value type."); @compileError("Unknown value type.");