Batch block updates (#1313)

* Batch block updates

* Apply review change requests

* Allow blockUpdate to carry multiple block updates in single message

* Read until there is nothing left

* Use mesh_storage.BlockUpdate

* Break instead of boolean

* Restore client side neighbor updates

* Move side check in blockUpdate out of the loop

* Update src/utils.zig

* Fix minor issues

* Reverse ownership logic + change contains into liesInChunk

* Update liesInChunk

* No name for updateBlock param

* Apply review change requests

* Fix formatting

* Restore onBreakClient where it should be

* Update src/renderer/chunk_meshing.zig

* Update src/renderer/chunk_meshing.zig

* Converge formatting with master

* fix formatting (https://github.com/ziglang/zig-spec/issues/38 is so stupid)

---------

Co-authored-by: IntegratedQuantum <jahe788@gmail.com>
This commit is contained in:
Krzysztof Wiśniewski 2025-05-01 10:10:03 +02:00 committed by GitHub
parent e2a65d21d1
commit 64ce0ed991
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 130 additions and 80 deletions

View File

@ -1654,7 +1654,8 @@ pub const Command = struct { // MARK: Command
}) {
if(side == .server) {
// Inform the client of the actual block:
main.network.Protocols.blockUpdate.send(user.?.conn, self.pos[0], self.pos[1], self.pos[2], main.server.world.?.getBlock(self.pos[0], self.pos[1], self.pos[2]) orelse return);
const actualBlock = main.server.world.?.getBlock(self.pos[0], self.pos[1], self.pos[2]) orelse return;
main.network.Protocols.blockUpdate.send(user.?.conn, &.{.init(self.pos, actualBlock)});
}
return;
}
@ -1662,7 +1663,7 @@ pub const Command = struct { // MARK: Command
if(side == .server) {
if(main.server.world.?.cmpxchgBlock(self.pos[0], self.pos[1], self.pos[2], self.oldBlock, self.newBlock)) |actualBlock| {
// Inform the client of the actual block:
main.network.Protocols.blockUpdate.send(user.?.conn, self.pos[0], self.pos[1], self.pos[2], actualBlock);
main.network.Protocols.blockUpdate.send(user.?.conn, &.{.init(self.pos, actualBlock)});
return error.serverFailure;
}
}

View File

@ -141,6 +141,7 @@ pub fn getIndex(x: i32, y: i32, z: i32) u32 {
std.debug.assert((x & chunkMask) == x and (y & chunkMask) == y and (z & chunkMask) == z);
return (@as(u32, @intCast(x)) << chunkShift2) | (@as(u32, @intCast(y)) << chunkShift) | @as(u32, @intCast(z));
}
/// Gets the x coordinate from a given index inside this chunk.
fn extractXFromIndex(index: usize) i32 {
return @intCast(index >> chunkShift2 & chunkMask);
@ -303,6 +304,11 @@ pub const Chunk = struct { // MARK: Chunk
return self.data.getValue(index);
}
/// Checks if the given relative coordinates lie within the bounds of this chunk.
pub fn liesInChunk(self: *const Chunk, x: i32, y: i32, z: i32) bool {
return x >= 0 and x < self.width and y >= 0 and y < self.width and z >= 0 and z < self.width;
}
pub fn getLocalBlockIndex(self: *const Chunk, worldPos: Vec3i) u32 {
return getIndex(
(worldPos[0] - self.pos.wx) >> self.voxelSizeShift,
@ -376,7 +382,7 @@ pub const ServerChunk = struct { // MARK: ServerChunk
/// Checks if the given relative coordinates lie within the bounds of this chunk.
pub fn liesInChunk(self: *const ServerChunk, x: i32, y: i32, z: i32) bool {
return x >= 0 and x < self.super.width and y >= 0 and y < self.super.width and z >= 0 and z < self.super.width;
return self.super.liesInChunk(x, y, z);
}
/// This is useful to convert for loops to work for reduced resolution:

View File

@ -20,6 +20,7 @@ const Vec3d = vec.Vec3d;
const Vec3f = vec.Vec3f;
const Vec3i = vec.Vec3i;
const NeverFailingAllocator = main.heap.NeverFailingAllocator;
const BlockUpdate = renderer.mesh_storage.BlockUpdate;
//TODO: Might want to use SSL or something similar to encode the message
@ -943,23 +944,28 @@ pub const Protocols = struct {
pub const id: u8 = 7;
pub const asynchronous = false;
fn receive(conn: *Connection, reader: *utils.BinaryReader) !void {
const x = try reader.readInt(i32);
const y = try reader.readInt(i32);
const z = try reader.readInt(i32);
const newBlock = Block.fromInt(try reader.readInt(u32));
if(conn.isServerSide()) {
return error.InvalidPacket;
} else {
renderer.mesh_storage.updateBlock(x, y, z, newBlock);
}
while(reader.remaining.len != 0) {
renderer.mesh_storage.updateBlock(.{
.x = try reader.readInt(i32),
.y = try reader.readInt(i32),
.z = try reader.readInt(i32),
.newBlock = Block.fromInt(try reader.readInt(u32)),
});
}
}
pub fn send(conn: *Connection, x: i32, y: i32, z: i32, newBlock: Block) void {
pub fn send(conn: *Connection, updates: []const BlockUpdate) void {
var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 16);
defer writer.deinit();
writer.writeInt(i32, x);
writer.writeInt(i32, y);
writer.writeInt(i32, z);
writer.writeInt(u32, newBlock.toInt());
for(updates) |update| {
writer.writeInt(i32, update.x);
writer.writeInt(i32, update.y);
writer.writeInt(i32, update.z);
writer.writeInt(u32, update.newBlock.toInt());
}
conn.send(.fast, id, writer.data.items);
}
};

View File

@ -1111,7 +1111,7 @@ pub const MeshSelection = struct { // MARK: MeshSelection
.newBlock = newBlock,
},
});
mesh_storage.updateBlock(x, y, z, newBlock);
mesh_storage.updateBlock(.{.x = x, .y = y, .z = z, .newBlock = newBlock});
}
pub fn drawCube(projectionMatrix: Mat4f, viewMatrix: Mat4f, relativePositionToPlayer: Vec3d, min: Vec3f, max: Vec3f) void {

View File

@ -1172,9 +1172,7 @@ pub const ChunkMesh = struct { // MARK: ChunkMesh
}
}
pub fn updateBlock(self: *ChunkMesh, _x: i32, _y: i32, _z: i32, _newBlock: Block) void {
var lightRefreshList = main.List(*ChunkMesh).init(main.stackAllocator);
defer lightRefreshList.deinit();
pub fn updateBlock(self: *ChunkMesh, _x: i32, _y: i32, _z: i32, _newBlock: Block, lightRefreshList: *main.List(*ChunkMesh), regenerateMeshList: *main.List(*ChunkMesh)) void {
const x: u5 = @intCast(_x & chunk.chunkMask);
const y: u5 = @intCast(_y & chunk.chunkMask);
const z: u5 = @intCast(_z & chunk.chunkMask);
@ -1194,36 +1192,41 @@ pub const ChunkMesh = struct { // MARK: ChunkMesh
var neighborBlocks: [6]Block = undefined;
@memset(&neighborBlocks, .{.typ = 0, .data = 0});
for(chunk.Neighbor.iterable) |neighbor| {
const nx = x + neighbor.relX();
const ny = y + neighbor.relY();
const nz = z + neighbor.relZ();
if(nx & chunk.chunkMask != nx or ny & chunk.chunkMask != ny or nz & chunk.chunkMask != nz) {
const nnx: u5 = @intCast(nx & chunk.chunkMask);
const nny: u5 = @intCast(ny & chunk.chunkMask);
const nnz: u5 = @intCast(nz & chunk.chunkMask);
const neighborChunkMesh = mesh_storage.getNeighborAndIncreaseRefCount(self.pos, self.pos.voxelSize, neighbor) orelse continue;
defer neighborChunkMesh.decreaseRefCount();
const index = chunk.getIndex(nx & chunk.chunkMask, ny & chunk.chunkMask, nz & chunk.chunkMask);
const index = chunk.getIndex(nnx, nny, nnz);
neighborChunkMesh.mutex.lock();
var neighborBlock = neighborChunkMesh.chunk.data.getValue(index);
if(neighborBlock.mode().dependsOnNeighbors) {
if(neighborBlock.mode().updateData(&neighborBlock, neighbor.reverse(), newBlock)) {
if(neighborBlock.mode().dependsOnNeighbors and neighborBlock.mode().updateData(&neighborBlock, neighbor.reverse(), newBlock)) {
neighborChunkMesh.chunk.data.setValue(index, neighborBlock);
neighborChunkMesh.mutex.unlock();
neighborChunkMesh.updateBlockLight(@intCast(nx & chunk.chunkMask), @intCast(ny & chunk.chunkMask), @intCast(nz & chunk.chunkMask), neighborBlock, &lightRefreshList);
neighborChunkMesh.generateMesh(&lightRefreshList);
neighborChunkMesh.updateBlockLight(nnx, nny, nnz, neighborBlock, lightRefreshList);
appendIfNotContained(regenerateMeshList, neighborChunkMesh);
neighborChunkMesh.mutex.lock();
}
}
neighborChunkMesh.mutex.unlock();
neighborBlocks[neighbor.toInt()] = neighborBlock;
} else {
const index = chunk.getIndex(nx, ny, nz);
self.mutex.lock();
var neighborBlock = self.chunk.data.getValue(index);
if(neighborBlock.mode().dependsOnNeighbors) {
if(neighborBlock.mode().updateData(&neighborBlock, neighbor.reverse(), newBlock)) {
if(neighborBlock.mode().dependsOnNeighbors and neighborBlock.mode().updateData(&neighborBlock, neighbor.reverse(), newBlock)) {
self.chunk.data.setValue(index, neighborBlock);
self.updateBlockLight(@intCast(nx & chunk.chunkMask), @intCast(ny & chunk.chunkMask), @intCast(nz & chunk.chunkMask), neighborBlock, &lightRefreshList);
}
self.updateBlockLight(@intCast(nx), @intCast(ny), @intCast(nz), neighborBlock, lightRefreshList);
}
self.mutex.unlock();
neighborBlocks[neighbor.toInt()] = neighborBlock;
@ -1242,9 +1245,9 @@ pub const ChunkMesh = struct { // MARK: ChunkMesh
class.onPlaceClient(.{_x, _y, _z}, self.chunk);
}
self.updateBlockLight(x, y, z, newBlock, &lightRefreshList);
self.updateBlockLight(x, y, z, newBlock, lightRefreshList);
self.mutex.lock();
defer self.mutex.unlock();
// Update neighbor chunks:
if(x == 0) {
self.lastNeighborsHigherLod[chunk.Neighbor.dirNegX.toInt()] = null;
@ -1268,16 +1271,18 @@ pub const ChunkMesh = struct { // MARK: ChunkMesh
self.lastNeighborsSameLod[chunk.Neighbor.dirUp.toInt()] = null;
}
self.mutex.unlock();
self.generateMesh(&lightRefreshList); // TODO: Batch mesh updates instead of applying them for each block changes.
self.mutex.lock();
for(lightRefreshList.items) |other| {
if(other.needsLightRefresh.load(.unordered)) {
other.scheduleLightRefreshAndDecreaseRefCount1();
} else {
other.decreaseRefCount();
appendIfNotContained(regenerateMeshList, self);
}
fn appendIfNotContained(list: *main.List(*ChunkMesh), mesh: *ChunkMesh) void {
for(list.items) |other| {
if(other == mesh) {
return;
}
}
self.uploadData();
mesh.increaseRefCount();
list.append(mesh);
}
fn clearNeighborA(self: *ChunkMesh, neighbor: chunk.Neighbor, comptime isLod: bool) void {

View File

@ -141,16 +141,7 @@ pub const ChannelChunk = struct {
}
self.data.optimizeLayout();
self.lock.unlockWrite();
if(mesh_storage.getMeshAndIncreaseRefCount(self.ch.pos)) |mesh| outer: {
for(lightRefreshList.items) |other| {
if(mesh == other) {
mesh.decreaseRefCount();
break :outer;
}
}
mesh.needsLightRefresh.store(true, .release);
lightRefreshList.append(mesh);
}
self.addSelfToLightRefreshList(lightRefreshList);
for(chunk.Neighbor.iterable) |neighbor| {
if(neighborLists[neighbor.toInt()].items.len == 0) continue;
@ -160,6 +151,19 @@ pub const ChannelChunk = struct {
}
}
fn addSelfToLightRefreshList(self: *ChannelChunk, lightRefreshList: *main.List(*chunk_meshing.ChunkMesh)) void {
if(mesh_storage.getMeshAndIncreaseRefCount(self.ch.pos)) |mesh| {
for(lightRefreshList.items) |other| {
if(mesh == other) {
mesh.decreaseRefCount();
return;
}
}
mesh.needsLightRefresh.store(true, .release);
lightRefreshList.append(mesh);
}
}
fn propagateDestructive(self: *ChannelChunk, lightQueue: *main.utils.CircularBufferQueue(Entry), constructiveEntries: *main.ListUnmanaged(ChunkEntries), isFirstBlock: bool, lightRefreshList: *main.List(*chunk_meshing.ChunkMesh)) main.ListUnmanaged(PositionEntry) {
var neighborLists: [6]main.ListUnmanaged(Entry) = @splat(.{});
var constructiveList: main.ListUnmanaged(PositionEntry) = .{};
@ -230,16 +234,7 @@ pub const ChannelChunk = struct {
}
}
self.lock.unlockWrite();
if(mesh_storage.getMeshAndIncreaseRefCount(self.ch.pos)) |mesh| outer: {
for(lightRefreshList.items) |other| {
if(mesh == other) {
mesh.decreaseRefCount();
break :outer;
}
}
mesh.needsLightRefresh.store(true, .release);
lightRefreshList.append(mesh);
}
self.addSelfToLightRefreshList(lightRefreshList);
for(chunk.Neighbor.iterable) |neighbor| {
if(neighborLists[neighbor.toInt()].items.len == 0) continue;

View File

@ -19,6 +19,7 @@ const Mat4f = vec.Mat4f;
const EventStatus = main.entity_data.EventStatus;
const chunk_meshing = @import("chunk_meshing.zig");
const ChunkMesh = chunk_meshing.ChunkMesh;
const ChunkMeshNode = struct {
mesh: ?*chunk_meshing.ChunkMesh = null,
@ -44,12 +45,18 @@ var lastPy: i32 = 0;
var lastPz: i32 = 0;
var lastRD: u16 = 0;
var mutex: std.Thread.Mutex = .{};
const BlockUpdate = struct {
pub const BlockUpdate = struct {
x: i32,
y: i32,
z: i32,
newBlock: blocks.Block,
pub fn init(pos: Vec3i, block: blocks.Block) BlockUpdate {
return .{.x = pos[0], .y = pos[1], .z = pos[2], .newBlock = block};
}
};
var blockUpdateList: main.utils.ConcurrentQueue(BlockUpdate) = undefined;
var meshMemoryPool: main.heap.MemoryPool(chunk_meshing.ChunkMesh) = undefined;
@ -758,15 +765,8 @@ pub noinline fn updateAndGetRenderChunks(conn: *network.Connection, frustum: *co
return meshList.items;
}
pub fn updateMeshes(targetTime: i64) void { // MARK: updateMeshes()
// First of all process all the block updates:
while(blockUpdateList.dequeue()) |blockUpdate| {
const pos = chunk.ChunkPosition{.wx = blockUpdate.x, .wy = blockUpdate.y, .wz = blockUpdate.z, .voxelSize = 1};
if(getMeshAndIncreaseRefCount(pos)) |mesh| {
defer mesh.decreaseRefCount();
mesh.updateBlock(blockUpdate.x, blockUpdate.y, blockUpdate.z, blockUpdate.newBlock);
} // TODO: It seems like we simply ignore the block update if we don't have the mesh yet.
}
pub fn updateMeshes(targetTime: i64) void { // MARK: updateMeshes()=
if(!blockUpdateList.empty()) batchUpdateBlocks();
mutex.lock();
defer mutex.unlock();
@ -859,6 +859,39 @@ pub fn updateMeshes(targetTime: i64) void { // MARK: updateMeshes()
}
}
fn batchUpdateBlocks() void {
var lightRefreshList = main.List(*ChunkMesh).init(main.stackAllocator);
defer lightRefreshList.deinit();
var regenerateMeshList = main.List(*ChunkMesh).init(main.stackAllocator);
defer regenerateMeshList.deinit();
// First of all process all the block updates:
while(blockUpdateList.dequeue()) |blockUpdate| {
const pos = chunk.ChunkPosition{.wx = blockUpdate.x, .wy = blockUpdate.y, .wz = blockUpdate.z, .voxelSize = 1};
if(getMeshAndIncreaseRefCount(pos)) |mesh| {
mesh.updateBlock(blockUpdate.x, blockUpdate.y, blockUpdate.z, blockUpdate.newBlock, &lightRefreshList, &regenerateMeshList);
mesh.decreaseRefCount();
} // TODO: It seems like we simply ignore the block update if we don't have the mesh yet.
}
for(regenerateMeshList.items) |mesh| {
mesh.generateMesh(&lightRefreshList);
}
{
for(lightRefreshList.items) |mesh| {
if(mesh.needsLightRefresh.load(.unordered)) {
mesh.scheduleLightRefreshAndDecreaseRefCount1();
} else {
mesh.decreaseRefCount();
}
}
}
for(regenerateMeshList.items) |mesh| {
mesh.uploadData();
mesh.decreaseRefCount();
}
}
// MARK: adders
pub fn addMeshToClearListAndDecreaseRefCount(mesh: *chunk_meshing.ChunkMesh) void {
@ -953,8 +986,8 @@ pub const MeshGenerationTask = struct { // MARK: MeshGenerationTask
// MARK: updaters
pub fn updateBlock(x: i32, y: i32, z: i32, newBlock: blocks.Block) void {
blockUpdateList.enqueue(.{.x = x, .y = y, .z = z, .newBlock = newBlock});
pub fn updateBlock(update: BlockUpdate) void {
blockUpdateList.enqueue(update);
}
pub fn updateChunkMesh(mesh: *chunk.Chunk) void {

View File

@ -1045,13 +1045,14 @@ pub const ServerWorld = struct { // MARK: ServerWorld
}
}
baseChunk.mutex.unlock();
var newBlock = _newBlock;
for(chunk.Neighbor.iterable) |neighbor| {
const nx = x + neighbor.relX();
const ny = y + neighbor.relY();
const nz = z + neighbor.relZ();
var ch = baseChunk;
if(nx & chunk.chunkMask != nx or ny & chunk.chunkMask != ny or nz & chunk.chunkMask != nz) {
if(!ch.liesInChunk(nx, ny, nz)) {
ch = ChunkManager.getOrGenerateChunkAndIncreaseRefCount(.{
.wx = baseChunk.super.pos.wx + nx & ~@as(i32, chunk.chunkMask),
.wy = baseChunk.super.pos.wy + ny & ~@as(i32, chunk.chunkMask),
@ -1062,25 +1063,28 @@ pub const ServerWorld = struct { // MARK: ServerWorld
defer if(ch != baseChunk) {
ch.decreaseRefCount();
};
ch.mutex.lock();
defer ch.mutex.unlock();
var neighborBlock = ch.getBlock(nx & chunk.chunkMask, ny & chunk.chunkMask, nz & chunk.chunkMask);
if(neighborBlock.mode().dependsOnNeighbors) {
if(neighborBlock.mode().updateData(&neighborBlock, neighbor.reverse(), newBlock)) {
if(neighborBlock.mode().dependsOnNeighbors and neighborBlock.mode().updateData(&neighborBlock, neighbor.reverse(), newBlock)) {
ch.updateBlockAndSetChanged(nx & chunk.chunkMask, ny & chunk.chunkMask, nz & chunk.chunkMask, neighborBlock);
}
}
if(newBlock.mode().dependsOnNeighbors) {
_ = newBlock.mode().updateData(&newBlock, neighbor, neighborBlock);
}
}
baseChunk.mutex.lock();
defer baseChunk.mutex.unlock();
baseChunk.updateBlockAndSetChanged(x, y, z, newBlock);
const userList = server.getUserListAndIncreaseRefCount(main.stackAllocator);
defer server.freeUserListAndDecreaseRefCount(main.stackAllocator, userList);
for(userList) |user| {
main.network.Protocols.blockUpdate.send(user.conn, wx, wy, wz, _newBlock);
main.network.Protocols.blockUpdate.send(user.conn, &.{.{.x = wx, .y = wy, .z = wz, .newBlock = newBlock}});
}
return null;
}