Placing blocks is actually doable (no lagspikes)

Moving the mesh update into the thread pool at high priority makes updates faster and more efficient (previously, a mesh could be updated multiple times in a row).
fixes #223
IntegratedQuantum 2023-12-07 19:56:04 +01:00
parent 451cac2923
commit 7c1ec82b03
3 changed files with 65 additions and 6 deletions
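
The deduplication described in the commit message hinges on the new needsLightRefresh atomic flag: only the false-to-true transition schedules a thread-pool task, and the task swaps the flag back to false before doing the work, so a burst of refresh requests collapses into a single refresh. Below is a minimal, self-contained sketch of that pattern; MeshLike, scheduleRefresh, and runRefresh are illustrative names, not from this commit, and the capitalized orderings (.AcqRel) match the Zig version the codebase used at the time.

const std = @import("std");

const MeshLike = struct {
	dirty: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),
	refreshes: u32 = 0,

	// Called from any thread whenever new light data arrives.
	// swap returns the previous value, so only the false -> true
	// transition reports that a task needs to be scheduled.
	fn scheduleRefresh(self: *MeshLike) bool {
		return !self.dirty.swap(true, .AcqRel);
	}

	// Thread-pool task body: the matching swap back to false
	// guarantees at most one refresh per batch of requests.
	fn runRefresh(self: *MeshLike) void {
		if(self.dirty.swap(false, .AcqRel)) {
			self.refreshes += 1; // the expensive finishData() would go here
		}
	}
};

pub fn main() void {
	var mesh = MeshLike{};
	var tasksScheduled: u32 = 0;
	for(0..3) |_| { // three block placements in quick succession...
		if(mesh.scheduleRefresh()) tasksScheduled += 1;
	}
	mesh.runRefresh(); // ...are folded into a single mesh refresh
	std.debug.print("tasks: {}, refreshes: {}\n", .{tasksScheduled, mesh.refreshes});
}

Running this prints "tasks: 1, refreshes: 1": three rapid requests produce one scheduled task and one refresh, which is exactly why block placement no longer spikes.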

@@ -804,6 +804,7 @@ pub const meshing = struct {
 		culledSortingCount: u31 = 0,
 		lastTransparentUpdatePos: Vec3i = Vec3i{0, 0, 0},
 		refCount: std.atomic.Value(u32) = std.atomic.Value(u32).init(1),
+		needsLightRefresh: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),
 		needsMeshUpdate: bool = false,
 		finishedMeshing: bool = false,
 		mutex: std.Thread.Mutex = .{},
@@ -860,6 +861,57 @@ pub const meshing = struct {
 			}
 		}
+		pub fn scheduleLightRefreshAndDecreaseRefCount(self: *ChunkMesh) !void {
+			if(!self.needsLightRefresh.swap(true, .AcqRel)) {
+				try LightRefreshTask.scheduleAndDecreaseRefCount(self);
+			} else {
+				self.decreaseRefCount();
+			}
+		}
+
+		const LightRefreshTask = struct {
+			mesh: *ChunkMesh,
+
+			pub const vtable = main.utils.ThreadPool.VTable{
+				.getPriority = @ptrCast(&getPriority),
+				.isStillNeeded = @ptrCast(&isStillNeeded),
+				.run = @ptrCast(&run),
+				.clean = @ptrCast(&clean),
+			};
+
+			pub fn scheduleAndDecreaseRefCount(mesh: *ChunkMesh) !void {
+				const task = try main.globalAllocator.create(LightRefreshTask);
+				task.* = .{
+					.mesh = mesh,
+				};
+				try main.threadPool.addTask(task, &vtable);
+			}
+
+			pub fn getPriority(_: *LightRefreshTask) f32 {
+				return 1000000;
+			}
+
+			pub fn isStillNeeded(_: *LightRefreshTask) bool {
+				return true; // TODO: Is it worth checking for this?
+			}
+
+			pub fn run(self: *LightRefreshTask) Allocator.Error!void {
+				if(self.mesh.needsLightRefresh.swap(false, .AcqRel)) {
+					self.mesh.mutex.lock();
+					try self.mesh.finishData();
+					self.mesh.mutex.unlock();
+					try renderer.RenderStructure.addToUpdateListAndDecreaseRefCount(self.mesh);
+				} else {
+					self.mesh.decreaseRefCount();
+				}
+				main.globalAllocator.destroy(self);
+			}
+
+			pub fn clean(self: *LightRefreshTask) void {
+				self.mesh.decreaseRefCount();
+				main.globalAllocator.destroy(self);
+			}
+		};

 		pub fn isEmpty(self: *const ChunkMesh) bool {
 			return self.opaqueMesh.vertexCount == 0 and self.transparentMesh.vertexCount == 0;
 		}
@@ -1067,10 +1119,12 @@ pub const meshing = struct {
 						}
 					}
 					if(neighborMesh != self) {
+						_ = neighborMesh.needsLightRefresh.swap(false, .AcqRel);
 						try neighborMesh.finishData();
 						try neighborMesh.uploadData();
 					}
 				}
+			_ = self.needsLightRefresh.swap(false, .AcqRel);
 			try self.finishData();
 			try self.uploadData();
 		}
@@ -1179,6 +1233,7 @@ pub const meshing = struct {
 					}
 				}
 			}
+			_ = neighborMesh.needsLightRefresh.swap(false, .AcqRel);
 			try neighborMesh.finishData();
 			if(inRenderThread) {
 				try neighborMesh.uploadData();
@@ -1258,6 +1313,7 @@ pub const meshing = struct {
 			}
 			self.mutex.lock();
 			defer self.mutex.unlock();
+			_ = self.needsLightRefresh.swap(false, .AcqRel);
 			try self.finishData();
 		}

@@ -76,10 +76,7 @@ pub const ChannelChunk = struct {
 			}
 			self.mutex.unlock();
 			if(main.renderer.RenderStructure.getMeshAndIncreaseRefCount(self.ch.pos)) |mesh| {
-				mesh.mutex.lock();
-				defer mesh.mutex.unlock();
-				try mesh.finishData();
-				try main.renderer.RenderStructure.addToUpdateListAndDecreaseRefCount(mesh);
+				try mesh.scheduleLightRefreshAndDecreaseRefCount();
 			}

 			for(0..6) |neighbor| {
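
Note the reference-count handoff at this call site: getMeshAndIncreaseRefCount hands the caller one reference, and scheduleLightRefreshAndDecreaseRefCount consumes it, either passing it on to the scheduled task (which releases it in run or clean) or dropping it immediately when a refresh is already pending. A hypothetical miniature of that ...AndDecreaseRefCount ownership convention follows; all names in it are mine, not the repo's.

const std = @import("std");

const Counted = struct {
	refCount: std.atomic.Value(u32) = std.atomic.Value(u32).init(1),

	fn increaseRefCount(self: *Counted) void {
		const prev = self.refCount.fetchAdd(1, .Monotonic);
		std.debug.assert(prev != 0); // must not resurrect a dead object
	}

	fn decreaseRefCount(self: *Counted) void {
		const prev = self.refCount.fetchSub(1, .Monotonic);
		std.debug.assert(prev != 0);
		// prev == 1 would mean the last reference died -> free the object here
	}

	// Consumes the reference the caller owns: either a scheduled task
	// inherits it, or it is dropped right away.
	fn handOffAndDecreaseRefCount(self: *Counted, taskTookIt: bool) void {
		if(!taskTookIt) {
			self.decreaseRefCount();
		}
		// if taskTookIt: the task now owns the reference and must release it.
	}
};

pub fn main() void {
	var c = Counted{};
	c.increaseRefCount(); // like getMeshAndIncreaseRefCount
	c.handOffAndDecreaseRefCount(false); // like schedule...AndDecreaseRefCount
	std.debug.print("refCount back to {}\n", .{c.refCount.load(.Monotonic)});
}

Keeping the increase and decrease in matched pairs along the call chain is what lets the lighting thread return immediately without leaking or freeing a mesh that the task still needs.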

@@ -1612,8 +1612,14 @@ pub const RenderStructure = struct {
 		std.debug.assert(mesh.refCount.load(.Monotonic) != 0);
 		mutex.lock();
 		defer mutex.unlock();
-		try priorityMeshUpdateList.append(mesh);
-		mesh.needsMeshUpdate = true;
+		if(mesh.finishedMeshing) {
+			try priorityMeshUpdateList.append(mesh);
+			mesh.needsMeshUpdate = true;
+		} else {
+			mutex.unlock();
+			defer mutex.lock();
+			mesh.decreaseRefCount();
+		}
 	}

 	pub fn addMeshToStorage(mesh: *chunk.meshing.ChunkMesh) void {
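
The else branch above briefly releases the list mutex before dropping the mesh reference, then relocks it via defer so that the function's own defer mutex.unlock() still finds the mutex held; decreaseRefCount may trigger cleanup that should not run under the list lock. A self-contained sketch of that unlock/relock idiom, using an inner block in place of the else branch (names are illustrative):

const std = @import("std");

var listMutex: std.Thread.Mutex = .{};

fn dropRefOutsideLock(heavyWorkRan: *bool) void {
	listMutex.lock();
	defer listMutex.unlock(); // outer defer, as in the function above

	// ... bookkeeping that genuinely needs the lock would go here ...

	{
		listMutex.unlock();
		defer listMutex.lock(); // relock when this block ends, so the
		// outer defer unlocks a mutex that is actually held
		heavyWorkRan.* = true; // stands in for mesh.decreaseRefCount()
	}
}

pub fn main() void {
	var ran = false;
	dropRefOutsideLock(&ran);
	std.debug.print("ran outside the lock: {}\n", .{ran});
}

The paired unlock and deferred lock keep the lock/unlock counts balanced on every path, which is why the surrounding defer-based locking style stays correct.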