Refcount the chunks and fix a crash when placing blocks at a chunk border.

Fixes #338
IntegratedQuantum 2024-05-16 20:47:53 +02:00
parent 13f27a605f
commit 48bf2a6d0a
6 changed files with 62 additions and 31 deletions
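For context, the diff below follows one acquire/release pattern throughout. Here is a minimal standalone sketch of that pattern, assuming a hypothetical `Counted` type with an allocator-based create/destroy; the names `Counted` and `payload` are illustrative only, and the real `Chunk` instead lives in `memoryPool` and is created through `initAndIncreaseRefCount`:

```zig
const std = @import("std");

/// Illustrative stand-in for a refcounted resource; not part of the Cubyz codebase.
const Counted = struct {
    refCount: std.atomic.Value(u16),
    payload: u32,

    /// The creator starts out owning exactly one reference.
    fn initAndIncreaseRefCount(allocator: std.mem.Allocator, payload: u32) !*Counted {
        const self = try allocator.create(Counted);
        self.* = .{
            .refCount = std.atomic.Value(u16).init(1),
            .payload = payload,
        };
        return self;
    }

    fn deinit(self: *Counted, allocator: std.mem.Allocator) void {
        std.debug.assert(self.refCount.raw == 0); // only reachable once every reference was released
        allocator.destroy(self);
    }

    /// Every additional holder (cache entry, queued update, mesh, ...) takes its own reference.
    fn increaseRefCount(self: *Counted) void {
        const prevVal = self.refCount.fetchAdd(1, .monotonic);
        std.debug.assert(prevVal != 0); // must never revive an object that already hit zero
    }

    /// The holder that releases the last reference frees the object.
    fn decreaseRefCount(self: *Counted, allocator: std.mem.Allocator) void {
        const prevVal = self.refCount.fetchSub(1, .monotonic);
        std.debug.assert(prevVal != 0);
        if(prevVal == 1) {
            self.deinit(allocator);
        }
    }
};

test "acquire/release lifecycle" {
    const res = try Counted.initAndIncreaseRefCount(std.testing.allocator, 42);
    // A borrower pairs the acquire with a deferred release, mirroring
    // getOrGenerateChunkAndIncreaseRefCount(...) + defer ch.decreaseRefCount().
    res.increaseRefCount();
    defer res.decreaseRefCount(std.testing.allocator);
    try std.testing.expectEqual(@as(u32, 42), res.payload);
    res.decreaseRefCount(std.testing.allocator); // drop the creator's reference; the defer above frees it
}
```

The creator's reference then keeps the object alive until every other holder seen in the diff (the chunk cache, the chunkUpdateQueue/regionUpdateQueue, the ChunkMesh) has called decreaseRefCount, so the object can no longer be deinitialized while another thread is still using it.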

View File

@@ -181,8 +181,9 @@ pub const Chunk = struct {
voxelSizeMask: i32,
widthShift: u5,
mutex: std.Thread.Mutex,
refCount: std.atomic.Value(u16),
pub fn init(pos: ChunkPosition) *Chunk {
pub fn initAndIncreaseRefCount(pos: ChunkPosition) *Chunk {
memoryPoolMutex.lock();
const self = memoryPool.create() catch unreachable;
memoryPoolMutex.unlock();
@@ -196,12 +197,14 @@ pub const Chunk = struct {
.voxelSizeMask = pos.voxelSize - 1,
.widthShift = voxelSizeShift + chunkShift,
.mutex = std.Thread.Mutex{},
.refCount = std.atomic.Value(u16).init(1),
};
self.data.init();
return self;
}
pub fn deinit(self: *Chunk) void {
std.debug.assert(self.refCount.raw == 0);
if(self.wasChanged) {
self.save(main.server.world.?);
}
@@ -211,11 +214,25 @@ pub const Chunk = struct {
memoryPoolMutex.unlock();
}
pub fn increaseRefCount(self: *Chunk) void {
const prevVal = self.refCount.fetchAdd(1, .monotonic);
std.debug.assert(prevVal != 0);
}
pub fn decreaseRefCount(self: *Chunk) void {
const prevVal = self.refCount.fetchSub(1, .monotonic);
std.debug.assert(prevVal != 0);
if(prevVal == 1) {
self.deinit();
}
}
fn setChanged(self: *Chunk) void {
main.utils.assertLocked(&self.mutex);
if(!self.wasChanged) {
self.wasChanged = true;
main.server.world.?.queueChunkUpdate(self);
self.increaseRefCount();
main.server.world.?.queueChunkUpdateAndDecreaseRefCount(self);
}
}
@@ -384,7 +401,8 @@ pub const Chunk = struct {
pos.wy &= ~(pos.voxelSize*chunkSize);
pos.wz &= ~(pos.voxelSize*chunkSize);
pos.voxelSize *= 2;
const nextHigherLod = world.getOrGenerateChunk(pos);
const nextHigherLod = world.getOrGenerateChunkAndIncreaseRefCount(pos);
defer nextHigherLod.decreaseRefCount();
nextHigherLod.updateFromLowerResolution(self);
}
}

View File

@@ -721,8 +721,8 @@ pub const Protocols = struct {
pub const chunkTransmission = struct {
pub const id: u8 = 3;
fn receive(_: *Connection, data: []const u8) !void {
const ch = try main.server.storage.ChunkCompression.decompressChunk(data);
renderer.mesh_storage.updateChunkMesh(ch);
const ch = try main.server.storage.ChunkCompression.decompressChunkAndIncreaseRefCount(data);
renderer.mesh_storage.updateChunkMeshAndDecreaseRefCount(ch);
}
fn sendChunkOverTheNetwork(conn: *Connection, ch: *chunk.Chunk) void {
ch.mutex.lock();
@@ -732,10 +732,10 @@ pub const Protocols = struct {
conn.sendImportant(id, data);
}
fn sendChunkLocally(ch: *chunk.Chunk) void {
const chunkCopy = chunk.Chunk.init(ch.pos);
const chunkCopy = chunk.Chunk.initAndIncreaseRefCount(ch.pos);
chunkCopy.data.deinit();
chunkCopy.data.initCopy(&ch.data);
renderer.mesh_storage.updateChunkMesh(chunkCopy);
renderer.mesh_storage.updateChunkMeshAndDecreaseRefCount(chunkCopy);
}
pub fn sendChunk(conn: *Connection, ch: *chunk.Chunk) void {
if(conn.user.?.isLocal) {

View File

@@ -515,7 +515,7 @@ pub const ChunkMesh = struct {
std.debug.assert(self.refCount.load(.monotonic) == 0);
self.opaqueMesh.deinit();
self.transparentMesh.deinit();
self.chunk.deinit();
self.chunk.decreaseRefCount();
main.globalAllocator.free(self.currentSorting);
main.globalAllocator.free(self.sortingOutputBuffer);
for(self.lightingData) |lightingChunk| {

View File

@@ -1072,7 +1072,7 @@ pub const MeshGenerationTask = struct {
}
pub fn clean(self: *MeshGenerationTask) void {
self.mesh.deinit();
self.mesh.decreaseRefCount();
main.globalAllocator.destroy(self);
}
};
@@ -1083,7 +1083,7 @@ pub fn updateBlock(x: i32, y: i32, z: i32, newBlock: blocks.Block) void {
blockUpdateList.append(BlockUpdate{.x=x, .y=y, .z=z, .newBlock=newBlock});
}
pub fn updateChunkMesh(mesh: *chunk.Chunk) void {
pub fn updateChunkMeshAndDecreaseRefCount(mesh: *chunk.Chunk) void {
MeshGenerationTask.schedule(mesh);
}

View File

@@ -160,7 +160,8 @@ pub const RegionFile = struct {
@memcpy(self.chunks[index], ch);
if(!self.modified) {
self.modified = true;
main.server.world.?.queueRegionFileUpdate(self);
self.increaseRefCount();
main.server.world.?.queueRegionFileUpdateAndDecreaseRefCount(self);
}
}
@@ -227,7 +228,7 @@ pub const ChunkCompression = struct {
return data;
}
pub fn decompressChunk(_data: []const u8) error{corrupted}!*chunk.Chunk {
pub fn decompressChunkAndIncreaseRefCount(_data: []const u8) error{corrupted}!*chunk.Chunk {
var data = _data;
if(data.len < 4) return error.corrupted;
const algo: CompressionAlgo = @enumFromInt(std.mem.readInt(u32, data[0..4], .big));
@@ -248,7 +249,7 @@ pub const ChunkCompression = struct {
return error.corrupted;
}
data = _inflatedData;
const ch = chunk.Chunk.init(pos);
const ch = chunk.Chunk.initAndIncreaseRefCount(pos);
for(0..chunk.chunkVolume) |i| {
ch.data.setValue(i, main.blocks.Block.fromInt(std.mem.readInt(u32, data[0..4], .big)));
data = data[4..];

View File

@@ -178,7 +178,8 @@ const ChunkManager = struct {
}
pub fn generateChunk(pos: ChunkPosition, source: ?*User) void {
const ch = getOrGenerateChunk(pos);
const ch = getOrGenerateChunkAndIncreaseRefCount(pos);
defer ch.decreaseRefCount();
if(source) |_source| {
main.network.Protocols.chunkTransmission.sendChunk(_source.conn, ch);
} else {
@@ -190,7 +191,7 @@ const ChunkManager = struct {
}
}
fn chunkInitFunctionForCache(pos: ChunkPosition) *Chunk {
fn chunkInitFunctionForCacheAndIncreaseRefCount(pos: ChunkPosition) *Chunk {
const regionSize = pos.voxelSize*chunk.chunkSize*storage.RegionFile.regionSize;
const regionMask: i32 = regionSize - 1;
const region = storage.loadRegionFileAndIncreaseRefCount(pos.wx & ~regionMask, pos.wy & ~regionMask, pos.wz & ~regionMask, pos.voxelSize);
@@ -202,12 +203,12 @@ const ChunkManager = struct {
@as(usize, @intCast(pos.wz -% region.pos.wz))/pos.voxelSize/chunk.chunkSize,
)) |ch| blk: { // Load chunk from file:
defer main.stackAllocator.free(ch);
return storage.ChunkCompression.decompressChunk(ch) catch {
return storage.ChunkCompression.decompressChunkAndIncreaseRefCount(ch) catch {
std.log.err("Storage for chunk {} in region file at {} is corrupted", .{pos, region.pos});
break :blk;
};
}
const ch = Chunk.init(pos);
const ch = Chunk.initAndIncreaseRefCount(pos);
ch.generated = true;
const caveMap = terrain.CaveMap.CaveMapView.init(ch);
defer caveMap.deinit();
@@ -220,19 +221,22 @@ const ChunkManager = struct {
}
fn chunkDeinitFunctionForCache(ch: *Chunk) void {
ch.deinit();
ch.decreaseRefCount();
}
/// Generates a normal chunk at a given location, or if possible gets it from the cache.
pub fn getOrGenerateChunk(pos: ChunkPosition) *Chunk { // TODO: This is not thread safe! The chunk could get removed from the cache while in use. Reference counting should probably be used here.
pub fn getOrGenerateChunkAndIncreaseRefCount(pos: ChunkPosition) *Chunk { // TODO: This is not thread safe! The chunk could get removed from the cache while in use. Reference counting should probably be used here.
const mask = pos.voxelSize*chunk.chunkSize - 1;
std.debug.assert(pos.wx & mask == 0 and pos.wy & mask == 0 and pos.wz & mask == 0);
return chunkCache.findOrCreate(pos, chunkInitFunctionForCache, null);
const result = chunkCache.findOrCreate(pos, chunkInitFunctionForCacheAndIncreaseRefCount, Chunk.increaseRefCount);
return result;
}
pub fn getChunkFromCache(pos: ChunkPosition) ?*Chunk {
pub fn getChunkFromCacheAndIncreaseRefCount(pos: ChunkPosition) ?*Chunk {
const mask = pos.voxelSize*chunk.chunkSize - 1;
std.debug.assert(pos.wx & mask == 0 and pos.wy & mask == 0 and pos.wz & mask == 0);
return chunkCache.find(pos);
const result = chunkCache.find(pos, Chunk.increaseRefCount) orelse return null;
result.increaseRefCount();
return result;
}
};
@@ -391,10 +395,12 @@ pub const ServerWorld = struct {
pub fn deinit(self: *ServerWorld) void {
while(self.chunkUpdateQueue.dequeue()) |updateRequest| {
updateRequest.ch.save(self);
updateRequest.ch.decreaseRefCount();
}
self.chunkUpdateQueue.deinit();
while(self.regionUpdateQueue.dequeue()) |updateRequest| {
updateRequest.region.store();
updateRequest.region.decreaseRefCount();
}
self.regionUpdateQueue.deinit();
self.chunkManager.deinit();
@@ -494,10 +500,12 @@ pub const ServerWorld = struct {
const insertionTime = newTime -% main.settings.storageTime;
while(self.chunkUpdateQueue.dequeue()) |updateRequest| {
updateRequest.ch.save(self);
updateRequest.ch.decreaseRefCount();
if(updateRequest.milliTimeStamp -% insertionTime <= 0) break;
}
while(self.regionUpdateQueue.dequeue()) |updateRequest| {
updateRequest.region.store();
updateRequest.region.decreaseRefCount();
if(updateRequest.milliTimeStamp -% insertionTime <= 0) break;
}
}
@@ -525,8 +533,8 @@ pub const ServerWorld = struct {
return null;
}
pub fn getOrGenerateChunk(_: *ServerWorld, pos: chunk.ChunkPosition) *Chunk {
return ChunkManager.getOrGenerateChunk(pos);
pub fn getOrGenerateChunkAndIncreaseRefCount(_: *ServerWorld, pos: chunk.ChunkPosition) *Chunk {
return ChunkManager.getOrGenerateChunkAndIncreaseRefCount(pos);
}
pub fn getBiome(_: *const ServerWorld, wx: i32, wy: i32, wz: i32) *const terrain.biomes.Biome {
@@ -543,7 +551,8 @@ pub const ServerWorld = struct {
}
pub fn updateBlock(_: *ServerWorld, wx: i32, wy: i32, wz: i32, _newBlock: Block) void {
const baseChunk = ChunkManager.getOrGenerateChunk(.{.wx = wx & ~@as(i32, chunk.chunkMask), .wy = wy & ~@as(i32, chunk.chunkMask), .wz = wz & ~@as(i32, chunk.chunkMask), .voxelSize = 1});
const baseChunk = ChunkManager.getOrGenerateChunkAndIncreaseRefCount(.{.wx = wx & ~@as(i32, chunk.chunkMask), .wy = wy & ~@as(i32, chunk.chunkMask), .wz = wz & ~@as(i32, chunk.chunkMask), .voxelSize = 1});
defer baseChunk.decreaseRefCount();
const x: u5 = @intCast(wx & chunk.chunkMask);
const y: u5 = @intCast(wy & chunk.chunkMask);
const z: u5 = @intCast(wz & chunk.chunkMask);
@@ -554,13 +563,16 @@ pub const ServerWorld = struct {
const nz = z + chunk.Neighbors.relZ[neighbor];
var ch = baseChunk;
if(nx & chunk.chunkMask != nx or ny & chunk.chunkMask != ny or nz & chunk.chunkMask != nz) {
ch = ChunkManager.getOrGenerateChunk(.{
.wx = baseChunk.pos.wx + nx,
.wy = baseChunk.pos.wy + ny,
.wz = baseChunk.pos.wz + nz,
ch = ChunkManager.getOrGenerateChunkAndIncreaseRefCount(.{
.wx = baseChunk.pos.wx + nx & ~@as(i32, chunk.chunkMask),
.wy = baseChunk.pos.wy + ny & ~@as(i32, chunk.chunkMask),
.wz = baseChunk.pos.wz + nz & ~@as(i32, chunk.chunkMask),
.voxelSize = 1,
});
}
defer if(ch != baseChunk) {
ch.decreaseRefCount();
};
ch.mutex.lock();
defer ch.mutex.unlock();
var neighborBlock = ch.getBlock(nx & chunk.chunkMask, ny & chunk.chunkMask, nz & chunk.chunkMask);
@@ -578,13 +590,13 @@ pub const ServerWorld = struct {
baseChunk.updateBlock(x, y, z, newBlock);
}
pub fn queueChunkUpdate(self: *ServerWorld, ch: *Chunk) void {
pub fn queueChunkUpdateAndDecreaseRefCount(self: *ServerWorld, ch: *Chunk) void {
self.mutex.lock();
self.chunkUpdateQueue.enqueue(.{.ch = ch, .milliTimeStamp = std.time.milliTimestamp()});
self.mutex.unlock();
}
pub fn queueRegionFileUpdate(self: *ServerWorld, region: *storage.RegionFile) void {
pub fn queueRegionFileUpdateAndDecreaseRefCount(self: *ServerWorld, region: *storage.RegionFile) void {
self.mutex.lock();
self.regionUpdateQueue.enqueue(.{.region = region, .milliTimeStamp = std.time.milliTimestamp()});
self.mutex.unlock();