Refactor the Chunk struct, separating the server-specific functionality into a new struct.

Also fixes some data races and changes the default compression so it no longer includes the (redundant) chunk position.
IntegratedQuantum 2024-05-17 17:28:53 +02:00
parent 0628c24b6c
commit 001416dde6
20 changed files with 370 additions and 302 deletions
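The core of the change is composition: the mutex, the reference count and the change tracking move out of Chunk into a new ServerChunk that embeds the plain chunk as its super field. Below is a minimal sketch of the pattern; the field names follow the diff, but the payload type and the test are illustrative stand-ins, not code from this commit.

const std = @import("std");

// Plain chunk: shared by client and server code; no locking, no ref-counting.
const Chunk = struct {
	data: [8]u32 = [_]u32{0} ** 8, // stand-in for PaletteCompressedRegion
};

// Server-side wrapper: embeds the plain chunk and owns all concurrency state.
const ServerChunk = struct {
	super: Chunk = .{},
	mutex: std.Thread.Mutex = .{},
	refCount: std.atomic.Value(u16) = std.atomic.Value(u16).init(1),

	fn decreaseRefCount(self: *ServerChunk) void {
		const prev = self.refCount.fetchSub(1, .monotonic);
		std.debug.assert(prev != 0);
		if (prev == 1) {
			// Last reference released: this is where deinit() (and save()) would run.
		}
	}
};

test "server code reaches the payload through .super while holding the mutex" {
	var ch = ServerChunk{};
	ch.mutex.lock();
	ch.super.data[0] = 42;
	ch.mutex.unlock();
	ch.decreaseRefCount();
}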

View File

@@ -100,13 +100,17 @@ fn extractZFromIndex(index: usize) i32 {
var memoryPool: std.heap.MemoryPoolAligned(Chunk, @alignOf(Chunk)) = undefined;
var memoryPoolMutex: std.Thread.Mutex = .{};
var serverPool: std.heap.MemoryPoolAligned(ServerChunk, @alignOf(ServerChunk)) = undefined;
var serverPoolMutex: std.Thread.Mutex = .{};
pub fn init() void {
memoryPool = std.heap.MemoryPoolAligned(Chunk, @alignOf(Chunk)).init(main.globalAllocator.allocator);
serverPool = std.heap.MemoryPoolAligned(ServerChunk, @alignOf(ServerChunk)).init(main.globalAllocator.allocator);
}
pub fn deinit() void {
memoryPool.deinit();
serverPool.deinit();
}
pub const ChunkPosition = struct {
@@ -126,6 +130,8 @@ pub const ChunkPosition = struct {
return self.equals(notNull);
}
return false;
} else if(@TypeOf(other.*) == ServerChunk) {
return self.wx == other.super.pos.wx and self.wy == other.super.pos.wy and self.wz == other.super.pos.wz and self.voxelSize == other.super.pos.voxelSize;
} else if(@typeInfo(@TypeOf(other)) == .Pointer) {
return self.wx == other.pos.wx and self.wy == other.pos.wy and self.wz == other.pos.wz and self.voxelSize == other.pos.voxelSize;
} else @compileError("Unsupported");
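// Editor's note: the comptime dispatch above compares a *ServerChunk through its
// embedded .super.pos, while any other chunk pointer is compared via its own .pos.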
@@ -173,17 +179,12 @@ pub const Chunk = struct {
pos: ChunkPosition,
data: main.utils.PaletteCompressedRegion(Block, chunkVolume) = undefined,
wasChanged: bool = false,
generated: bool = false,
width: u31,
voxelSizeShift: u5,
voxelSizeMask: i32,
widthShift: u5,
mutex: std.Thread.Mutex,
refCount: std.atomic.Value(u16),
pub fn initAndIncreaseRefCount(pos: ChunkPosition) *Chunk {
pub fn init(pos: ChunkPosition) *Chunk {
memoryPoolMutex.lock();
const self = memoryPool.create() catch unreachable;
memoryPoolMutex.unlock();
@@ -196,75 +197,18 @@ pub const Chunk = struct {
.voxelSizeShift = voxelSizeShift,
.voxelSizeMask = pos.voxelSize - 1,
.widthShift = voxelSizeShift + chunkShift,
.mutex = std.Thread.Mutex{},
.refCount = std.atomic.Value(u16).init(1),
};
self.data.init();
return self;
}
pub fn deinit(self: *Chunk) void {
std.debug.assert(self.refCount.raw == 0);
if(self.wasChanged) {
self.save(main.server.world.?);
}
self.data.deinit();
memoryPoolMutex.lock();
memoryPool.destroy(@alignCast(self));
memoryPoolMutex.unlock();
}
pub fn increaseRefCount(self: *Chunk) void {
const prevVal = self.refCount.fetchAdd(1, .monotonic);
std.debug.assert(prevVal != 0);
}
pub fn decreaseRefCount(self: *Chunk) void {
const prevVal = self.refCount.fetchSub(1, .monotonic);
std.debug.assert(prevVal != 0);
if(prevVal == 1) {
self.deinit();
}
}
fn setChanged(self: *Chunk) void {
main.utils.assertLocked(&self.mutex);
if(!self.wasChanged) {
self.wasChanged = true;
self.increaseRefCount();
main.server.world.?.queueChunkUpdateAndDecreaseRefCount(self);
}
}
/// Checks if the given relative coordinates lie within the bounds of this chunk.
pub fn liesInChunk(self: *const Chunk, x: i32, y: i32, z: i32) bool {
return x >= 0 and x < self.width
and y >= 0 and y < self.width
and z >= 0 and z < self.width;
}
/// This is useful for converting for-loops to work at reduced resolution:
/// Instead of using
/// for(int x = start; x < end; x++)
/// use
/// for(int x = chunk.startIndex(start); x < end; x += chunk.getVoxelSize())
/// to only activate those voxels that are used in Cubyz's downscaling technique.
pub fn startIndex(self: *const Chunk, start: i32) i32 {
return start+self.voxelSizeMask & ~self.voxelSizeMask; // Rounds up to the nearest valid voxel coordinate.
}
/// Updates a block if the current value is air or the current block is degradable.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn updateBlockIfDegradable(self: *Chunk, _x: i32, _y: i32, _z: i32, newBlock: Block) void {
const x = _x >> self.voxelSizeShift;
const y = _y >> self.voxelSizeShift;
const z = _z >> self.voxelSizeShift;
const index = getIndex(x, y, z);
const oldBlock = self.data.getValue(index);
if(oldBlock.typ == 0 or oldBlock.degradable()) {
self.data.setValue(index, newBlock);
}
}
/// Updates a block if it is inside this chunk.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn updateBlock(self: *Chunk, _x: i32, _y: i32, _z: i32, newBlock: Block) void {
@@ -275,27 +219,6 @@ pub const Chunk = struct {
self.data.setValue(index, newBlock);
}
/// Updates a block if it is inside this chunk.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn updateBlockAndSetChanged(self: *Chunk, _x: i32, _y: i32, _z: i32, newBlock: Block) void {
const x = _x >> self.voxelSizeShift;
const y = _y >> self.voxelSizeShift;
const z = _z >> self.voxelSizeShift;
const index = getIndex(x, y, z);
self.data.setValue(index, newBlock);
self.setChanged();
}
/// Updates a block if it is inside this chunk. Should be used in generation to prevent accidentally storing these as changes.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn updateBlockInGeneration(self: *Chunk, _x: i32, _y: i32, _z: i32, newBlock: Block) void {
const x = _x >> self.voxelSizeShift;
const y = _y >> self.voxelSizeShift;
const z = _z >> self.voxelSizeShift;
const index = getIndex(x, y, z);
self.data.setValue(index, newBlock);
}
/// Gets a block if it is inside this chunk.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn getBlock(self: *const Chunk, _x: i32, _y: i32, _z: i32) Block {
@@ -305,11 +228,139 @@ pub const Chunk = struct {
const index = getIndex(x, y, z);
return self.data.getValue(index);
}
};
pub fn updateFromLowerResolution(self: *Chunk, other: *Chunk) void {
const xOffset = if(other.pos.wx != self.pos.wx) chunkSize/2 else 0; // Offsets of the lower resolution chunk in this chunk.
const yOffset = if(other.pos.wy != self.pos.wy) chunkSize/2 else 0;
const zOffset = if(other.pos.wz != self.pos.wz) chunkSize/2 else 0;
pub const ServerChunk = struct {
super: Chunk,
wasChanged: bool = false,
generated: bool = false,
mutex: std.Thread.Mutex = .{},
refCount: std.atomic.Value(u16),
pub fn initAndIncreaseRefCount(pos: ChunkPosition) *ServerChunk {
serverPoolMutex.lock();
const self = serverPool.create() catch unreachable;
serverPoolMutex.unlock();
std.debug.assert((pos.voxelSize - 1 & pos.voxelSize) == 0);
std.debug.assert(@mod(pos.wx, pos.voxelSize) == 0 and @mod(pos.wy, pos.voxelSize) == 0 and @mod(pos.wz, pos.voxelSize) == 0);
const voxelSizeShift: u5 = @intCast(std.math.log2_int(u31, pos.voxelSize));
self.* = ServerChunk {
.super = .{
.pos = pos,
.width = pos.voxelSize*chunkSize,
.voxelSizeShift = voxelSizeShift,
.voxelSizeMask = pos.voxelSize - 1,
.widthShift = voxelSizeShift + chunkShift,
},
.refCount = std.atomic.Value(u16).init(1),
};
self.super.data.init();
return self;
}
pub fn deinit(self: *ServerChunk) void {
std.debug.assert(self.refCount.raw == 0);
if(self.wasChanged) {
self.save(main.server.world.?);
}
self.super.data.deinit();
serverPoolMutex.lock();
serverPool.destroy(@alignCast(self));
serverPoolMutex.unlock();
}
pub fn setChanged(self: *ServerChunk) void {
main.utils.assertLocked(&self.mutex);
if(!self.wasChanged) {
self.wasChanged = true;
self.increaseRefCount();
main.server.world.?.queueChunkUpdateAndDecreaseRefCount(self);
}
}
pub fn increaseRefCount(self: *ServerChunk) void {
const prevVal = self.refCount.fetchAdd(1, .monotonic);
std.debug.assert(prevVal != 0);
}
pub fn decreaseRefCount(self: *ServerChunk) void {
const prevVal = self.refCount.fetchSub(1, .monotonic);
std.debug.assert(prevVal != 0);
if(prevVal == 1) {
self.deinit();
}
}
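// Usage sketch (editor's note, not part of this diff): whoever hands a chunk to
// another thread or queue calls increaseRefCount() first, and the receiver calls
// decreaseRefCount() when done. A fetchSub that returns 1 released the last
// reference, so deinit() (and through it save()) runs exactly once.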
/// Checks if the given relative coordinates lie within the bounds of this chunk.
pub fn liesInChunk(self: *const ServerChunk, x: i32, y: i32, z: i32) bool {
return x >= 0 and x < self.super.width
and y >= 0 and y < self.super.width
and z >= 0 and z < self.super.width;
}
/// This is useful for converting for-loops to work at reduced resolution:
/// Instead of using
/// for(int x = start; x < end; x++)
/// use
/// for(int x = chunk.startIndex(start); x < end; x += chunk.getVoxelSize())
/// to only activate those voxels that are used in Cubyz's downscaling technique.
pub fn startIndex(self: *const ServerChunk, start: i32) i32 {
return start+self.super.voxelSizeMask & ~self.super.voxelSizeMask; // Rounds up to the nearest valid voxel coordinate.
}
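// Worked example (editor's note): with voxelSize = 4 the mask is 3, so
// startIndex(5) == (5 + 3) & ~3 == 8 while startIndex(8) == 8; start coordinates
// are rounded up to the next multiple of the voxel size.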
/// Gets a block if it is inside this chunk.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn getBlock(self: *const ServerChunk, _x: i32, _y: i32, _z: i32) Block {
main.utils.assertLocked(&self.mutex);
const x = _x >> self.super.voxelSizeShift;
const y = _y >> self.super.voxelSizeShift;
const z = _z >> self.super.voxelSizeShift;
const index = getIndex(x, y, z);
return self.super.data.getValue(index);
}
/// Updates a block if it is inside this chunk.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn updateBlockAndSetChanged(self: *ServerChunk, _x: i32, _y: i32, _z: i32, newBlock: Block) void {
main.utils.assertLocked(&self.mutex);
const x = _x >> self.super.voxelSizeShift;
const y = _y >> self.super.voxelSizeShift;
const z = _z >> self.super.voxelSizeShift;
const index = getIndex(x, y, z);
self.super.data.setValue(index, newBlock);
self.setChanged();
}
/// Updates a block if the current value is air or the current block is degradable.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn updateBlockIfDegradable(self: *ServerChunk, _x: i32, _y: i32, _z: i32, newBlock: Block) void {
main.utils.assertLocked(&self.mutex);
const x = _x >> self.super.voxelSizeShift;
const y = _y >> self.super.voxelSizeShift;
const z = _z >> self.super.voxelSizeShift;
const index = getIndex(x, y, z);
const oldBlock = self.super.data.getValue(index);
if(oldBlock.typ == 0 or oldBlock.degradable()) {
self.super.data.setValue(index, newBlock);
}
}
/// Updates a block if it is inside this chunk. Should be used in generation to prevent accidentally storing these as changes.
/// Does not do any bound checks. They are expected to be done with the `liesInChunk` function.
pub fn updateBlockInGeneration(self: *ServerChunk, _x: i32, _y: i32, _z: i32, newBlock: Block) void {
main.utils.assertLocked(&self.mutex);
const x = _x >> self.super.voxelSizeShift;
const y = _y >> self.super.voxelSizeShift;
const z = _z >> self.super.voxelSizeShift;
const index = getIndex(x, y, z);
self.super.data.setValue(index, newBlock);
}
pub fn updateFromLowerResolution(self: *ServerChunk, other: *ServerChunk) void {
const xOffset = if(other.super.pos.wx != self.super.pos.wx) chunkSize/2 else 0; // Offsets of the lower resolution chunk in this chunk.
const yOffset = if(other.super.pos.wy != self.super.pos.wy) chunkSize/2 else 0;
const zOffset = if(other.super.pos.wz != self.super.pos.wz) chunkSize/2 else 0;
self.mutex.lock();
defer self.mutex.unlock();
main.utils.assertLocked(&other.mutex);
@@ -332,7 +383,7 @@ pub const Chunk = struct {
while(dz <= 1): (dz += 1) {
const index = getIndex(x*2 + dx, y*2 + dy, z*2 + dz);
const i = dx*4 + dz*2 + dy;
octantBlocks[i] = other.data.getValue(index);
octantBlocks[i] = other.super.data.getValue(index);
if(octantBlocks[i].typ == 0) {
neighborCount[i] = 0;
continue; // I don't care about air blocks.
@@ -345,7 +396,7 @@ pub const Chunk = struct {
const nz = z*2 + dz + Neighbors.relZ[n];
if((nx & chunkMask) == nx and (ny & chunkMask) == ny and (nz & chunkMask) == nz) { // If it's inside the chunk.
const neighborIndex = getIndex(nx, ny, nz);
if(other.data.getValue(neighborIndex).transparent()) {
if(other.super.data.getValue(neighborIndex).transparent()) {
count += 5;
}
} else {
@@ -368,7 +419,7 @@ pub const Chunk = struct {
}
// Update the block:
const thisIndex = getIndex(x + xOffset, y + yOffset, z + zOffset);
self.data.setValue(thisIndex, block);
self.super.data.setValue(thisIndex, block);
}
}
}
@@ -376,32 +427,33 @@ pub const Chunk = struct {
self.setChanged();
}
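// Editor's note on updateFromLowerResolution above (a summary, not part of this
// diff): each 2x2x2 octant of the higher-resolution chunk collapses into a single
// voxel, and the transparent-neighbor counts bias the pick toward blocks with
// exposed faces, so downscaled LOD chunks keep a similar look.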
pub fn save(self: *Chunk, world: *main.server.ServerWorld) void {
pub fn save(self: *ServerChunk, world: *main.server.ServerWorld) void {
self.mutex.lock();
defer self.mutex.unlock();
if(self.wasChanged) {
const regionSize = self.pos.voxelSize*chunkSize*main.server.storage.RegionFile.regionSize;
const pos = self.super.pos;
const regionSize = pos.voxelSize*chunkSize*main.server.storage.RegionFile.regionSize;
const regionMask: i32 = regionSize - 1;
const region = main.server.storage.loadRegionFileAndIncreaseRefCount(self.pos.wx & ~regionMask, self.pos.wy & ~regionMask, self.pos.wz & ~regionMask, self.pos.voxelSize);
const region = main.server.storage.loadRegionFileAndIncreaseRefCount(pos.wx & ~regionMask, pos.wy & ~regionMask, pos.wz & ~regionMask, pos.voxelSize);
defer region.decreaseRefCount();
const data = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, self);
const data = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, &self.super);
defer main.stackAllocator.free(data);
region.storeChunk(
data,
@as(usize, @intCast(self.pos.wx -% region.pos.wx))/self.pos.voxelSize/chunkSize,
@as(usize, @intCast(self.pos.wy -% region.pos.wy))/self.pos.voxelSize/chunkSize,
@as(usize, @intCast(self.pos.wz -% region.pos.wz))/self.pos.voxelSize/chunkSize,
@as(usize, @intCast(pos.wx -% region.pos.wx))/pos.voxelSize/chunkSize,
@as(usize, @intCast(pos.wy -% region.pos.wy))/pos.voxelSize/chunkSize,
@as(usize, @intCast(pos.wz -% region.pos.wz))/pos.voxelSize/chunkSize,
);
self.wasChanged = false;
// Update the next lod chunk:
if(self.pos.voxelSize != 1 << settings.highestLOD) {
var pos = self.pos;
pos.wx &= ~(pos.voxelSize*chunkSize);
pos.wy &= ~(pos.voxelSize*chunkSize);
pos.wz &= ~(pos.voxelSize*chunkSize);
pos.voxelSize *= 2;
const nextHigherLod = world.getOrGenerateChunkAndIncreaseRefCount(pos);
if(pos.voxelSize != 1 << settings.highestLOD) {
var nextPos = pos;
nextPos.wx &= ~(pos.voxelSize*chunkSize);
nextPos.wy &= ~(pos.voxelSize*chunkSize);
nextPos.wz &= ~(pos.voxelSize*chunkSize);
nextPos.voxelSize *= 2;
const nextHigherLod = world.getOrGenerateChunkAndIncreaseRefCount(nextPos);
defer nextHigherLod.decreaseRefCount();
nextHigherLod.updateFromLowerResolution(self);
}

View File

@@ -2,7 +2,7 @@ const std = @import("std");
const blocks = @import("blocks.zig");
const chunk_zig = @import("chunk.zig");
const Chunk = chunk_zig.Chunk;
const ServerChunk = chunk_zig.ServerChunk;
const game = @import("game.zig");
const World = game.World;
const ServerWorld = main.server.ServerWorld;
@@ -294,7 +294,7 @@ pub const ItemDropManager = struct {
// }
}
fn updateEnt(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
fn updateEnt(self: *ItemDropManager, chunk: *ServerChunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
main.utils.assertLocked(&self.mutex);
const startedInABlock = self.checkBlocks(chunk, pos);
if(startedInABlock) {
@@ -318,7 +318,7 @@ pub const ItemDropManager = struct {
vel.* *= @splat(@max(0, 1 - drag*deltaTime));
}
fn fixStuckInBlock(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
fn fixStuckInBlock(self: *ItemDropManager, chunk: *ServerChunk, pos: *Vec3d, vel: *Vec3d, deltaTime: f64) void {
main.utils.assertLocked(&self.mutex);
const centeredPos = pos.* - @as(Vec3d, @splat(0.5));
const pos0: Vec3i = @intFromFloat(@floor(centeredPos));
@@ -355,7 +355,7 @@ pub const ItemDropManager = struct {
}
}
fn checkBlocks(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d) bool {
fn checkBlocks(self: *ItemDropManager, chunk: *ServerChunk, pos: *Vec3d) bool {
const lowerCornerPos = pos.* - @as(Vec3d, @splat(radius));
const pos0f64 = @floor(lowerCornerPos);
const pos0: Vec3i = @intFromFloat(pos0f64);
@@ -389,7 +389,7 @@ pub const ItemDropManager = struct {
return isSolid;
}
fn checkBlock(self: *ItemDropManager, chunk: *Chunk, pos: *Vec3d, blockPos: Vec3i) bool {
fn checkBlock(self: *ItemDropManager, chunk: *ServerChunk, pos: *Vec3d, blockPos: Vec3i) bool {
// TODO: Check if the item drop collides with the block in the given location.
_ = self;
_ = chunk;

View File

@@ -721,23 +721,36 @@ pub const Protocols = struct {
pub const chunkTransmission = struct {
pub const id: u8 = 3;
fn receive(_: *Connection, data: []const u8) !void {
const ch = try main.server.storage.ChunkCompression.decompressChunkAndIncreaseRefCount(data);
renderer.mesh_storage.updateChunkMeshAndDecreaseRefCount(ch);
const pos = chunk.ChunkPosition{
.wx = std.mem.readInt(i32, data[0..4], .big),
.wy = std.mem.readInt(i32, data[4..8], .big),
.wz = std.mem.readInt(i32, data[8..12], .big),
.voxelSize = @intCast(std.mem.readInt(i32, data[12..16], .big)),
};
const ch = chunk.Chunk.init(pos);
try main.server.storage.ChunkCompression.decompressChunk(ch, data[16..]);
renderer.mesh_storage.updateChunkMesh(ch);
}
fn sendChunkOverTheNetwork(conn: *Connection, ch: *chunk.Chunk) void {
fn sendChunkOverTheNetwork(conn: *Connection, ch: *chunk.ServerChunk) void {
ch.mutex.lock();
const data = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, ch);
const chunkData = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, &ch.super);
ch.mutex.unlock();
defer main.stackAllocator.free(data);
defer main.stackAllocator.free(chunkData);
const data = main.stackAllocator.alloc(u8, chunkData.len + 16);
std.mem.writeInt(i32, data[0..4], ch.super.pos.wx, .big);
std.mem.writeInt(i32, data[4..8], ch.super.pos.wy, .big);
std.mem.writeInt(i32, data[8..12], ch.super.pos.wz, .big);
std.mem.writeInt(i32, data[12..16], ch.super.pos.voxelSize, .big);
@memcpy(data[16..], chunkData);
conn.sendImportant(id, data);
}
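// Resulting wire format (editor's note; this matches what receive() parses above):
//   data[0..4]   wx        (i32, big endian)
//   data[4..8]   wy
//   data[8..12]  wz
//   data[12..16] voxelSize
//   data[16..]   compressed block data, which no longer repeats the position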
fn sendChunkLocally(ch: *chunk.Chunk) void {
const chunkCopy = chunk.Chunk.initAndIncreaseRefCount(ch.pos);
fn sendChunkLocally(ch: *chunk.ServerChunk) void {
const chunkCopy = chunk.Chunk.init(ch.super.pos);
chunkCopy.data.deinit();
chunkCopy.data.initCopy(&ch.data);
renderer.mesh_storage.updateChunkMeshAndDecreaseRefCount(chunkCopy);
chunkCopy.data.initCopy(&ch.super.data);
renderer.mesh_storage.updateChunkMesh(chunkCopy);
}
pub fn sendChunk(conn: *Connection, ch: *chunk.Chunk) void {
pub fn sendChunk(conn: *Connection, ch: *chunk.ServerChunk) void {
if(conn.user.?.isLocal) {
sendChunkLocally(ch);
} else {

View File

@@ -515,7 +515,7 @@ pub const ChunkMesh = struct {
std.debug.assert(self.refCount.load(.monotonic) == 0);
self.opaqueMesh.deinit();
self.transparentMesh.deinit();
self.chunk.decreaseRefCount();
self.chunk.deinit();
main.globalAllocator.free(self.currentSorting);
main.globalAllocator.free(self.sortingOutputBuffer);
for(self.lightingData) |lightingChunk| {

View File

@@ -1072,7 +1072,7 @@ pub const MeshGenerationTask = struct {
}
pub fn clean(self: *MeshGenerationTask) void {
self.mesh.decreaseRefCount();
self.mesh.deinit();
main.globalAllocator.destroy(self);
}
};
@@ -1083,7 +1083,7 @@ pub fn updateBlock(x: i32, y: i32, z: i32, newBlock: blocks.Block) void {
blockUpdateList.append(BlockUpdate{.x=x, .y=y, .z=z, .newBlock=newBlock});
}
pub fn updateChunkMeshAndDecreaseRefCount(mesh: *chunk.Chunk) void {
pub fn updateChunkMesh(mesh: *chunk.Chunk) void {
MeshGenerationTask.schedule(mesh);
}

View File

@@ -207,54 +207,43 @@ pub fn loadRegionFileAndIncreaseRefCount(wx: i32, wy: i32, wz: i32, voxelSize: u
pub const ChunkCompression = struct {
const CompressionAlgo = enum(u32) {
deflate = 0, // TODO: Investigate if palette compression (or palette compression with huffman coding) is more efficient.
deflate_with_position = 0,
deflate = 1, // TODO: Investigate if palette compression (or palette compression with huffman coding) is more efficient.
_, // TODO: Add more algorithms for specific cases like uniform chunks.
};
pub fn compressChunk(allocator: main.utils.NeverFailingAllocator, ch: *chunk.Chunk) []const u8 {
main.utils.assertLocked(&ch.mutex);
var uncompressedData: [chunk.chunkVolume*@sizeOf(u32)]u8 = undefined;
for(0..chunk.chunkVolume) |i| {
std.mem.writeInt(u32, uncompressedData[4*i..][0..4], ch.data.getValue(i).toInt(), .big);
}
const compressedData = main.utils.Compression.deflate(main.stackAllocator, &uncompressedData);
defer main.stackAllocator.free(compressedData);
const data = allocator.alloc(u8, 20 + compressedData.len);
@memcpy(data[20..], compressedData);
const data = allocator.alloc(u8, 4 + compressedData.len);
@memcpy(data[4..], compressedData);
std.mem.writeInt(i32, data[0..4], @intFromEnum(CompressionAlgo.deflate), .big);
std.mem.writeInt(i32, data[4..8], ch.pos.wx, .big);
std.mem.writeInt(i32, data[8..12], ch.pos.wy, .big);
std.mem.writeInt(i32, data[12..16], ch.pos.wz, .big);
std.mem.writeInt(i32, data[16..20], ch.pos.voxelSize, .big);
return data;
}
pub fn decompressChunkAndIncreaseRefCount(_data: []const u8) error{corrupted}!*chunk.Chunk {
pub fn decompressChunk(ch: *chunk.Chunk, _data: []const u8) error{corrupted}!void {
var data = _data;
if(data.len < 4) return error.corrupted;
const algo: CompressionAlgo = @enumFromInt(std.mem.readInt(u32, data[0..4], .big));
data = data[4..];
if(algo == .deflate_with_position) data = data[16..];
switch(algo) {
.deflate => {
if(data.len < 16) return error.corrupted;
const pos = chunk.ChunkPosition{
.wx = std.mem.readInt(i32, data[0..4], .big),
.wy = std.mem.readInt(i32, data[4..8], .big),
.wz = std.mem.readInt(i32, data[8..12], .big),
.voxelSize = @intCast(std.mem.readInt(i32, data[12..16], .big)),
};
.deflate, .deflate_with_position => {
const _inflatedData = main.stackAllocator.alloc(u8, chunk.chunkVolume*4);
defer main.stackAllocator.free(_inflatedData);
const _inflatedLen = main.utils.Compression.inflateTo(_inflatedData, data[16..]) catch return error.corrupted;
const _inflatedLen = main.utils.Compression.inflateTo(_inflatedData, data[0..]) catch return error.corrupted;
if(_inflatedLen != chunk.chunkVolume*4) {
return error.corrupted;
}
data = _inflatedData;
const ch = chunk.Chunk.initAndIncreaseRefCount(pos);
for(0..chunk.chunkVolume) |i| {
ch.data.setValue(i, main.blocks.Block.fromInt(std.mem.readInt(u32, data[0..4], .big)));
data = data[4..];
}
return ch;
return;
},
_ => {
return error.corrupted;

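// On-disk chunk format after this change (editor's note, derived from
// compressChunk/decompressChunk above):
//   data[0..4]  CompressionAlgo tag, big endian (deflate == 1)
//   data[4..]   deflate stream inflating to chunkVolume 4-byte big-endian block values
// Legacy data tagged deflate_with_position (0) still carries 16 position bytes
// after the tag; decompressChunk skips them, since the caller now supplies the
// position.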
View File

@@ -3,7 +3,7 @@ const std = @import("std");
const main = @import("root");
const Array3D = main.utils.Array3D;
const Cache = main.utils.Cache;
const Chunk = main.chunk.Chunk;
const ServerChunk = main.chunk.ServerChunk;
const ChunkPosition = main.chunk.ChunkPosition;
const JsonElement = main.JsonElement;
const vec = main.vec;
@@ -383,17 +383,19 @@ pub const CaveBiomeMapView = struct {
noiseY: ?CachedFractalNoise3D = null,
noiseZ: ?CachedFractalNoise3D = null,
pub fn init(chunk: *Chunk) CaveBiomeMapView {
pub fn init(chunk: *ServerChunk) CaveBiomeMapView {
const pos = chunk.super.pos;
const width = chunk.super.width;
var self = CaveBiomeMapView {
.super = InterpolatableCaveBiomeMapView.init(chunk.pos, chunk.width),
.super = InterpolatableCaveBiomeMapView.init(pos, width),
};
if(chunk.pos.voxelSize < 8) {
const startX = (chunk.pos.wx -% 32) & ~@as(i32, 63);
const startY = (chunk.pos.wy -% 32) & ~@as(i32, 63);
const startZ = (chunk.pos.wz -% 32) & ~@as(i32, 63);
self.noiseX = CachedFractalNoise3D.init(startX, startY, startZ, chunk.pos.voxelSize*4, chunk.width + 128, main.server.world.?.seed ^ 0x764923684396, 64);
self.noiseY = CachedFractalNoise3D.init(startX, startY, startZ, chunk.pos.voxelSize*4, chunk.width + 128, main.server.world.?.seed ^ 0x6547835649265429, 64);
self.noiseZ = CachedFractalNoise3D.init(startX, startY, startZ, chunk.pos.voxelSize*4, chunk.width + 128, main.server.world.?.seed ^ 0x56789365396783, 64);
if(pos.voxelSize < 8) {
const startX = (pos.wx -% 32) & ~@as(i32, 63);
const startY = (pos.wy -% 32) & ~@as(i32, 63);
const startZ = (pos.wz -% 32) & ~@as(i32, 63);
self.noiseX = CachedFractalNoise3D.init(startX, startY, startZ, pos.voxelSize*4, width + 128, main.server.world.?.seed ^ 0x764923684396, 64);
self.noiseY = CachedFractalNoise3D.init(startX, startY, startZ, pos.voxelSize*4, width + 128, main.server.world.?.seed ^ 0x6547835649265429, 64);
self.noiseZ = CachedFractalNoise3D.init(startX, startY, startZ, pos.voxelSize*4, width + 128, main.server.world.?.seed ^ 0x56789365396783, 64);
}
return self;
}

View File

@@ -2,7 +2,7 @@ const std = @import("std");
const Atomic = std.atomic.Value;
const main = @import("root");
const Chunk = main.chunk.Chunk;
const ServerChunk = main.chunk.ServerChunk;
const ChunkPosition = main.chunk.ChunkPosition;
const Cache = main.utils.Cache;
const JsonElement = main.JsonElement;
@@ -141,21 +141,23 @@ pub const CaveGenerator = struct {
};
pub const CaveMapView = struct {
reference: *Chunk,
reference: *ServerChunk,
fragments: [8]*CaveMapFragment,
pub fn init(chunk: *Chunk) CaveMapView {
pub fn init(chunk: *ServerChunk) CaveMapView {
const pos = chunk.super.pos;
const width = chunk.super.width;
return CaveMapView {
.reference = chunk,
.fragments = [_]*CaveMapFragment {
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx -% chunk.width, chunk.pos.wy -% chunk.width, chunk.pos.wz -% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx -% chunk.width, chunk.pos.wy -% chunk.width, chunk.pos.wz +% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx -% chunk.width, chunk.pos.wy +% chunk.width, chunk.pos.wz -% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx -% chunk.width, chunk.pos.wy +% chunk.width, chunk.pos.wz +% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx +% chunk.width, chunk.pos.wy -% chunk.width, chunk.pos.wz -% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx +% chunk.width, chunk.pos.wy -% chunk.width, chunk.pos.wz +% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx +% chunk.width, chunk.pos.wy +% chunk.width, chunk.pos.wz -% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(chunk.pos.wx +% chunk.width, chunk.pos.wy +% chunk.width, chunk.pos.wz +% chunk.width, chunk.pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx -% width, pos.wy -% width, pos.wz -% width, pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx -% width, pos.wy -% width, pos.wz +% width, pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx -% width, pos.wy +% width, pos.wz -% width, pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx -% width, pos.wy +% width, pos.wz +% width, pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx +% width, pos.wy -% width, pos.wz -% width, pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx +% width, pos.wy -% width, pos.wz +% width, pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx +% width, pos.wy +% width, pos.wz -% width, pos.voxelSize),
getOrGenerateFragmentAndIncreaseRefCount(pos.wx +% width, pos.wy +% width, pos.wz +% width, pos.voxelSize),
},
};
}
@@ -167,40 +169,40 @@ pub const CaveMapView = struct {
}
pub fn isSolid(self: CaveMapView, relX: i32, relY: i32, relZ: i32) bool {
const wx = relX +% self.reference.pos.wx;
const wy = relY +% self.reference.pos.wy;
const wz = relZ +% self.reference.pos.wz;
const wx = relX +% self.reference.super.pos.wx;
const wy = relY +% self.reference.super.pos.wy;
const wz = relZ +% self.reference.super.pos.wz;
var index: u8 = 0;
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 4;
}
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 2;
}
if(wz -% self.fragments[0].pos.wz >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wz -% self.fragments[0].pos.wz >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 1;
}
const fragmentRelX = wx - self.fragments[index].pos.wx;
const fragmentRelY = wy - self.fragments[index].pos.wy;
const fragmentRelZ = @divFloor(wz - self.fragments[index].pos.wz, self.reference.pos.voxelSize);
const fragmentRelZ = @divFloor(wz - self.fragments[index].pos.wz, self.reference.super.pos.voxelSize);
const height = self.fragments[index].getColumnData(fragmentRelX, fragmentRelY);
return (height & @as(u64, 1)<<@intCast(fragmentRelZ)) != 0;
}
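// Editor's note: the index bits select one of the eight fragments cached in
// init(): +4 for the upper half along x, +2 along y, +1 along z.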
pub fn getHeightData(self: CaveMapView, relX: i32, relY: i32) u32 {
const wx = relX +% self.reference.pos.wx;
const wy = relY +% self.reference.pos.wy;
const wx = relX +% self.reference.super.pos.wx;
const wy = relY +% self.reference.super.pos.wy;
var index: u8 = 0;
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 4;
}
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 2;
}
var deltaZ = self.reference.pos.wz -% self.fragments[0].pos.wz;
if(deltaZ >= CaveMapFragment.height*self.reference.pos.voxelSize) {
var deltaZ = self.reference.super.pos.wz -% self.fragments[0].pos.wz;
if(deltaZ >= CaveMapFragment.height*self.reference.super.pos.voxelSize) {
index += 1;
deltaZ -= CaveMapFragment.height*self.reference.pos.voxelSize;
deltaZ -= CaveMapFragment.height*self.reference.super.pos.voxelSize;
}
const fragmentRelX = wx - self.fragments[index].pos.wx;
const fragmentRelY = wy - self.fragments[index].pos.wy;
@@ -213,16 +215,16 @@ pub const CaveMapView = struct {
}
pub fn findTerrainChangeAbove(self: CaveMapView, relX: i32, relY: i32, z: i32) i32 {
const wx = relX +% self.reference.pos.wx;
const wy = relY +% self.reference.pos.wy;
const wx = relX +% self.reference.super.pos.wx;
const wy = relY +% self.reference.super.pos.wy;
var index: u8 = 0;
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 4;
}
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 2;
}
var relativeZ = @divFloor(z +% self.reference.pos.wz -% self.fragments[0].pos.wz, self.reference.pos.voxelSize);
var relativeZ = @divFloor(z +% self.reference.super.pos.wz -% self.fragments[0].pos.wz, self.reference.super.pos.voxelSize);
std.debug.assert(relativeZ >= 0 and relativeZ < 2*CaveMapFragment.height);
const fragmentRelX = wx - self.fragments[index].pos.wx;
const fragmentRelY = wy - self.fragments[index].pos.wy;
@@ -250,20 +252,20 @@ pub const CaveMapView = struct {
}
}
result += @ctz(height);
return result*self.reference.pos.voxelSize +% self.fragments[0].pos.wz -% self.reference.pos.wz;
return result*self.reference.super.pos.voxelSize +% self.fragments[0].pos.wz -% self.reference.super.pos.wz;
}
pub fn findTerrainChangeBelow(self: CaveMapView, relX: i32, relY: i32, z: i32) i32 {
const wx = relX +% self.reference.pos.wx;
const wy = relY +% self.reference.pos.wy;
const wx = relX +% self.reference.super.pos.wx;
const wy = relY +% self.reference.super.pos.wy;
var index: u8 = 0;
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wx -% self.fragments[0].pos.wx >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 4;
}
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.pos.voxelSize) {
if(wy -% self.fragments[0].pos.wy >= CaveMapFragment.width*self.reference.super.pos.voxelSize) {
index += 2;
}
var relativeZ = @divFloor(z +% self.reference.pos.wz -% self.fragments[0].pos.wz, self.reference.pos.voxelSize);
var relativeZ = @divFloor(z +% self.reference.super.pos.wz -% self.fragments[0].pos.wz, self.reference.super.pos.voxelSize);
std.debug.assert(relativeZ >= 0 and relativeZ < 2*CaveMapFragment.height);
const fragmentRelX = wx - self.fragments[index].pos.wx;
const fragmentRelY = wy - self.fragments[index].pos.wy;
@@ -291,7 +293,7 @@ pub const CaveMapView = struct {
}
}
result -= @clz(height);
return result*self.reference.pos.voxelSize +% self.fragments[0].pos.wz -% self.reference.pos.wz;
return result*self.reference.super.pos.voxelSize +% self.fragments[0].pos.wz -% self.reference.super.pos.wz;
}
};

View File

@@ -2,7 +2,7 @@ const std = @import("std");
const main = @import("root");
const blocks = main.blocks;
const Chunk = main.chunk.Chunk;
const ServerChunk = main.chunk.ServerChunk;
const JsonElement = main.JsonElement;
const terrain = main.server.terrain;
const NeverFailingAllocator = main.utils.NeverFailingAllocator;
@@ -10,7 +10,7 @@ const NeverFailingAllocator = main.utils.NeverFailingAllocator;
const StructureModel = struct {
const VTable = struct {
loadModel: *const fn(arenaAllocator: NeverFailingAllocator, parameters: JsonElement) *anyopaque,
generate: *const fn(self: *anyopaque, x: i32, y: i32, z: i32, chunk: *Chunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void,
generate: *const fn(self: *anyopaque, x: i32, y: i32, z: i32, chunk: *ServerChunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void,
};
vtable: VTable,
@@ -30,7 +30,7 @@ const StructureModel = struct {
};
}
pub fn generate(self: StructureModel, x: i32, y: i32, z: i32, chunk: *Chunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
pub fn generate(self: StructureModel, x: i32, y: i32, z: i32, chunk: *ServerChunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
self.vtable.generate(self.data, x, y, z, chunk, caveMap, seed);
}
@@ -215,7 +215,7 @@ pub const BlockStructure = struct {
allocator.free(self.structure);
}
pub fn addSubTerranian(self: BlockStructure, chunk: *Chunk, startingDepth: i32, minDepth: i32, x: i32, y: i32, seed: *u64) i32 {
pub fn addSubTerranian(self: BlockStructure, chunk: *ServerChunk, startingDepth: i32, minDepth: i32, x: i32, y: i32, seed: *u64) i32 {
var depth = startingDepth;
for(self.structure) |blockStack| {
const total = blockStack.min + main.random.nextIntBounded(u32, seed, @as(u32, 1) + blockStack.max - blockStack.min);
@@ -225,12 +225,12 @@ pub const BlockStructure = struct {
if(chunk.liesInChunk(x, y, depth)) {
chunk.updateBlockInGeneration(x, y, depth, block);
}
depth -%= chunk.pos.voxelSize;
depth -%= chunk.super.pos.voxelSize;
if(depth -% minDepth <= 0)
return depth +% chunk.pos.voxelSize;
return depth +% chunk.super.pos.voxelSize;
}
}
return depth +% chunk.pos.voxelSize;
return depth +% chunk.super.pos.voxelSize;
}
};

View File

@@ -39,16 +39,16 @@ pub fn deinit() void {
}
pub fn generate(worldSeed: u64, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
if(chunk.pos.voxelSize > 2) return;
const size = chunk.width;
pub fn generate(worldSeed: u64, chunk: *main.chunk.ServerChunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
if(chunk.super.pos.voxelSize > 2) return;
const size = chunk.super.width;
// Generate caves from all nearby chunks:
var x = chunk.pos.wx -% main.chunk.chunkSize;
while(x != chunk.pos.wx +% size +% main.chunk.chunkSize) : (x +%= main.chunk.chunkSize) {
var y = chunk.pos.wy -% main.chunk.chunkSize;
while(y != chunk.pos.wy +% size +% main.chunk.chunkSize) : (y +%= main.chunk.chunkSize) {
var z = chunk.pos.wz -% main.chunk.chunkSize;
while(z != chunk.pos.wz +% size +% main.chunk.chunkSize) : (z +%= main.chunk.chunkSize) {
var x = chunk.super.pos.wx -% main.chunk.chunkSize;
while(x != chunk.super.pos.wx +% size +% main.chunk.chunkSize) : (x +%= main.chunk.chunkSize) {
var y = chunk.super.pos.wy -% main.chunk.chunkSize;
while(y != chunk.super.pos.wy +% size +% main.chunk.chunkSize) : (y +%= main.chunk.chunkSize) {
var z = chunk.super.pos.wz -% main.chunk.chunkSize;
while(z != chunk.super.pos.wz +% size +% main.chunk.chunkSize) : (z +%= main.chunk.chunkSize) {
var seed = random.initSeed3D(worldSeed, .{x, y, z});
considerCoordinates(x, y, z, chunk, caveMap, biomeMap, &seed);
}
@@ -60,10 +60,10 @@ fn distSqr(x: f32, y: f32, z: f32) f32 {
return x*x + y*y + z*z;
}
fn considerCrystal(x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, seed: *u64, useNeedles: bool, types: []u16) void {
const relX: f32 = @floatFromInt(x -% chunk.pos.wx);
const relY: f32 = @floatFromInt(y -% chunk.pos.wy);
const relZ: f32 = @floatFromInt(z -% chunk.pos.wz);
fn considerCrystal(x: i32, y: i32, z: i32, chunk: *main.chunk.ServerChunk, seed: *u64, useNeedles: bool, types: []u16) void {
const relX: f32 = @floatFromInt(x -% chunk.super.pos.wx);
const relY: f32 = @floatFromInt(y -% chunk.super.pos.wy);
const relZ: f32 = @floatFromInt(z -% chunk.super.pos.wz);
const typ = types[random.nextIntBounded(u32, seed, @as(u32, @intCast(types.len)))];
// Make some crystal spikes in random directions:
var spikes: f32 = 4;
@@ -102,7 +102,7 @@ fn considerCrystal(x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, seed: *u64,
while(z3 <= zMax) : (z3 += 1) {
const dist = distSqr(@as(f32, @floatFromInt(x3)) - x2, @as(f32, @floatFromInt(y3)) - y2, @as(f32, @floatFromInt(z3)) - z2);
if(dist < size*size) {
if(x3 >= 0 and x3 < chunk.width and y3 >= 0 and y3 < chunk.width and z3 >= 0 and z3 < chunk.width) {
if(x3 >= 0 and x3 < chunk.super.width and y3 >= 0 and y3 < chunk.super.width and z3 >= 0 and z3 < chunk.super.width) {
const block: main.blocks.Block = chunk.getBlock(x3, y3, z3);
if(block.typ == 0 or block.degradable() or block.blockClass() == .fluid) {
chunk.updateBlockInGeneration(x3, y3, z3, .{.typ = typ, .data = 0}); // TODO: Use natural standard.
@@ -119,9 +119,9 @@ fn considerCrystal(x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, seed: *u64,
}
}
fn considerCoordinates(x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView, seed: *u64) void {
fn considerCoordinates(x: i32, y: i32, z: i32, chunk: *main.chunk.ServerChunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView, seed: *u64) void {
const oldSeed = seed.*;
const crystalSpawns = biomeMap.getBiomeAndSeed(x +% main.chunk.chunkSize/2 -% chunk.pos.wx, y +% main.chunk.chunkSize/2 -% chunk.pos.wy, z +% main.chunk.chunkSize/2 -% chunk.pos.wz, true, seed).crystals;
const crystalSpawns = biomeMap.getBiomeAndSeed(x +% main.chunk.chunkSize/2 -% chunk.super.pos.wx, y +% main.chunk.chunkSize/2 -% chunk.super.pos.wy, z +% main.chunk.chunkSize/2 -% chunk.super.pos.wz, true, seed).crystals;
random.scrambleSeed(seed);
var differendColors: u32 = 1;
if(random.nextInt(u1, seed) != 0) {
@@ -143,9 +143,9 @@ fn considerCoordinates(x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, caveMap
const worldX = x + random.nextIntBounded(u31, seed, main.chunk.chunkSize);
const worldY = y + random.nextIntBounded(u31, seed, main.chunk.chunkSize);
const worldZ = z + random.nextIntBounded(u31, seed, main.chunk.chunkSize);
const relX = worldX -% chunk.pos.wx;
const relY = worldY -% chunk.pos.wy;
const relZ = worldZ -% chunk.pos.wz;
const relX = worldX -% chunk.super.pos.wx;
const relY = worldY -% chunk.super.pos.wy;
const relZ = worldZ -% chunk.super.pos.wz;
if(caveMap.isSolid(relX, relY, relZ)) { // Only start crystals in solid blocks
// Only start crystals when they are close to the surface (±SURFACE_DIST blocks)
if(

View File

@@ -32,13 +32,13 @@ pub fn deinit() void {
}
// Works basically like cave generation, but considers far fewer chunks and has a few other differences.
pub fn generate(worldSeed: u64, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
pub fn generate(worldSeed: u64, chunk: *main.chunk.ServerChunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
_ = caveMap;
_ = biomeMap;
if(chunk.pos.voxelSize != 1) return;
const cx = chunk.pos.wx >> main.chunk.chunkShift;
const cy = chunk.pos.wy >> main.chunk.chunkShift;
const cz = chunk.pos.wz >> main.chunk.chunkShift;
if(chunk.super.pos.voxelSize != 1) return;
const cx = chunk.super.pos.wx >> main.chunk.chunkShift;
const cy = chunk.super.pos.wy >> main.chunk.chunkShift;
const cz = chunk.super.pos.wz >> main.chunk.chunkShift;
// Generate caves from all nearby chunks:
var x = cx - 1;
while(x < cx + 1) : (x +%= 1) {
@@ -59,7 +59,7 @@ pub fn generate(worldSeed: u64, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveM
}
}
fn considerCoordinates(ore: *const main.blocks.Ore, relX: f32, relY: f32, relZ: f32, chunk: *main.chunk.Chunk, startSeed: u64) void {
fn considerCoordinates(ore: *const main.blocks.Ore, relX: f32, relY: f32, relZ: f32, chunk: *main.chunk.ServerChunk, startSeed: u64) void {
const chunkSizeFloat: f32 = @floatFromInt(main.chunk.chunkSize);
// Compose the seeds from some random stats of the ore. They generally shouldn't be the same for two different ores. TODO: Give each block a hash function (id based) that can be used in cases like this.
var seed = startSeed ^ @as(u32, @bitCast(ore.maxHeight)) ^ @as(u32, @bitCast(ore.size)) ^ @as(u32, @bitCast(main.blocks.Block.hardness(.{.typ = ore.blockType, .data = 0})));
@@ -80,9 +80,9 @@ fn considerCoordinates(ore: *const main.blocks.Ore, relX: f32, relY: f32, relZ:
var yMin: i32 = @intFromFloat(veinRelY - radius);
var yMax: i32 = @intFromFloat(@ceil(veinRelY + radius));
xMin = @max(xMin, 0);
xMax = @min(xMax, chunk.width);
xMax = @min(xMax, chunk.super.width);
yMin = @max(yMin, 0);
yMax = @min(yMax, chunk.width);
yMax = @min(yMax, chunk.super.width);
var veinSeed = random.nextInt(u64, &seed);
var curX = xMin;
@@ -97,7 +97,7 @@ fn considerCoordinates(ore: *const main.blocks.Ore, relX: f32, relY: f32, relZ:
var zMin: i32 = @intFromFloat(veinRelZ - zDistance);
var zMax: i32 = @intFromFloat(@ceil(veinRelZ + zDistance));
zMin = @max(zMin, 0);
zMax = @min(zMax, chunk.width);
zMax = @min(zMax, chunk.super.width);
var curZ = zMin;
while(curZ < zMax) : (curZ += 1) {
const distToCenterZ = (@as(f32, @floatFromInt(curZ)) - veinRelZ)/radius;

View File

@@ -27,26 +27,26 @@ pub fn deinit() void {
}
pub fn generate(worldSeed: u64, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
if(chunk.pos.voxelSize < 4) {
pub fn generate(worldSeed: u64, chunk: *main.chunk.ServerChunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
if(chunk.super.pos.voxelSize < 4) {
// Uses a blue noise pattern for all structures that shouldn't touch.
const blueNoise = noise.BlueNoise.getRegionData(main.stackAllocator, chunk.pos.wx -% 8, chunk.pos.wy -% 8, chunk.width + 16, chunk.width + 16);
const blueNoise = noise.BlueNoise.getRegionData(main.stackAllocator, chunk.super.pos.wx -% 8, chunk.super.pos.wy -% 8, chunk.super.width + 16, chunk.super.width + 16);
defer main.stackAllocator.free(blueNoise);
for(blueNoise) |coordinatePair| {
const px = @as(i32, @intCast(coordinatePair >> 16)) - 8; // TODO: Maybe add a blue-noise iterator or something like that?
const py = @as(i32, @intCast(coordinatePair & 0xffff)) - 8;
const wpx = chunk.pos.wx +% px;
const wpy = chunk.pos.wy +% py;
const wpx = chunk.super.pos.wx +% px;
const wpy = chunk.super.pos.wy +% py;
var pz : i32 = -32;
while(pz < chunk.width) : (pz += 32) {
const wpz = chunk.pos.wz +% pz;
while(pz < chunk.super.width) : (pz += 32) {
const wpz = chunk.super.pos.wz +% pz;
var seed = random.initSeed3D(worldSeed, .{wpx, wpy, wpz});
var relZ = pz + 16;
if(caveMap.isSolid(px, py, relZ)) {
relZ = caveMap.findTerrainChangeAbove(px, py, relZ);
} else {
relZ = caveMap.findTerrainChangeBelow(px, py, relZ) + chunk.pos.voxelSize;
relZ = caveMap.findTerrainChangeBelow(px, py, relZ) + chunk.super.pos.voxelSize;
}
if(relZ < pz or relZ >= pz + 32) continue;
const biome = biomeMap.getBiome(px, py, relZ);
@@ -65,14 +65,14 @@ pub fn generate(worldSeed: u64, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveM
}
} else { // TODO: Make this case work with cave-structures. Low priority because caves aren't even generated this far out.
var px: i32 = 0;
while(px < chunk.width + 16) : (px += chunk.pos.voxelSize) {
while(px < chunk.super.width + 16) : (px += chunk.super.pos.voxelSize) {
var py: i32 = 0;
while(py < chunk.width + 16) : (py += chunk.pos.voxelSize) {
const wpx = px -% 8 +% chunk.pos.wx;
const wpy = py -% 8 +% chunk.pos.wy;
while(py < chunk.super.width + 16) : (py += chunk.super.pos.voxelSize) {
const wpx = px -% 8 +% chunk.super.pos.wx;
const wpy = py -% 8 +% chunk.super.pos.wy;
const relZ = @as(i32, @intFromFloat(biomeMap.getSurfaceHeight(wpx, wpy))) -% chunk.pos.wz;
if(relZ < -32 or relZ >= chunk.width + 32) continue;
const relZ = @as(i32, @intFromFloat(biomeMap.getSurfaceHeight(wpx, wpy))) -% chunk.super.pos.wz;
if(relZ < -32 or relZ >= chunk.super.width + 32) continue;
var seed = random.initSeed3D(worldSeed, .{wpx, wpy, relZ});
var randomValue = random.nextFloat(&seed);
@@ -80,7 +80,7 @@ pub fn generate(worldSeed: u64, chunk: *main.chunk.Chunk, caveMap: CaveM
for(biome.vegetationModels) |model| { // TODO: Could probably use an alias table here.
var adaptedChance = model.chance;
// Increase the chance if fewer spawn points are considered. Messes up positions, but at that distance density matters more.
adaptedChance = 1 - std.math.pow(f32, 1 - adaptedChance, @as(f32, @floatFromInt(chunk.pos.voxelSize*chunk.pos.voxelSize)));
adaptedChance = 1 - std.math.pow(f32, 1 - adaptedChance, @as(f32, @floatFromInt(chunk.super.pos.voxelSize*chunk.super.pos.voxelSize)));
if(randomValue < adaptedChance) {
model.generate(px - 8, py - 8, relZ, chunk, caveMap, &seed);
break;

View File

@@ -29,31 +29,31 @@ pub fn deinit() void {
}
pub fn generate(worldSeed: u64, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
const voxelSizeShift = @ctz(chunk.pos.voxelSize);
pub fn generate(worldSeed: u64, chunk: *main.chunk.ServerChunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
const voxelSizeShift = @ctz(chunk.super.pos.voxelSize);
var x: u31 = 0;
while(x < chunk.width) : (x += chunk.pos.voxelSize) {
while(x < chunk.super.width) : (x += chunk.super.pos.voxelSize) {
var y: u31 = 0;
while(y < chunk.width) : (y += chunk.pos.voxelSize) {
while(y < chunk.super.width) : (y += chunk.super.pos.voxelSize) {
const heightData = caveMap.getHeightData(x, y);
var makeSurfaceStructure = true;
var z: i32 = chunk.width - chunk.pos.voxelSize;
while(z >= 0) : (z -= chunk.pos.voxelSize) {
var z: i32 = chunk.super.width - chunk.super.pos.voxelSize;
while(z >= 0) : (z -= chunk.super.pos.voxelSize) {
const mask = @as(u64, 1) << @intCast(z >> voxelSizeShift);
if(heightData & mask != 0) {
const biome = biomeMap.getBiome(x, y, z);
if(makeSurfaceStructure) {
const surfaceBlock = caveMap.findTerrainChangeAbove(x, y, z) - chunk.pos.voxelSize;
var seed: u64 = random.initSeed3D(worldSeed, .{chunk.pos.wx + x, chunk.pos.wy + y, chunk.pos.wz + z});
const surfaceBlock = caveMap.findTerrainChangeAbove(x, y, z) - chunk.super.pos.voxelSize;
var seed: u64 = random.initSeed3D(worldSeed, .{chunk.super.pos.wx + x, chunk.super.pos.wy + y, chunk.super.pos.wz + z});
// Add the biomes surface structure:
z = @min(z + chunk.pos.voxelSize, biome.structure.addSubTerranian(chunk, surfaceBlock, caveMap.findTerrainChangeBelow(x, y, z), x, y, &seed));
z = @min(z + chunk.super.pos.voxelSize, biome.structure.addSubTerranian(chunk, surfaceBlock, caveMap.findTerrainChangeBelow(x, y, z), x, y, &seed));
makeSurfaceStructure = false;
} else {
chunk.updateBlockInGeneration(x, y, z, .{.typ = biome.stoneBlockType, .data = 0}); // TODO: Natural standard.
}
} else {
if(z + chunk.pos.wz < 0 and z + chunk.pos.wz >= @as(i32, @intFromFloat(biomeMap.getSurfaceHeight(x + chunk.pos.wx, y + chunk.pos.wy))) - (chunk.pos.voxelSize - 1)) {
if(z + chunk.super.pos.wz < 0 and z + chunk.super.pos.wz >= @as(i32, @intFromFloat(biomeMap.getSurfaceHeight(x + chunk.super.pos.wx, y + chunk.super.pos.wy))) - (chunk.super.pos.voxelSize - 1)) {
chunk.updateBlockInGeneration(x, y, z, .{.typ = water, .data = 0}); // TODO: Natural standard.
} else {
chunk.updateBlockInGeneration(x, y, z, .{.typ = 0, .data = 0});

View File

@@ -29,7 +29,7 @@ pub fn loadModel(arenaAllocator: NeverFailingAllocator, parameters: JsonElement)
return self;
}
pub fn generate(self: *Boulder, x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
pub fn generate(self: *Boulder, x: i32, y: i32, z: i32, chunk: *main.chunk.ServerChunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
_ = caveMap;
const radius = self.size + self.sizeVariation*(random.nextFloat(seed)*2 - 1);
// My basic idea is to use a point cloud and a potential function to achieve somewhat smooth boulders without being a sphere.
@@ -46,11 +46,11 @@ pub fn generate(self: *Boulder, x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk
// This ensures that the entire boulder is inside a cube with side length 2*radius.
const maxRadius: i32 = @intFromFloat(@ceil(radius));
var px = chunk.startIndex(x - maxRadius);
while(px < x + maxRadius) : (px += chunk.pos.voxelSize) {
while(px < x + maxRadius) : (px += chunk.super.pos.voxelSize) {
var py = chunk.startIndex(y - maxRadius);
while(py < y + maxRadius) : (py += chunk.pos.voxelSize) {
while(py < y + maxRadius) : (py += chunk.super.pos.voxelSize) {
var pz = chunk.startIndex(z - maxRadius);
while(pz < z + maxRadius) : (pz += chunk.pos.voxelSize) {
while(pz < z + maxRadius) : (pz += chunk.super.pos.voxelSize) {
if(!chunk.liesInChunk(px, py, pz)) continue;
var potential: f32 = 0;
for(&pointCloud) |point| {

View File

@@ -33,7 +33,7 @@ pub fn loadModel(arenaAllocator: NeverFailingAllocator, parameters: JsonElement)
return self;
}
pub fn generate(self: *GroundPatch, x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
pub fn generate(self: *GroundPatch, x: i32, y: i32, z: i32, chunk: *main.chunk.ServerChunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
const width = self.width + (random.nextFloat(seed) - 0.5)*self.variation;
const orientation = 2*std.math.pi*random.nextFloat(seed);
const ellipseParam = 1 + random.nextFloat(seed);
@@ -46,9 +46,9 @@ pub fn generate(self: *GroundPatch, x: i32, y: i32, z: i32, chunk: *main.chunk.C
const ySecn = -ellipseParam*@sin(orientation)/width;
const xMin = @max(0, x - @as(i32, @intFromFloat(@ceil(width))));
const xMax = @min(chunk.width, x + @as(i32, @intFromFloat(@ceil(width))));
const xMax = @min(chunk.super.width, x + @as(i32, @intFromFloat(@ceil(width))));
const yMin = @max(0, y - @as(i32, @intFromFloat(@ceil(width))));
const yMax = @min(chunk.width, y + @as(i32, @intFromFloat(@ceil(width))));
const yMax = @min(chunk.super.width, y + @as(i32, @intFromFloat(@ceil(width))));
var px = chunk.startIndex(xMin);
while(px < xMax) : (px += 1) {
@@ -66,7 +66,7 @@ pub fn generate(self: *GroundPatch, x: i32, y: i32, z: i32, chunk: *main.chunk.C
startHeight = caveMap.findTerrainChangeBelow(px, py, startHeight);
}
var pz = chunk.startIndex(startHeight - self.depth + 1);
while(pz <= startHeight) : (pz += chunk.pos.voxelSize) {
while(pz <= startHeight) : (pz += chunk.super.pos.voxelSize) {
if(dist <= self.smoothness or (dist - self.smoothness)/(1 - self.smoothness) < random.nextFloat(seed)) {
if(chunk.liesInChunk(px, py, pz)) {
chunk.updateBlockInGeneration(px, py, pz, .{.typ = self.blockType, .data = 0}); // TODO: Natural standard.

View File

@@ -41,10 +41,10 @@ pub fn loadModel(arenaAllocator: NeverFailingAllocator, parameters: JsonElement)
return self;
}
pub fn generateStem(self: *SimpleTreeModel, x: i32, y: i32, z: i32, height: i32, chunk: *main.chunk.Chunk) void {
if(chunk.pos.voxelSize <= 2) {
pub fn generateStem(self: *SimpleTreeModel, x: i32, y: i32, z: i32, height: i32, chunk: *main.chunk.ServerChunk) void {
if(chunk.super.pos.voxelSize <= 2) {
var pz: i32 = chunk.startIndex(z);
while(pz < z + height) : (pz += chunk.pos.voxelSize) {
while(pz < z + height) : (pz += chunk.super.pos.voxelSize) {
if(chunk.liesInChunk(x, y, pz)) {
chunk.updateBlockIfDegradable(x, y, pz, if(pz == z + height-1) .{.typ = self.topWoodBlock, .data = 0} else .{.typ = self.woodBlock, .data = 0}); // TODO: Natural standard.
}
@@ -52,21 +52,21 @@ pub fn generateStem(self: *SimpleTreeModel, x: i32, y: i32, z: i32, height: i32,
}
}
pub fn generate(self: *SimpleTreeModel, x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
pub fn generate(self: *SimpleTreeModel, x: i32, y: i32, z: i32, chunk: *main.chunk.ServerChunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
var height = self.height0 + random.nextIntBounded(u31, seed, self.deltaHeight);
if(z + height >= caveMap.findTerrainChangeAbove(x, y, z)) // Space is too small.
return;
if(z > chunk.width) return;
if(z > chunk.super.width) return;
if(chunk.pos.voxelSize >= 16) {
if(chunk.super.pos.voxelSize >= 16) {
// Ensures that even at lowest resolution some leaves are rendered for smaller trees.
if(chunk.liesInChunk(x, y, z)) {
chunk.updateBlockIfDegradable(x, y, z, .{.typ = self.leavesBlock, .data = 0}); // TODO: Natural standard
}
if(chunk.liesInChunk(x, y, z + chunk.pos.voxelSize)) {
chunk.updateBlockIfDegradable(x, y, z + chunk.pos.voxelSize, .{.typ = self.leavesBlock, .data = 0}); // TODO: Natural standard
if(chunk.liesInChunk(x, y, z + chunk.super.pos.voxelSize)) {
chunk.updateBlockIfDegradable(x, y, z + chunk.super.pos.voxelSize, .{.typ = self.leavesBlock, .data = 0}); // TODO: Natural standard
}
}
@@ -76,12 +76,12 @@ pub fn generate(self: *SimpleTreeModel, x: i32, y: i32, z: i32, chunk: *main.chu
// Position of the first block of leaves
height = 3*height >> 1;
var pz = chunk.startIndex(z + @divTrunc(height, 3));
while(pz < z + height) : (pz += chunk.pos.voxelSize) {
while(pz < z + height) : (pz += chunk.super.pos.voxelSize) {
const j = @divFloor(height - (pz - z), 2);
var px = chunk.startIndex(x + 1 - j);
while(px < x + j) : (px += chunk.pos.voxelSize) {
while(px < x + j) : (px += chunk.super.pos.voxelSize) {
var py = chunk.startIndex(y + 1 - j);
while(py < y + j) : (py += chunk.pos.voxelSize) {
while(py < y + j) : (py += chunk.super.pos.voxelSize) {
if(chunk.liesInChunk(px, py, pz))
chunk.updateBlockIfDegradable(px, py, pz, .{.typ = self.leavesBlock, .data = 0}); // TODO: Natural standard.
}
@@ -97,11 +97,11 @@ pub fn generate(self: *SimpleTreeModel, x: i32, y: i32, z: i32, chunk: *main.chu
const randomRadiusSqr: i32 = @intFromFloat((floatLeafRadius - 0.25)*(floatLeafRadius - 0.25));
const center = z + height;
var pz = chunk.startIndex(center - leafRadius);
while(pz < center + leafRadius) : (pz += chunk.pos.voxelSize) {
while(pz < center + leafRadius) : (pz += chunk.super.pos.voxelSize) {
var px = chunk.startIndex(x - leafRadius);
while(px < x + leafRadius) : (px += chunk.pos.voxelSize) {
while(px < x + leafRadius) : (px += chunk.super.pos.voxelSize) {
var py = chunk.startIndex(y - leafRadius);
while(py < y + leafRadius) : (py += chunk.pos.voxelSize) {
while(py < y + leafRadius) : (py += chunk.super.pos.voxelSize) {
const distSqr = (pz - center)*(pz - center) + (px - x)*(px - x) + (py - y)*(py - y);
if(chunk.liesInChunk(px, py, pz) and distSqr < radiusSqr and (distSqr < randomRadiusSqr or random.nextInt(u1, seed) != 0)) { // TODO: Use another seed to make this more reliable!
chunk.updateBlockIfDegradable(px, py, pz, .{.typ = self.leavesBlock, .data = 0}); // TODO: Natural standard.
@@ -122,11 +122,11 @@ pub fn generate(self: *SimpleTreeModel, x: i32, y: i32, z: i32, chunk: *main.chu
const randomRadiusSqr: i32 = @intFromFloat((floatLeafRadius - 0.25)*(floatLeafRadius - 0.25));
const center = z + height;
var pz = chunk.startIndex(center - leafRadius);
while(pz < center + leafRadius) : (pz += chunk.pos.voxelSize) {
while(pz < center + leafRadius) : (pz += chunk.super.pos.voxelSize) {
var px = chunk.startIndex(x - leafRadius);
while(px < x + leafRadius) : (px += chunk.pos.voxelSize) {
while(px < x + leafRadius) : (px += chunk.super.pos.voxelSize) {
var py = chunk.startIndex(y - leafRadius);
while(py < y + leafRadius) : (py += chunk.pos.voxelSize) {
while(py < y + leafRadius) : (py += chunk.super.pos.voxelSize) {
const distSqr = (pz - center)*(pz - center) + (px - x)*(px - x) + (py - y)*(py - y);
if(chunk.liesInChunk(px, py, pz) and distSqr < radiusSqr and (distSqr < randomRadiusSqr or random.nextInt(u1, seed) != 0)) { // TODO: Use another seed to make this more reliable!
chunk.updateBlockIfDegradable(px, py, pz, .{.typ = self.leavesBlock, .data = 0}); // TODO: Natural standard.
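
The chunk.pos → chunk.super.pos renames throughout these generators follow from the new ServerChunk type embedding the shared chunk state in a field named super. A minimal sketch of that shape — the exact server-side field set here is an assumption, not the full definition:

	pub const ServerChunk = struct {
		super: Chunk, // pos, block data and size fields live here, shared with the client
		// server-only state (assumed selection): dirty flag, lock and reference count
		wasChanged: bool = false,
		mutex: std.Thread.Mutex = .{},
		refCount: std.atomic.Value(u16),
	};
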

View File

@@ -29,13 +29,13 @@ pub fn loadModel(arenaAllocator: NeverFailingAllocator, parameters: JsonElement)
return self;
}
pub fn generate(self: *SimpleVegetation, x: i32, y: i32, z: i32, chunk: *main.chunk.Chunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
if(chunk.pos.voxelSize > 2 and (x & chunk.pos.voxelSize-1 != 0 or y & chunk.pos.voxelSize-1 != 0)) return;
pub fn generate(self: *SimpleVegetation, x: i32, y: i32, z: i32, chunk: *main.chunk.ServerChunk, caveMap: terrain.CaveMap.CaveMapView, seed: *u64) void {
if(chunk.super.pos.voxelSize > 2 and (x & chunk.super.pos.voxelSize-1 != 0 or y & chunk.super.pos.voxelSize-1 != 0)) return;
if(chunk.liesInChunk(x, y, z)) {
const height = self.height0 + random.nextIntBounded(u31, seed, self.deltaHeight+1);
if(z + height >= caveMap.findTerrainChangeAbove(x, y, z)) return; // Space is too small.
var pz: i32 = chunk.startIndex(z);
while(pz < z + height) : (pz += chunk.pos.voxelSize) {
while(pz < z + height) : (pz += chunk.super.pos.voxelSize) {
if(chunk.liesInChunk(x, y, pz)) {
chunk.updateBlockIfDegradable(x, y, pz, .{.typ = self.blockType, .data = 0}); // TODO: Natural standard.
}
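
The early return at the top of generate is an LOD alignment filter: voxelSize is a power of two, so x & (voxelSize - 1) extracts the sub-grid offset, and anything non-zero lies between cells of the reduced grid. With illustrative values:

	const voxelSize: i32 = 4; // example LOD, mask = voxelSize - 1 = 0b11
	std.debug.assert(5 & (voxelSize - 1) != 0); // x = 5 is off the grid — skipped
	std.debug.assert(8 & (voxelSize - 1) == 0); // x = 8 is aligned — may generate
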

View File

@@ -22,7 +22,7 @@ pub const CaveMap = @import("CaveMap.zig");
pub const BlockGenerator = struct {
init: *const fn(parameters: JsonElement) void,
deinit: *const fn() void,
generate: *const fn(seed: u64, chunk: *main.chunk.Chunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void,
generate: *const fn(seed: u64, chunk: *main.chunk.ServerChunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void,
/// Used to prioritize certain generators over others.
priority: i32,
/// To avoid duplicate seeds in similar generation algorithms, the SurfaceGenerator xors the world-seed with the generator-specific seed.
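
Every registered generator has to match the updated function pointer. A skeletal implementation under the new signature — the struct name is hypothetical:

	pub const MyGenerator = struct { // hypothetical example generator
		pub fn generate(seed: u64, chunk: *main.chunk.ServerChunk, caveMap: CaveMap.CaveMapView, biomeMap: CaveBiomeMap.CaveBiomeMapView) void {
			_ = seed; _ = caveMap; _ = biomeMap;
			// Position and size data is now reached through the embedded super field:
			const size = chunk.super.width;
			_ = size;
		}
	};
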

View File

@@ -5,7 +5,7 @@ const Block = main.blocks.Block;
const Cache = main.utils.Cache;
const chunk = main.chunk;
const ChunkPosition = chunk.ChunkPosition;
const Chunk = chunk.Chunk;
const ServerChunk = chunk.ServerChunk;
const files = main.files;
const utils = main.utils;
const ItemDropManager = main.itemdrop.ItemDropManager;
@@ -29,7 +29,7 @@ const ChunkManager = struct {
// There will be at most 1 GiB of chunks in here. TODO: Allow configuring this in the server settings.
const reducedChunkCacheMask = 2047;
var chunkCache: Cache(Chunk, reducedChunkCacheMask+1, 4, chunkDeinitFunctionForCache) = .{};
var chunkCache: Cache(ServerChunk, reducedChunkCacheMask+1, 4, chunkDeinitFunctionForCache) = .{};
const ChunkLoadTask = struct {
pos: ChunkPosition,
@@ -191,24 +191,27 @@ const ChunkManager = struct {
}
}
fn chunkInitFunctionForCacheAndIncreaseRefCount(pos: ChunkPosition) *Chunk {
fn chunkInitFunctionForCacheAndIncreaseRefCount(pos: ChunkPosition) *ServerChunk {
const regionSize = pos.voxelSize*chunk.chunkSize*storage.RegionFile.regionSize;
const regionMask: i32 = regionSize - 1;
const region = storage.loadRegionFileAndIncreaseRefCount(pos.wx & ~regionMask, pos.wy & ~regionMask, pos.wz & ~regionMask, pos.voxelSize);
defer region.decreaseRefCount();
const ch = ServerChunk.initAndIncreaseRefCount(pos);
ch.mutex.lock();
defer ch.mutex.unlock();
if(region.getChunk(
main.stackAllocator,
@as(usize, @intCast(pos.wx -% region.pos.wx))/pos.voxelSize/chunk.chunkSize,
@as(usize, @intCast(pos.wy -% region.pos.wy))/pos.voxelSize/chunk.chunkSize,
@as(usize, @intCast(pos.wz -% region.pos.wz))/pos.voxelSize/chunk.chunkSize,
)) |ch| blk: { // Load chunk from file:
defer main.stackAllocator.free(ch);
return storage.ChunkCompression.decompressChunkAndIncreaseRefCount(ch) catch {
)) |data| blk: { // Load chunk from file:
defer main.stackAllocator.free(data);
storage.ChunkCompression.decompressChunk(&ch.super, data) catch {
std.log.err("Storage for chunk {} in region file at {} is corrupted", .{pos, region.pos});
break :blk;
};
return ch;
}
const ch = Chunk.initAndIncreaseRefCount(pos);
ch.generated = true;
const caveMap = terrain.CaveMap.CaveMapView.init(ch);
defer caveMap.deinit();
@@ -217,24 +220,25 @@ const ChunkManager = struct {
for(server.world.?.chunkManager.terrainGenerationProfile.generators) |generator| {
generator.generate(server.world.?.seed ^ generator.generatorSeed, ch, caveMap, biomeMap);
}
ch.setChanged();
return ch;
}
fn chunkDeinitFunctionForCache(ch: *Chunk) void {
fn chunkDeinitFunctionForCache(ch: *ServerChunk) void {
ch.decreaseRefCount();
}
/// Generates a normal chunk at a given location, or if possible gets it from the cache.
pub fn getOrGenerateChunkAndIncreaseRefCount(pos: ChunkPosition) *Chunk { // TODO: This is not thread safe! The chunk could get removed from the cache while in use. Reference counting should probably be used here.
pub fn getOrGenerateChunkAndIncreaseRefCount(pos: ChunkPosition) *ServerChunk {
const mask = pos.voxelSize*chunk.chunkSize - 1;
std.debug.assert(pos.wx & mask == 0 and pos.wy & mask == 0 and pos.wz & mask == 0);
const result = chunkCache.findOrCreate(pos, chunkInitFunctionForCacheAndIncreaseRefCount, Chunk.increaseRefCount);
const result = chunkCache.findOrCreate(pos, chunkInitFunctionForCacheAndIncreaseRefCount, ServerChunk.increaseRefCount);
return result;
}
pub fn getChunkFromCacheAndIncreaseRefCount(pos: ChunkPosition) ?*Chunk {
pub fn getChunkFromCacheAndIncreaseRefCount(pos: ChunkPosition) ?*ServerChunk {
const mask = pos.voxelSize*chunk.chunkSize - 1;
std.debug.assert(pos.wx & mask == 0 and pos.wy & mask == 0 and pos.wz & mask == 0);
const result = chunkCache.find(pos, Chunk.increaseRefCount) orelse return null;
const result = chunkCache.find(pos, ServerChunk.increaseRefCount) orelse return null;
return result;
}
};
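
This also resolves the TODO that was removed above: the cache can no longer free a chunk out from under a user, because every lookup hands out a counted reference. The expected call-site discipline, as a sketch:

	const ch = ChunkManager.getOrGenerateChunkAndIncreaseRefCount(pos);
	defer ch.decreaseRefCount(); // release the reference this call handed out
	ch.mutex.lock();
	defer ch.mutex.unlock();
	// ...read or modify ch.super.data while holding the lock...
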
@@ -329,7 +333,7 @@ pub const ServerWorld = struct {
regionUpdateQueue: main.utils.CircularBufferQueue(RegionUpdateRequest),
const ChunkUpdateRequest = struct {
ch: *Chunk,
ch: *ServerChunk,
milliTimeStamp: i64,
};
@@ -497,12 +501,18 @@ pub const ServerWorld = struct {
// Stores at least one chunk and one region per iteration.
// All chunks and regions will be stored within the storage time.
const insertionTime = newTime -% main.settings.storageTime;
self.mutex.lock();
defer self.mutex.unlock();
while(self.chunkUpdateQueue.dequeue()) |updateRequest| {
self.mutex.unlock();
defer self.mutex.lock();
updateRequest.ch.save(self);
updateRequest.ch.decreaseRefCount();
if(updateRequest.milliTimeStamp -% insertionTime <= 0) break;
}
while(self.regionUpdateQueue.dequeue()) |updateRequest| {
self.mutex.unlock();
defer self.mutex.lock();
updateRequest.region.store();
updateRequest.region.decreaseRefCount();
if(updateRequest.milliTimeStamp -% insertionTime <= 0) break;
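
The unlock/defer-lock pairs inserted here are one of the race fixes: the queues are only touched while the world mutex is held, but the slow disk I/O (save/store) runs with it released, and the defer re-acquires it at the end of each loop iteration, before the next dequeue. The shape of the pattern, reduced to its essentials with generic names:

	self.mutex.lock();
	defer self.mutex.unlock();
	while(queue.dequeue()) |item| { // dequeue only happens under the lock
		self.mutex.unlock(); // don't hold the lock across slow I/O
		defer self.mutex.lock(); // runs when this iteration's scope ends, re-locking for the next dequeue
		item.doSlowWork();
	}
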
@@ -523,7 +533,7 @@ pub const ServerWorld = struct {
self.chunkManager.queueLightMap(pos, source);
}
pub fn getChunk(self: *ServerWorld, x: i32, y: i32, z: i32) ?*Chunk {
pub fn getChunk(self: *ServerWorld, x: i32, y: i32, z: i32) ?*ServerChunk {
_ = self;
_ = x;
_ = y;
@@ -532,7 +542,7 @@ pub const ServerWorld = struct {
return null;
}
pub fn getOrGenerateChunkAndIncreaseRefCount(_: *ServerWorld, pos: chunk.ChunkPosition) *Chunk {
pub fn getOrGenerateChunkAndIncreaseRefCount(_: *ServerWorld, pos: chunk.ChunkPosition) *ServerChunk {
return ChunkManager.getOrGenerateChunkAndIncreaseRefCount(pos);
}
@@ -563,9 +573,9 @@ pub const ServerWorld = struct {
var ch = baseChunk;
if(nx & chunk.chunkMask != nx or ny & chunk.chunkMask != ny or nz & chunk.chunkMask != nz) {
ch = ChunkManager.getOrGenerateChunkAndIncreaseRefCount(.{
.wx = baseChunk.pos.wx + nx & ~@as(i32, chunk.chunkMask),
.wy = baseChunk.pos.wy + ny & ~@as(i32, chunk.chunkMask),
.wz = baseChunk.pos.wz + nz & ~@as(i32, chunk.chunkMask),
.wx = baseChunk.super.pos.wx + nx & ~@as(i32, chunk.chunkMask),
.wy = baseChunk.super.pos.wy + ny & ~@as(i32, chunk.chunkMask),
.wz = baseChunk.super.pos.wz + nz & ~@as(i32, chunk.chunkMask),
.voxelSize = 1,
});
}
@@ -589,7 +599,7 @@ pub const ServerWorld = struct {
baseChunk.updateBlockAndSetChanged(x, y, z, newBlock);
}
pub fn queueChunkUpdateAndDecreaseRefCount(self: *ServerWorld, ch: *Chunk) void {
pub fn queueChunkUpdateAndDecreaseRefCount(self: *ServerWorld, ch: *ServerChunk) void {
self.mutex.lock();
self.chunkUpdateQueue.enqueue(.{.ch = ch, .milliTimeStamp = std.time.milliTimestamp()});
self.mutex.unlock();
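
The AndDecreaseRefCount suffix describes deferred ownership transfer rather than an immediate decrement: the caller's reference moves into the queue, and the matching decreaseRefCount only runs in the storage loop above, after the chunk has been saved. Call-site sketch:

	ch.increaseRefCount(); // take a reference on behalf of the queue...
	world.queueChunkUpdateAndDecreaseRefCount(ch); // ...released by the save loop after ch.save()
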

View File

@@ -1599,14 +1599,14 @@ pub const TimeDifference = struct {
}
};
pub fn assertLocked(mutex: *std.Thread.Mutex) void {
pub fn assertLocked(mutex: *const std.Thread.Mutex) void {
if(builtin.mode == .Debug) {
std.debug.assert(!mutex.tryLock());
std.debug.assert(!@constCast(mutex).tryLock());
}
}
pub fn assertLockedShared(lock: *std.Thread.RwLock) void {
pub fn assertLockedShared(lock: *const std.Thread.RwLock) void {
if(builtin.mode == .Debug) {
std.debug.assert(!lock.tryLock());
std.debug.assert(!@constCast(lock).tryLock());
}
}
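
Widening the parameters to *const lets these assertions be called on locks reached through const pointers; the @constCast stays confined to debug builds, where a failing tryLock is taken as evidence that the lock is already held (and a succeeding one trips the assert before anything else runs). Typical use, with a hypothetical guarded struct:

	const Guarded = struct { // hypothetical
		mutex: std.Thread.Mutex = .{},
		value: u32 = 0,

		fn get(self: *const Guarded) u32 {
			main.utils.assertLocked(&self.mutex); // accepted now that the parameter is *const
			return self.value;
		}
	};
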