Store all chunks when the world is closed and generate LODs for the chunks that need to be stored.
Closes #80

parent 73b8229b24
commit 628404393c
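In short: `Chunk.deinit` now persists any chunk whose `wasChanged` flag is set, the block-update protocol marks chunks as changed instead of saving them eagerly, and `ChunkManager.deinit` flushes the chunk cache on shutdown so every cached chunk gets evicted and saved, presumably regenerating LODs along the way via the `updateFromLowerResolution` path touched below.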
@@ -202,6 +202,9 @@ pub const Chunk = struct {
 	}
 
 	pub fn deinit(self: *Chunk) void {
+		if(self.wasChanged) {
+			self.save(main.server.world.?);
+		}
 		self.data.deinit();
 		memoryPoolMutex.lock();
 		memoryPool.destroy(@alignCast(self));
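The setter that raises `wasChanged` is not part of this diff; only its call site, `updateBlockAndSetChanged`, appears in the network hunk further down. A minimal sketch of what such a method could look like, assuming a `setValue` counterpart to the `getValue` used in this file (the body is an assumption, not the actual implementation):

	// Sketch only — not the code from this commit. Assumes Chunk.data has a
	// setValue counterpart to the getValue used elsewhere in this file.
	pub fn updateBlockAndSetChanged(self: *Chunk, x: i32, y: i32, z: i32, newBlock: Block) void {
		self.mutex.lock();
		defer self.mutex.unlock();
		self.data.setValue(getIndex(x, y, z), newBlock);
		self.wasChanged = true; // deinit() above will then save the chunk.
	}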
@@ -278,10 +281,13 @@ pub const Chunk = struct {
 		return self.data.getValue(index);
 	}
 
-	pub fn updateFromLowerResolution(self: *Chunk, other: *const Chunk) void {
+	pub fn updateFromLowerResolution(self: *Chunk, other: *Chunk) void {
 		const xOffset = if(other.pos.wx != self.pos.wx) chunkSize/2 else 0; // Offsets of the lower resolution chunk in this chunk.
 		const yOffset = if(other.pos.wy != self.pos.wy) chunkSize/2 else 0;
 		const zOffset = if(other.pos.wz != self.pos.wz) chunkSize/2 else 0;
+		self.mutex.lock();
+		defer self.mutex.unlock();
+		main.utils.assertLocked(&other.mutex);
 
 		var x: u31 = 0;
 		while(x < chunkSize/2): (x += 1) {
@@ -302,7 +308,10 @@ pub const Chunk = struct {
 					const index = getIndex(x*2 + dx, y*2 + dy, z*2 + dz);
 					const i = dx*4 + dz*2 + dy;
 					octantBlocks[i] = other.data.getValue(index);
-					if(octantBlocks[i].typ == 0) continue; // I don't care about air blocks.
+					if(octantBlocks[i].typ == 0) {
+						neighborCount[i] = 0;
+						continue; // I don't care about air blocks.
+					}
 
 					var count: u31 = 0;
 					for(Neighbors.iterable) |n| {
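A note on the indexing in this hunk: `dx`, `dz` and `dy` each take the values 0 or 1, so `i = dx*4 + dz*2 + dy` is a 3-bit pack that enumerates the eight octants of a 2×2×2 cell exactly once. A standalone check in plain Zig, independent of the Cubyz code:

	const std = @import("std");

	test "dx*4 + dz*2 + dy enumerates the 8 octants exactly once" {
		var seen = [_]bool{false} ** 8;
		var dx: u31 = 0;
		while (dx < 2) : (dx += 1) {
			var dz: u31 = 0;
			while (dz < 2) : (dz += 1) {
				var dy: u31 = 0;
				while (dy < 2) : (dy += 1) {
					const i = dx*4 + dz*2 + dy;
					try std.testing.expect(!seen[i]); // no octant visited twice
					seen[i] = true;
				}
			}
		}
	}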
@@ -126,18 +126,18 @@ pub const World = struct {
 
 	pub fn deinit(self: *World) void {
 		// TODO: Close all world related guis.
-		main.threadPool.clear();
 		self.conn.deinit();
 		self.itemDrops.deinit();
 		self.blockPalette.deinit();
 		Player.inventory__SEND_CHANGES_TO_SERVER.deinit(main.globalAllocator);
 		self.manager.deinit();
-		assets.unloadAssets();
 		main.server.stop();
+		main.threadPool.clear();
 		if(main.server.thread) |serverThread| {
			serverThread.join();
			main.server.thread = null;
 		}
+		assets.unloadAssets();
 	}
 
 	pub fn finishHandshake(self: *World, json: JsonElement) !void {
@@ -440,9 +440,6 @@ pub fn main() void {
 		}
 	}
 
-	// Make sure that threadPool is done before freeing any data
-	threadPool.clear();
-
 	if(game.world) |world| {
 		world.deinit();
 		game.world = null;
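Review note on the two hunks above: the unconditional `threadPool.clear()` in `main()` is dropped because `World.deinit` now drains the pool itself, after `main.server.stop()` but before joining the server thread, and `assets.unloadAssets()` moves to the very end of `World.deinit`. Presumably this ordering matters because the shutdown path introduced by this commit saves chunks (and generates their LODs) while the server winds down, so worker tasks and assets have to stay alive until the server thread has joined.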
@@ -817,12 +817,10 @@ pub const Protocols = struct {
 			const y = std.mem.readInt(i32, data[4..8], .big);
 			const z = std.mem.readInt(i32, data[8..12], .big);
 			const newBlock = Block.fromInt(std.mem.readInt(u32, data[12..16], .big));
-			if(conn.user != null) {
-				// TODO: Store changes in batches to reduce cost of singe block updates.
+			if(conn.user != null) { // TODO: Send update event to other players.
 				const mask = ~@as(i32, chunk.chunkMask);
 				const ch = main.server.world.?.getOrGenerateChunk(.{.wx = x & mask, .wy = y & mask, .wz = z & mask, .voxelSize = 1});
 				ch.updateBlockAndSetChanged(x & chunk.chunkMask, y & chunk.chunkMask, z & chunk.chunkMask, newBlock);
-				ch.save(main.server.world.?);
 			} else {
 				renderer.mesh_storage.updateBlock(x, y, z, newBlock);
 			}
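The masking in this hunk relies on `chunkSize` being a power of two: `chunk.chunkMask` is `chunkSize - 1`, so `x & ~chunkMask` snaps a world coordinate to its chunk origin while `x & chunkMask` gives the block position inside the chunk, and two's complement arithmetic makes this work for negative coordinates too. A standalone check, assuming a chunk size of 32 (the actual constant is defined elsewhere in the codebase):

	const std = @import("std");

	test "chunk masking splits a world coordinate without division" {
		const chunkMask: i32 = 31; // chunkSize - 1, assuming chunkSize == 32
		const mask = ~@as(i32, chunkMask);
		// 100 = 3*32 + 4: chunk origin 96, local offset 4.
		try std.testing.expectEqual(@as(i32, 96), @as(i32, 100) & mask);
		try std.testing.expectEqual(@as(i32, 4), @as(i32, 100) & chunkMask);
		// Negative coordinates round toward -inf, as a chunk grid requires:
		try std.testing.expectEqual(@as(i32, -32), @as(i32, -5) & mask);
		try std.testing.expectEqual(@as(i32, 27), @as(i32, -5) & chunkMask);
	}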
@@ -158,8 +158,9 @@ const ChunkManager = struct {
 	}
 
 	pub fn deinit(self: ChunkManager) void {
-		// TODO: Save chunks
-		chunkCache.clear();
+		for(0..main.settings.highestLOD) |_| {
+			chunkCache.clear();
+		}
 		server.terrain.deinit();
 		main.assets.unloadAssets();
 		self.terrainGenerationProfile.deinit();
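The removed `// TODO: Save chunks` is resolved indirectly: `chunkCache.clear()` evicts every entry through `chunkDeinitFunctionForCache` (below), and `ch.deinit()` now saves changed chunks. The clear runs once per LOD level, presumably because saving a chunk regenerates its lower-resolution parent, which can land back in the cache and needs another pass to be flushed.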
@@ -180,7 +181,7 @@ const ChunkManager = struct {
 		const ch = getOrGenerateChunk(pos);
 		if(source) |_source| {
 			main.network.Protocols.chunkTransmission.sendChunk(_source.conn, ch);
-		} else { // TODO: This feature was temporarily removed to keep compatibility with the zig version.
+		} else {
 			server.mutex.lock();
 			defer server.mutex.unlock();
 			for(server.users.items) |user| {
@@ -220,7 +221,6 @@ const ChunkManager = struct {
 
 	fn chunkDeinitFunctionForCache(ch: *Chunk) void {
 		ch.deinit();
-		// TODO: Store chunk.
 	}
 	/// Generates a normal chunk at a given location, or if possible gets it from the cache.
 	pub fn getOrGenerateChunk(pos: ChunkPosition) *Chunk { // TODO: This is not thread safe! The chunk could get removed from the cache while in use. Reference counting should probably be used here.
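The thread-safety TODO on `getOrGenerateChunk` suggests reference counting. A minimal sketch of that idea, with entirely hypothetical names (nothing below exists in the codebase): the cache holds one reference, every caller takes another, and eviction can no longer free a chunk that is still in use.

	const std = @import("std");

	// Hypothetical sketch, not Cubyz code: a chunk wrapper whose last release
	// performs the deinit/save that chunkDeinitFunctionForCache does today.
	const RefCountedChunk = struct {
		refCount: std.atomic.Value(u32) = std.atomic.Value(u32).init(1), // the cache's own reference
		// ... chunk payload would live here ...

		fn acquire(self: *RefCountedChunk) void {
			_ = self.refCount.fetchAdd(1, .monotonic);
		}

		fn release(self: *RefCountedChunk) void {
			if (self.refCount.fetchSub(1, .acq_rel) == 1) {
				// Last reference dropped (cache eviction or final user):
				// save and free the chunk here.
			}
		}
	};

Under this scheme `getOrGenerateChunk` would call `acquire()` before returning, each caller would pair that with `release()`, and the cache's eviction callback would release its own reference instead of freeing the chunk directly.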