Mirror of https://github.com/PixelGuys/Cubyz.git
Store all chunks when the world is closed and generate LODs for the chunks that need to be stored.
Closes #80
commit 628404393c
parent 73b8229b24
@@ -202,6 +202,9 @@ pub const Chunk = struct {
 	}

 	pub fn deinit(self: *Chunk) void {
+		if(self.wasChanged) {
+			self.save(main.server.world.?);
+		}
 		self.data.deinit();
 		memoryPoolMutex.lock();
 		memoryPool.destroy(@alignCast(self));
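This first hunk makes chunk destruction itself responsible for persistence: any chunk flagged as changed is written back before its memory returns to the pool, so closing the world implies storing every changed chunk without a separate save pass. A minimal sketch of the flag-guarded flush, with `World` and the save target reduced to stand-ins (only the `wasChanged` check in `deinit` comes from the diff):

const std = @import("std");

// Stand-in for the real server world; it only counts saves here.
const World = struct {
    savedChunks: u32 = 0,
};

const Chunk = struct {
    wasChanged: bool = false,

    fn save(self: *Chunk, world: *World) void {
        // ... the real code serializes block data to disk ...
        world.savedChunks += 1;
        self.wasChanged = false;
    }

    fn deinit(self: *Chunk, world: *World) void {
        // Flush pending edits before the memory is recycled.
        if (self.wasChanged) self.save(world);
    }
};

pub fn main() void {
    var world = World{};
    var ch = Chunk{ .wasChanged = true };
    ch.deinit(&world);
    std.debug.assert(world.savedChunks == 1);
}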
@@ -278,10 +281,13 @@ pub const Chunk = struct {
 		return self.data.getValue(index);
 	}

-	pub fn updateFromLowerResolution(self: *Chunk, other: *const Chunk) void {
+	pub fn updateFromLowerResolution(self: *Chunk, other: *Chunk) void {
 		const xOffset = if(other.pos.wx != self.pos.wx) chunkSize/2 else 0; // Offsets of the lower resolution chunk in this chunk.
 		const yOffset = if(other.pos.wy != self.pos.wy) chunkSize/2 else 0;
 		const zOffset = if(other.pos.wz != self.pos.wz) chunkSize/2 else 0;
+		self.mutex.lock();
+		defer self.mutex.unlock();
+		main.utils.assertLocked(&other.mutex);

 		var x: u31 = 0;
 		while(x < chunkSize/2): (x += 1) {
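Note that `other` loses its `const` qualifier: asserting or taking a mutex mutates it. The callee locks its own mutex but only asserts that the caller already holds `other.mutex`, which keeps the lock acquisition order in the caller's hands and avoids a lock-order inversion between the two chunks. The diff does not show `main.utils.assertLocked` itself; a plausible stand-in, assuming a tryLock-based debug check:

const std = @import("std");

// Plausible assertLocked: if we can acquire the mutex, nobody held it,
// which violates the caller's contract. std.Thread.Mutex is
// non-reentrant, so tryLock also fails for the owning thread.
fn assertLocked(mutex: *std.Thread.Mutex) void {
    if (std.debug.runtime_safety) {
        if (mutex.tryLock()) {
            mutex.unlock();
            @panic("mutex was expected to be locked by the caller");
        }
    }
}

const Chunk = struct {
    mutex: std.Thread.Mutex = .{},

    fn updateFromLowerResolution(self: *Chunk, other: *Chunk) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        assertLocked(&other.mutex); // caller must hold other's lock
        // ... merge other's blocks into self ...
    }
};

pub fn main() void {
    var a = Chunk{};
    var b = Chunk{};
    b.mutex.lock();
    defer b.mutex.unlock();
    a.updateFromLowerResolution(&b);
}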
@@ -302,7 +308,10 @@ pub const Chunk = struct {
 						const index = getIndex(x*2 + dx, y*2 + dy, z*2 + dz);
 						const i = dx*4 + dz*2 + dy;
 						octantBlocks[i] = other.data.getValue(index);
-						if(octantBlocks[i].typ == 0) continue; // I don't care about air blocks.
+						if(octantBlocks[i].typ == 0) {
+							neighborCount[i] = 0;
+							continue; // I don't care about air blocks.
+						}

 						var count: u31 = 0;
 						for(Neighbors.iterable) |n| {
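The `neighborCount` array is presumably reused across 2x2x2 cells, so the old early `continue` for air blocks could leave a stale count behind from a previously processed cell; the fix zeroes the entry before skipping. The sketch below checks the octant packing `i = dx*4 + dz*2 + dy` from the hunk above, which maps each corner of the 2x2x2 cell to a unique slot in those arrays:

const std = @import("std");

// Verify that dx*4 + dz*2 + dy is a bijection from {0,1}^3 onto 0..7,
// i.e. every octant of the 2x2x2 cell gets its own array slot.
pub fn main() void {
    var seen = [_]bool{false} ** 8;
    var dx: u31 = 0;
    while (dx <= 1) : (dx += 1) {
        var dy: u31 = 0;
        while (dy <= 1) : (dy += 1) {
            var dz: u31 = 0;
            while (dz <= 1) : (dz += 1) {
                const i = dx * 4 + dz * 2 + dy;
                seen[i] = true;
            }
        }
    }
    for (seen) |s| std.debug.assert(s);
}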
@@ -126,18 +126,18 @@ pub const World = struct {

 	pub fn deinit(self: *World) void {
 		// TODO: Close all world related guis.
+		main.threadPool.clear();
 		self.conn.deinit();
 		self.itemDrops.deinit();
 		self.blockPalette.deinit();
 		Player.inventory__SEND_CHANGES_TO_SERVER.deinit(main.globalAllocator);
 		self.manager.deinit();
-		assets.unloadAssets();
 		main.server.stop();
-		main.threadPool.clear();
 		if(main.server.thread) |serverThread| {
 			serverThread.join();
 			main.server.thread = null;
 		}
+		assets.unloadAssets();
 	}

 	pub fn finishHandshake(self: *World, json: JsonElement) !void {
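The client-side teardown is reordered: the thread pool is drained first, so no background job can touch world data while it is being freed, and assets are unloaded last, only after the integrated server has stopped and its thread has joined, since saving chunks during shutdown still needs asset data such as the block registry (the standalone `threadPool.clear()` in `main()` becomes redundant, as the next hunk shows). A reduced sketch of the join-before-free constraint, with every name a stand-in:

const std = @import("std");

var running = std.atomic.Value(bool).init(true);

// Stand-in for the integrated server: keeps reading shared asset data
// until it is told to stop.
fn serverLoop(assets: *const u32) void {
    while (running.load(.acquire)) {
        std.mem.doNotOptimizeAway(assets.*); // "save chunks using assets"
    }
}

pub fn main() !void {
    var assets: u32 = 42; // stand-in for the loaded asset registry

    const thread = try std.Thread.spawn(.{}, serverLoop, .{&assets});
    // ... play ...
    running.store(false, .release); // like main.server.stop()
    thread.join(); // only now is it safe to tear down shared state
    assets = 0; // like assets.unloadAssets()
}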
@@ -440,9 +440,6 @@ pub fn main() void {
 		}
 	}

-	// Make sure that threadPool is done before freeing any data
-	threadPool.clear();
-
 	if(game.world) |world| {
 		world.deinit();
 		game.world = null;
@@ -817,12 +817,10 @@ pub const Protocols = struct {
 			const y = std.mem.readInt(i32, data[4..8], .big);
 			const z = std.mem.readInt(i32, data[8..12], .big);
 			const newBlock = Block.fromInt(std.mem.readInt(u32, data[12..16], .big));
-			if(conn.user != null) {
-				// TODO: Store changes in batches to reduce cost of singe block updates.
+			if(conn.user != null) { // TODO: Send update event to other players.
 				const mask = ~@as(i32, chunk.chunkMask);
 				const ch = main.server.world.?.getOrGenerateChunk(.{.wx = x & mask, .wy = y & mask, .wz = z & mask, .voxelSize = 1});
 				ch.updateBlockAndSetChanged(x & chunk.chunkMask, y & chunk.chunkMask, z & chunk.chunkMask, newBlock);
-				ch.save(main.server.world.?);
 			} else {
 				renderer.mesh_storage.updateBlock(x, y, z, newBlock);
 			}
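Server-side, the per-edit `ch.save(...)` disappears: a changed chunk is now persisted once, when it is evicted or the world closes, which effectively gives the batching the removed TODO asked for. The handler decodes a fixed 16-byte payload of three big-endian `i32` coordinates and a `u32` block id; `x & ~chunkMask` snaps a world coordinate to its chunk origin while `x & chunkMask` keeps the position inside the chunk. A self-contained sketch of that decode and mask arithmetic (payload built by hand, `chunkSize` assumed to be 32 as in Cubyz):

const std = @import("std");

const chunkSize = 32;
const chunkMask: i32 = chunkSize - 1;

pub fn main() void {
    // Build a 16-byte block-update payload by hand.
    var data: [16]u8 = undefined;
    std.mem.writeInt(i32, data[0..4], -37, .big); // x
    std.mem.writeInt(i32, data[4..8], 70, .big); // y
    std.mem.writeInt(i32, data[8..12], 5, .big); // z
    std.mem.writeInt(u32, data[12..16], 0xdead, .big); // block id

    const x = std.mem.readInt(i32, data[0..4], .big);
    const mask = ~chunkMask;
    // Clearing the low bits yields the chunk origin (works for negative
    // coordinates too: -37 & ~31 == -64); the low bits are the position
    // inside the chunk (-37 & 31 == 27, and -64 + 27 == -37).
    std.debug.print("chunk wx = {d}, local x = {d}\n", .{ x & mask, x & chunkMask });
}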
@@ -158,8 +158,9 @@ const ChunkManager = struct {
 	}

 	pub fn deinit(self: ChunkManager) void {
-		// TODO: Save chunks
+		for(0..main.settings.highestLOD) |_| {
 			chunkCache.clear();
+		}
 		server.terrain.deinit();
 		main.assets.unloadAssets();
 		self.terrainGenerationProfile.deinit();
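The server's `ChunkManager.deinit` now clears the chunk cache once per LOD level instead of once. A plausible reading, consistent with the commit message: deiniting a changed chunk saves it, and saving regenerates the next-coarser LOD chunk, which re-enters the cache marked as changed, so each pass flushes one level of the cascade. A toy model of that reasoning (the list-based cache and `highestLod` constant are illustration only):

const std = @import("std");

const highestLod = 5; // stand-in for main.settings.highestLOD (levels 0..4)

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // The "cache" holds the LOD levels of dirty cached chunks.
    var cache = std.ArrayList(u32).init(allocator);
    defer cache.deinit();
    try cache.append(0); // one changed full-resolution chunk

    for (0..highestLod) |_| {
        // Like chunkCache.clear(): deinit (and thereby save) all cached chunks.
        const drained = try cache.toOwnedSlice();
        defer allocator.free(drained);
        for (drained) |lod| {
            // Saving LOD n regenerates its parent at LOD n+1, re-dirtying it.
            if (lod + 1 < highestLod) try cache.append(lod + 1);
        }
    }
    // One pass per level empties the whole cascade.
    std.debug.assert(cache.items.len == 0);
}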
@@ -180,7 +181,7 @@ const ChunkManager = struct {
 		const ch = getOrGenerateChunk(pos);
 		if(source) |_source| {
 			main.network.Protocols.chunkTransmission.sendChunk(_source.conn, ch);
-		} else { // TODO: This feature was temporarily removed to keep compatibility with the zig version.
+		} else {
 			server.mutex.lock();
 			defer server.mutex.unlock();
 			for(server.users.items) |user| {
@@ -220,7 +221,6 @@ const ChunkManager = struct {

 	fn chunkDeinitFunctionForCache(ch: *Chunk) void {
 		ch.deinit();
-		// TODO: Store chunk.
 	}
 	/// Generates a normal chunk at a given location, or if possible gets it from the cache.
 	pub fn getOrGenerateChunk(pos: ChunkPosition) *Chunk { // TODO: This is not thread safe! The chunk could get removed from the cache while in use. Reference counting should probably be used here.