Mirror of https://github.com/PixelGuys/Cubyz.git, synced 2025-08-03 19:28:49 -04:00
Use lossy compression on LOD chunks when sending them over the network.
The lossy compression simply replaces every fully enclosed interior block with the block below it; since those blocks are never rendered, it should make no visual difference. This halves the amount of data sent when first joining. This makes #748 less important. Fixes #745
This commit is contained in:
parent da25cbc19c
commit b06b4e7224
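For context, the masking relies on one observation: a block surrounded on all six sides by solid blocks can never be seen, so its value can be rewritten freely to help the later deflate stage. Below is a minimal standalone sketch of that idea; the tiny `width`, the `isSolid` rule and the array layout are assumptions for illustration, not Cubyz's actual chunk types:

const std = @import("std");

const width = 4; // hypothetical tiny chunk edge, for illustration; Cubyz uses 32
const volume = width * width * width;

// Assumption for this sketch: block id 0 is air, everything else is solid.
fn isSolid(block: u8) bool {
	return block != 0;
}

// Replace every fully enclosed voxel with the voxel below it (z - 1).
// Enclosed voxels are hidden by their six solid neighbours, so the result
// renders identically while producing long runs that deflate compresses well.
fn maskInterior(data: *[volume]u8) void {
	const original = data.*; // solidity is judged on the unmodified data
	for (1..width - 1) |x| {
		for (1..width - 1) |y| {
			for (1..width - 1) |z| {
				const i = (x * width + y) * width + z;
				const enclosed =
					isSolid(original[i - width * width]) and isSolid(original[i + width * width]) and
					isSolid(original[i - width]) and isSolid(original[i + width]) and
					isSolid(original[i - 1]) and isSolid(original[i + 1]);
				// In-place write, so replacements cascade into runs of equal bytes.
				if (enclosed) data[i] = data[i - 1];
			}
		}
	}
}

test "hidden block is overwritten, surface stays intact" {
	var data = [_]u8{1} ** volume;
	data[(1 * width + 1) * width + 1] = 2; // buried inside solid stone
	maskInterior(&data);
	try std.testing.expectEqual(@as(u8, 1), data[(1 * width + 1) * width + 1]);
	try std.testing.expectEqual(@as(u8, 1), data[0]); // boundary untouched
}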
@@ -535,7 +535,7 @@ pub const ServerChunk = struct { // MARK: ServerChunk
 		const regionMask: i32 = regionSize - 1;
 		const region = main.server.storage.loadRegionFileAndIncreaseRefCount(pos.wx & ~regionMask, pos.wy & ~regionMask, pos.wz & ~regionMask, pos.voxelSize);
 		defer region.decreaseRefCount();
-		const data = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, &self.super);
+		const data = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, &self.super, false);
 		defer main.stackAllocator.free(data);
 		region.storeChunk(
 			data,
@@ -793,7 +793,7 @@ pub const Protocols = struct {
 	}
 	fn sendChunkOverTheNetwork(conn: *Connection, ch: *chunk.ServerChunk) void {
 		ch.mutex.lock();
-		const chunkData = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, &ch.super);
+		const chunkData = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, &ch.super, ch.super.pos.voxelSize != 1);
 		ch.mutex.unlock();
 		defer main.stackAllocator.free(chunkData);
 		const data = main.stackAllocator.alloc(u8, chunkData.len + 16);
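Note how the policy is confined to the call sites: network sends enable lossy masking only for LOD chunks (`voxelSize != 1`), while the region-file save in the first hunk always passes `false`, so on-disk data stays lossless. A hypothetical wrapper spelling that policy out (names assumed, mirroring the signatures in the diff):

// Sketch only: assumed wrapper, not part of the actual codebase.
fn compressForNetwork(allocator: main.utils.NeverFailingAllocator, ch: *chunk.ServerChunk) []const u8 {
	// LOD chunks (voxelSize > 1) are far from the player, so hiding
	// interior-block detail is visually free; voxelSize == 1 chunks
	// must stay lossless because the player can interact with them.
	const allowLossy = ch.super.pos.voxelSize != 1;
	return main.server.storage.ChunkCompression.compressChunk(allocator, &ch.super, allowLossy);
}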
@@ -257,7 +257,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 		deflate_with_8bit_palette = 3,
 		_,
 	};
-	pub fn compressChunk(allocator: main.utils.NeverFailingAllocator, ch: *chunk.Chunk) []const u8 {
+	pub fn compressChunk(allocator: main.utils.NeverFailingAllocator, ch: *chunk.Chunk, allowLossy: bool) []const u8 {
 		if(ch.data.paletteLength == 1) {
 			const data = allocator.alloc(u8, 8);
 			std.mem.writeInt(u32, data[0..4], @intFromEnum(CompressionAlgo.uniform), .big);
@@ -266,8 +266,32 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 		}
 		if(ch.data.paletteLength < 256) {
 			var uncompressedData: [chunk.chunkVolume]u8 = undefined;
+			var solidMask: [chunk.chunkSize*chunk.chunkSize]u32 = undefined;
 			for(0..chunk.chunkVolume) |i| {
 				uncompressedData[i] = @intCast(ch.data.data.getValue(i));
+				if(allowLossy) {
+					if(ch.data.palette[uncompressedData[i]].solid()) {
+						solidMask[i >> 5] |= @as(u32, 1) << @intCast(i & 31);
+					} else {
+						solidMask[i >> 5] &= ~(@as(u32, 1) << @intCast(i & 31));
+					}
+				}
 			}
+			if(allowLossy) {
+				for(0..32) |x| {
+					for(0..32) |y| {
+						if(x == 0 or x == 31 or y == 0 or y == 31) {
+							continue;
+						}
+						const index = x*32 + y;
+						var colMask = solidMask[index] >> 1 & solidMask[index] << 1 & solidMask[index - 1] & solidMask[index + 1] & solidMask[index - 32] & solidMask[index + 32];
+						while(colMask != 0) {
+							const z = @ctz(colMask);
+							colMask &= ~(@as(u32, 1) << @intCast(z));
+							uncompressedData[index*32 + z] = uncompressedData[index*32 + z - 1];
+						}
+					}
+				}
+			}
 			const compressedData = main.utils.Compression.deflate(main.stackAllocator, &uncompressedData, .default);
 			defer main.stackAllocator.free(compressedData);
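The new loop packs each z column's solidity into one u32, one bit per level, so a single AND of six masks finds every enclosed voxel of a column at once: `>> 1` and `<< 1` test the z neighbours, `index - 1`/`index + 1` the y neighbours, and `index - 32`/`index + 32` the x neighbours. The column's own bit is deliberately not required, so sealed air pockets get overwritten as well, which is equally invisible. A self-contained sketch of just that bit trick, assuming 32-voxel columns as in the diff:

const std = @import("std");

// Bits of `col` mark solid voxels in one z column; the other parameters are
// the four laterally adjacent columns. The returned mask has a bit set for
// every z whose six axis neighbours are all solid.
fn interiorBits(col: u32, yMinus: u32, yPlus: u32, xMinus: u32, xPlus: u32) u32 {
	return col >> 1 // solid at z - 1
		& col << 1 // solid at z + 1
		& yMinus & yPlus & xMinus & xPlus;
}

test "only the enclosed middle level survives" {
	// Solid voxels at z = 3..5 in every column: only z = 4 is fully enclosed.
	const solid: u32 = 0b111000;
	try std.testing.expectEqual(@as(u32, 0b010000), interiorBits(solid, solid, solid, solid, solid));
}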