mirror of https://github.com/PixelGuys/Cubyz.git
The great unordering, part 2
commit b57fa6666f
parent daf44c76d7
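The recurring change in the hunks below: PaletteCompressedRegion now stores its palette as []Atomic(T), so palette entries are read with .load(.unordered) and written with .store(..., .unordered) or wrapped with .init(...). A minimal sketch of that access pattern, assuming a 32-bit packed Block and std.atomic.Value as the Atomic wrapper (both are assumptions, not the exact Cubyz definitions):

```zig
// Sketch only: a palette entry as a whole Block packed into an atomic 32-bit word.
const std = @import("std");

const Block = packed struct(u32) {
	typ: u16 = 0, // hypothetical field layout, for illustration
	data: u16 = 0,
};

const AtomicBlock = struct {
	raw: std.atomic.Value(u32),

	fn init(b: Block) AtomicBlock {
		return .{.raw = std.atomic.Value(u32).init(@bitCast(b))};
	}
	fn load(self: *const AtomicBlock) Block {
		return @bitCast(self.raw.load(.unordered));
	}
	fn store(self: *AtomicBlock, b: Block) void {
		self.raw.store(@bitCast(b), .unordered);
	}
};

pub fn main() void {
	var entry: AtomicBlock = .init(.{.typ = 1});
	entry.store(.{.typ = 7, .data = 3}); // writer publishes the whole word
	const snapshot = entry.load(); // reader snapshots the whole word
	std.debug.print("typ={} data={}\n", .{snapshot.typ, snapshot.data});
}
```

Because each entry is a single machine word, a reader always sees typ and data from the same store; .unordered only rules out torn values, it imposes no ordering with surrounding memory accesses.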
@@ -807,7 +807,7 @@ pub const ChunkMesh = struct { // MARK: ChunkMesh
 		self.mutex.unlock();
 		self.lightingData[0].propagateLights(lightEmittingBlocks.items, true, lightRefreshList);
 		sunLight: {
-			var allSun: bool = self.chunk.data.palette().len == 1 and self.chunk.data.palette()[0].typ == 0;
+			var allSun: bool = self.chunk.data.palette().len == 1 and self.chunk.data.palette()[0].load(.unordered).typ == 0;
 			var sunStarters: [chunk.chunkSize*chunk.chunkSize][3]u8 = undefined;
 			var index: usize = 0;
 			const lightStartMap = mesh_storage.getLightMapPiece(self.pos.wx, self.pos.wy, self.pos.voxelSize) orelse break :sunLight;
@@ -918,7 +918,7 @@ pub const ChunkMesh = struct { // MARK: ChunkMesh
 		var paletteCache = main.stackAllocator.alloc(OcclusionInfo, self.chunk.data.palette().len);
 		defer main.stackAllocator.free(paletteCache);
 		for(0..self.chunk.data.palette().len) |i| {
-			const block = self.chunk.data.palette()[i];
+			const block = self.chunk.data.palette()[i].load(.unordered);
 			const model = blocks.meshes.model(block).model();
 			var result: OcclusionInfo = .{};
 			if(model.noNeighborsOccluded or block.viewThrough()) {
@@ -1002,7 +1002,7 @@ pub const ChunkMesh = struct { // MARK: ChunkMesh
 					hasFaces[x][y] |= setBit;
 				}
 				if(occlusionInfo.hasInternalQuads) {
-					const block = self.chunk.data.palette()[paletteId];
+					const block = self.chunk.data.palette()[paletteId].load(.unordered);
 					if(block.transparent()) {
 						appendInternalQuads(block, x, y, z, false, &transparentCore, main.stackAllocator);
 					} else {

@@ -17,6 +17,21 @@ pub fn deinit() void {
 	memoryPool.deinit();
 }

+const LightValue = packed struct(u32) {
+	r: u8,
+	g: u8,
+	b: u8,
+	pad: u8 = undefined,
+
+	fn fromArray(arr: [3]u8) LightValue {
+		return .{.r = arr[0], .g = arr[1], .b = arr[2]};
+	}
+
+	fn toArray(self: LightValue) [3]u8 {
+		return .{self.r, self.g, self.b};
+	}
+};
+
 fn extractColor(in: u32) [3]u8 {
 	return .{
 		@truncate(in >> 16),
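The LightValue type added above packs what used to be a bare [3]u8 palette entry into a single 32-bit word, so the now-atomic palette can load and store a light value whole; fromArray and toArray convert at the call sites that still work with [3]u8. A small sketch of the round trip (the struct is copied from the hunk above; the test wrapper is an addition):

```zig
const std = @import("std");

const LightValue = packed struct(u32) {
	r: u8,
	g: u8,
	b: u8,
	pad: u8 = undefined,

	fn fromArray(arr: [3]u8) LightValue {
		return .{.r = arr[0], .g = arr[1], .b = arr[2]};
	}

	fn toArray(self: LightValue) [3]u8 {
		return .{self.r, self.g, self.b};
	}
};

test "LightValue round-trips an RGB triple" {
	const rgb: [3]u8 = .{200, 150, 100};
	try std.testing.expectEqual(rgb, LightValue.fromArray(rgb).toArray());
}
```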
@@ -26,7 +41,7 @@ fn extractColor(in: u32) [3]u8 {
 }

 pub const ChannelChunk = struct {
-	data: main.utils.PaletteCompressedRegion([3]u8, chunk.chunkVolume),
+	data: main.utils.PaletteCompressedRegion(LightValue, chunk.chunkVolume),
 	lock: main.utils.ReadWriteLock,
 	ch: *chunk.Chunk,
 	isSun: bool,
@@ -68,7 +83,7 @@ pub const ChannelChunk = struct {
 	pub fn getValue(self: *ChannelChunk, x: i32, y: i32, z: i32) [3]u8 {
 		self.lock.assertLockedRead();
 		const index = chunk.getIndex(x, y, z);
-		return self.data.getValue(index);
+		return self.data.getValue(index).toArray();
 	}

 	fn calculateIncomingOcclusion(result: *[3]u8, block: blocks.Block, voxelSize: u31, neighbor: chunk.Neighbor) void {
@@ -109,14 +124,14 @@ pub const ChannelChunk = struct {
 		self.lock.lockWrite();
 		while(lightQueue.popFront()) |entry| {
 			const index = chunk.getIndex(entry.x, entry.y, entry.z);
-			const oldValue: [3]u8 = self.data.getValue(index);
+			const oldValue: [3]u8 = self.data.getValue(index).toArray();
 			const newValue: [3]u8 = .{
 				@max(entry.value[0], oldValue[0]),
 				@max(entry.value[1], oldValue[1]),
 				@max(entry.value[2], oldValue[2]),
 			};
 			if(newValue[0] == oldValue[0] and newValue[1] == oldValue[1] and newValue[2] == oldValue[2]) continue;
-			self.data.setValue(index, newValue);
+			self.data.setValue(index, .fromArray(newValue));
 			for(chunk.Neighbor.iterable) |neighbor| {
 				if(neighbor.toInt() == entry.sourceDir) continue;
 				const nx = entry.x + neighbor.relX();
@@ -175,7 +190,7 @@ pub const ChannelChunk = struct {
 		self.lock.lockWrite();
 		while(lightQueue.popFront()) |entry| {
 			const index = chunk.getIndex(entry.x, entry.y, entry.z);
-			const oldValue: [3]u8 = self.data.getValue(index);
+			const oldValue: [3]u8 = self.data.getValue(index).toArray();
 			var activeValue: @Vector(3, bool) = @bitCast(entry.activeValue);
 			var append: bool = false;
 			if(activeValue[0] and entry.value[0] != oldValue[0]) {
@@ -209,7 +224,7 @@ pub const ChannelChunk = struct {
 			if(activeValue[0]) insertValue[0] = 0;
 			if(activeValue[1]) insertValue[1] = 0;
 			if(activeValue[2]) insertValue[2] = 0;
-			self.data.setValue(index, insertValue);
+			self.data.setValue(index, .fromArray(insertValue));
 			for(chunk.Neighbor.iterable) |neighbor| {
 				if(neighbor.toInt() == entry.sourceDir) continue;
 				const nx = entry.x + neighbor.relX();
@@ -311,7 +326,7 @@ pub const ChannelChunk = struct {
 			defer neighborLightChunk.lock.unlockRead();
 			const index = chunk.getIndex(x, y, z);
 			const neighborIndex = chunk.getIndex(otherX, otherY, otherZ);
-			var value: [3]u8 = neighborLightChunk.data.getValue(neighborIndex);
+			var value: [3]u8 = neighborLightChunk.data.getValue(neighborIndex).toArray();
 			if(!self.isSun or neighbor != .dirUp or value[0] != 255 or value[1] != 255 or value[2] != 255) {
 				value[0] -|= 8*|@as(u8, @intCast(self.ch.pos.voxelSize));
 				value[1] -|= 8*|@as(u8, @intCast(self.ch.pos.voxelSize));
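The attenuation lines in the hunk above use Zig's saturating operators (-| and *|), so subtracting the per-voxel falloff can never wrap a u8 below zero. A tiny illustration:

```zig
const std = @import("std");

test "saturating light falloff clamps at zero" {
	var value: u8 = 10;
	const voxelSize: u8 = 4;
	// 8 *| 4 = 32; 10 -| 32 saturates to 0 instead of wrapping around to 234.
	value -|= 8 *| voxelSize;
	try std.testing.expectEqual(@as(u8, 0), value);
}
```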
@@ -335,7 +350,7 @@ pub const ChannelChunk = struct {
 			self.data.deferredDeinit();
 			self.data.init();
 		}
-		self.data.palette()[0] = .{255, 255, 255};
+		self.data.palette()[0].store(.fromArray(.{255, 255, 255}), .unordered);
 		self.lock.unlockWrite();
 		const val = 255 -| 8*|@as(u8, @intCast(self.ch.pos.voxelSize));
 		var lightQueue = main.utils.CircularBufferQueue(Entry).init(main.stackAllocator, 1 << 12);
@@ -381,7 +396,7 @@ pub const ChannelChunk = struct {
 		self.lock.lockRead();
 		for(lights) |pos| {
 			const index = chunk.getIndex(pos[0], pos[1], pos[2]);
-			lightQueue.pushBack(.{.x = @intCast(pos[0]), .y = @intCast(pos[1]), .z = @intCast(pos[2]), .value = self.data.getValue(index), .sourceDir = 6, .activeValue = 0b111});
+			lightQueue.pushBack(.{.x = @intCast(pos[0]), .y = @intCast(pos[1]), .z = @intCast(pos[2]), .value = self.data.getValue(index).toArray(), .sourceDir = 6, .activeValue = 0b111});
 		}
 		self.lock.unlockRead();
 		var constructiveEntries: main.ListUnmanaged(ChunkEntries) = .{};
@@ -398,7 +413,7 @@ pub const ChannelChunk = struct {
 			channelChunk.lock.lockWrite();
 			for(entryList.items) |entry| {
 				const index = chunk.getIndex(entry.x, entry.y, entry.z);
-				var value = channelChunk.data.getValue(index);
+				var value = channelChunk.data.getValue(index).toArray();
 				const light = if(self.isSun) .{0, 0, 0} else extractColor(channelChunk.ch.data.getValue(index).light());
 				value = .{
 					@max(value[0], light[0]),
@@ -406,7 +421,7 @@ pub const ChannelChunk = struct {
 					@max(value[2], light[2]),
 				};
 				if(value[0] == 0 and value[1] == 0 and value[2] == 0) continue;
-				channelChunk.data.setValue(index, .{0, 0, 0});
+				channelChunk.data.setValue(index, .fromArray(.{0, 0, 0}));
 				lightQueue.pushBack(.{.x = entry.x, .y = entry.y, .z = entry.z, .value = value, .sourceDir = 6, .activeValue = 0b111});
 			}
 			channelChunk.lock.unlockWrite();

@@ -284,7 +284,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 	fn compressBlockData(ch: *chunk.Chunk, allowLossy: bool, writer: *BinaryWriter) void {
 		if(ch.data.palette().len == 1) {
 			writer.writeEnum(ChunkCompressionAlgo, .uniform);
-			writer.writeInt(u32, ch.data.palette()[0].toInt());
+			writer.writeInt(u32, ch.data.palette()[0].load(.unordered).toInt());
 			return;
 		}
 		if(ch.data.palette().len < 256) {
@@ -293,7 +293,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 			for(0..chunk.chunkVolume) |i| {
 				uncompressedData[i] = @intCast(ch.data.impl.raw.data.getValue(i));
 				if(allowLossy) {
-					const block = ch.data.palette()[uncompressedData[i]];
+					const block = ch.data.palette()[uncompressedData[i]].load(.unordered);
 					const model = main.blocks.meshes.model(block).model();
 					const occluder = model.allNeighborsOccluded and !block.viewThrough();
 					if(occluder) {
@@ -326,7 +326,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 			writer.writeInt(u8, @intCast(ch.data.palette().len));

 			for(0..ch.data.palette().len) |i| {
-				writer.writeInt(u32, ch.data.palette()[i].toInt());
+				writer.writeInt(u32, ch.data.palette()[i].load(.unordered).toInt());
 			}
 			writer.writeVarInt(usize, compressedData.len);
 			writer.writeSlice(compressedData);
@@ -375,7 +375,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 				ch.data.initCapacity(paletteLength);

 				for(0..paletteLength) |i| {
-					ch.data.palette()[i] = main.blocks.Block.fromInt(try reader.readInt(u32));
+					ch.data.palette()[i] = .init(main.blocks.Block.fromInt(try reader.readInt(u32)));
 				}

 				const decompressedData = main.stackAllocator.alloc(u8, chunk.chunkVolume);
@@ -392,7 +392,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 					}
 				},
 				.uniform => {
-					ch.data.palette()[0] = main.blocks.Block.fromInt(try reader.readInt(u32));
+					ch.data.palette()[0] = .init(main.blocks.Block.fromInt(try reader.readInt(u32)));
 				},
 			}
 		}

@@ -47,13 +47,13 @@ pub fn generate(worldSeed: u64, chunk: *main.chunk.ServerChunk, caveMap: CaveMap
 		if(minHeight > chunk.super.pos.wz +| chunk.super.width) {
 			chunk.super.data.deferredDeinit();
 			chunk.super.data.init();
-			chunk.super.data.palette()[0] = stone;
+			chunk.super.data.palette()[0] = .init(stone);
 			return;
 		}
 		if(maxHeight < chunk.super.pos.wz) {
 			chunk.super.data.deferredDeinit();
 			chunk.super.data.init();
-			chunk.super.data.palette()[0] = air;
+			chunk.super.data.palette()[0] = .init(air);
 			return;
 		}
 	}

@@ -329,7 +329,7 @@ const ChunkManager = struct { // MARK: ChunkManager
 		}
 		if(pos.voxelSize != 1) { // Generate LOD replacements
 			for(ch.super.data.palette()) |*block| {
-				block.typ = block.lodReplacement();
+				block.store(.{.typ = block.load(.unordered).lodReplacement(), .data = block.load(.unordered).data}, .unordered);
 			}
 		}
 		return ch;
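With atomic palette entries, the LOD-replacement loop above becomes a read-modify-write per entry: snapshot the block with an unordered load, compute the replacement type, then store the whole word back. A sketch of that shape, using simplified stand-ins for Block and lodReplacement (neither matches the real Cubyz definitions):

```zig
const std = @import("std");

const Block = packed struct(u16) {
	typ: u8,
	data: u8,
};

// Hypothetical LOD mapping, purely for illustration.
fn lodReplacement(typ: u8) u8 {
	return if(typ % 2 == 1) 1 else typ;
}

pub fn main() void {
	var palette = [_]std.atomic.Value(u16){
		std.atomic.Value(u16).init(@bitCast(Block{.typ = 3, .data = 7})),
		std.atomic.Value(u16).init(@bitCast(Block{.typ = 4, .data = 0})),
	};
	for(&palette) |*entry| {
		// Load a snapshot, compute the replacement, store the whole word back.
		const old: Block = @bitCast(entry.load(.unordered));
		const updated: Block = .{.typ = lodReplacement(old.typ), .data = old.data};
		entry.store(@bitCast(updated), .unordered);
	}
	const first: Block = @bitCast(palette[0].load(.unordered));
	std.debug.print("typ={} data={}\n", .{first.typ, first.data});
}
```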

@@ -1077,7 +1077,7 @@ pub fn DynamicPackedIntArray(size: comptime_int) type { // MARK: DynamicPackedIn
 pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: PaletteCompressedRegion
 	const Impl = struct {
 		data: DynamicPackedIntArray(size) = .{},
-		palette: []T,
+		palette: []Atomic(T),
 		paletteOccupancy: []u32,
 		paletteLength: u32,
 		activePaletteEntries: u32,
@@ -1092,12 +1092,12 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
 				.impl = .init(impl),
 			};
 			impl.* = .{
-				.palette = main.globalAllocator.alloc(T, 1),
+				.palette = main.globalAllocator.alloc(Atomic(T), 1),
 				.paletteOccupancy = main.globalAllocator.alloc(u32, 1),
 				.paletteLength = 1,
 				.activePaletteEntries = 1,
 			};
-			impl.palette[0] = std.mem.zeroes(T);
+			impl.palette[0] = .init(std.mem.zeroes(T));
 			impl.paletteOccupancy[0] = size;
 		}

@@ -1111,7 +1111,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
 			};
 			impl.* = .{
 				.data = dataDupe,
-				.palette = main.globalAllocator.dupe(T, templateImpl.palette),
+				.palette = main.globalAllocator.dupe(Atomic(T), templateImpl.palette),
 				.paletteOccupancy = main.globalAllocator.dupe(u32, templateImpl.paletteOccupancy),
 				.paletteLength = templateImpl.paletteLength,
 				.activePaletteEntries = templateImpl.activePaletteEntries,
@@ -1128,12 +1128,12 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
 			};
 			impl.* = .{
 				.data = DynamicPackedIntArray(size).initCapacity(bitSize),
-				.palette = main.globalAllocator.alloc(T, bufferLength),
+				.palette = main.globalAllocator.alloc(Atomic(T), bufferLength),
 				.paletteOccupancy = main.globalAllocator.alloc(u32, bufferLength),
 				.paletteLength = paletteLength,
 				.activePaletteEntries = 1,
 			};
-			impl.palette[0] = std.mem.zeroes(T);
+			impl.palette[0] = .init(std.mem.zeroes(T));
 			impl.paletteOccupancy[0] = size;
 			@memset(impl.paletteOccupancy[1..], 0);
 			@memset(impl.data.data, .init(0));
@@ -1159,10 +1159,10 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale

 		pub fn getValue(self: *const Self, i: usize) T {
 			const impl = self.impl.load(.acquire);
-			return impl.palette[impl.data.getValue(i)];
+			return impl.palette[impl.data.getValue(i)].load(.unordered);
 		}

-		pub fn palette(self: *const Self) []T {
+		pub fn palette(self: *const Self) []Atomic(T) {
 			const impl = self.impl.raw;
 			return impl.palette[0..impl.paletteLength];
 		}
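After this hunk, palette() hands out []Atomic(T) while getValue() keeps its old T-returning signature by doing the unordered load internally. The toy region below mirrors that split; it is deliberately simplified (fixed 8-bit indices, a bounded palette, no resizing, occupancy tracking, or deferred deinit) and is not the real PaletteCompressedRegion:

```zig
const std = @import("std");

fn ToyPaletteRegion(comptime T: type, comptime size: usize) type {
	return struct {
		const Self = @This();

		indices: [size]u8 = std.mem.zeroes([size]u8),
		palette: [256]std.atomic.Value(T) = undefined,
		paletteLength: u32 = 1,

		fn init(defaultValue: T) Self {
			var self: Self = .{};
			self.palette[0] = .init(defaultValue);
			return self;
		}

		// Mirrors the new getValue: index lookup, then an unordered load of the entry.
		fn getValue(self: *const Self, i: usize) T {
			return self.palette[self.indices[i]].load(.unordered);
		}

		// Simplified setValue: linear palette search, append if missing, store the index.
		fn setValue(self: *Self, i: usize, val: T) void {
			var paletteIndex: u32 = 0;
			while(paletteIndex < self.paletteLength) : (paletteIndex += 1) {
				if(std.meta.eql(self.palette[paletteIndex].load(.unordered), val)) break;
			}
			if(paletteIndex == self.paletteLength) {
				self.palette[paletteIndex] = .init(val);
				self.paletteLength += 1;
			}
			self.indices[i] = @intCast(paletteIndex);
		}
	};
}

test "toy palette region with an atomic palette" {
	var region = ToyPaletteRegion(u32, 16).init(0);
	region.setValue(3, 42);
	try std.testing.expectEqual(@as(u32, 42), region.getValue(3));
	try std.testing.expectEqual(@as(u32, 0), region.getValue(0));
}
```

Only the palette entries are atomic in this sketch; the index array and paletteLength still assume a single writer, which matches how the hunks above keep taking write locks around setValue.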
@@ -1172,7 +1172,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
 			std.debug.assert(impl.paletteLength <= impl.palette.len);
 			var paletteIndex: u32 = 0;
 			while(paletteIndex < impl.paletteLength) : (paletteIndex += 1) { // TODO: There got to be a faster way to do this. Either using SIMD or using a cache or hashmap.
-				if(std.meta.eql(impl.palette[paletteIndex], val)) {
+				if(std.meta.eql(impl.palette[paletteIndex].load(.unordered), val)) {
 					break;
 				}
 			}
@@ -1192,7 +1192,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
 				newSelf.deferredDeinit();
 				impl = newImpl;
 			}
-			impl.palette[paletteIndex] = val;
+			impl.palette[paletteIndex].store(val, .unordered);
 			impl.paletteLength += 1;
 			std.debug.assert(impl.paletteLength <= impl.palette.len);
 		}
@@ -1270,7 +1270,7 @@ pub fn PaletteCompressedRegion(T: type, size: comptime_int) type { // MARK: Pale
 				std.debug.assert(iNew < impl.activePaletteEntries);
 				std.debug.assert(iOld < impl.paletteLength);
 				paletteMap[iOld] = iNew;
-				newImpl.palette[iNew] = impl.palette[iOld];
+				newImpl.palette[iNew] = .init(impl.palette[iOld].load(.unordered));
 				newImpl.paletteOccupancy[iNew] = impl.paletteOccupancy[iOld];
 			}
 		}