Mirror of https://github.com/PixelGuys/Cubyz.git (synced 2025-08-03 11:17:05 -04:00)
Always use big endian (#1274)
* Always use big endian
* Apply review change requests
This commit is contained in:
parent db3d640c2a
commit 00c6b29736
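The change in one sentence: utils.BinaryReader and utils.BinaryWriter previously took the byte order as an init parameter at every call site; this commit pins it to big endian in one file-scope constant in utils.zig and drops the parameter everywhere. A minimal self-contained sketch of the resulting pattern (reduced from the utils.zig hunks at the bottom of this diff; the helper names are illustrative, not part of the codebase):

const std = @import("std");

// utils.zig now pins the byte order once instead of threading it through every init:
const endian: std.builtin.Endian = .big;

fn writeU32(buf: *[4]u8, value: u32) void {
	std.mem.writeInt(u32, buf, value, endian);
}

fn readU32(buf: *const [4]u8) u32 {
	return std.mem.readInt(u32, buf, endian);
}

test "big endian puts the most significant byte first" {
	var buf: [4]u8 = undefined;
	writeU32(&buf, 0x11223344);
	try std.testing.expectEqualSlices(u8, &.{0x11, 0x22, 0x33, 0x44}, &buf);
	try std.testing.expectEqual(@as(u32, 0x11223344), readU32(&buf));
}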
@@ -36,7 +36,7 @@ pub const Sync = struct { // MARK: Sync
 	pub fn deinit() void {
 		mutex.lock();
 		while(commands.dequeue()) |cmd| {
-			var reader = utils.BinaryReader.init(&.{}, main.network.networkEndian);
+			var reader = utils.BinaryReader.init(&.{});
 			cmd.finalize(main.globalAllocator, .client, &reader) catch |err| {
 				std.log.err("Got error while cleaning remaining inventory commands: {s}", .{@errorName(err)});
 			};

@@ -110,7 +110,7 @@ pub const Sync = struct { // MARK: Sync
 		}
 		if(tempData.popOrNull()) |_cmd| {
 			var cmd = _cmd;
-			var reader = utils.BinaryReader.init(&.{}, main.network.networkEndian);
+			var reader = utils.BinaryReader.init(&.{});
 			cmd.finalize(main.globalAllocator, .client, &reader) catch |err| {
 				std.log.err("Got error while cleaning rejected inventory command: {s}", .{@errorName(err)});
 			};

@@ -281,7 +281,7 @@ pub const Sync = struct { // MARK: Sync
 				}
 			}
 		}
-		var reader = utils.BinaryReader.init(&.{}, main.network.networkEndian);
+		var reader = utils.BinaryReader.init(&.{});
 		command.finalize(main.globalAllocator, .server, &reader) catch |err| {
 			std.log.err("Got error while finalizing command on the server side: {s}", .{@errorName(err)});
 		};

@@ -700,7 +700,7 @@ pub const Command = struct { // MARK: Command
 		}

 		pub fn serialize(self: SyncOperation, allocator: NeverFailingAllocator) []const u8 {
-			var writer = utils.BinaryWriter.initCapacity(allocator, main.network.networkEndian, 13);
+			var writer = utils.BinaryWriter.initCapacity(allocator, 13);
 			writer.writeEnum(SyncOperationType, self);
 			switch(self) {
 				.create => |create| {

@@ -740,7 +740,7 @@ pub const Command = struct { // MARK: Command
 	syncOperations: main.ListUnmanaged(SyncOperation) = .{},

 	fn serializePayload(self: *Command, allocator: NeverFailingAllocator) []const u8 {
-		var writer = utils.BinaryWriter.init(allocator, main.network.networkEndian);
+		var writer = utils.BinaryWriter.init(allocator);
 		defer writer.deinit();
 		switch(self.payload) {
 			inline else => |payload| {

@@ -1073,7 +1073,7 @@ pub const Command = struct { // MARK: Command
 		}

 		fn confirmationData(self: Open, allocator: NeverFailingAllocator) []const u8 {
-			var writer = utils.BinaryWriter.initCapacity(allocator, main.network.networkEndian, 4);
+			var writer = utils.BinaryWriter.initCapacity(allocator, 4);
 			writer.writeInt(u32, self.inv.id);
 			return writer.data.toOwnedSlice();
 		}
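In the serialize hunks above, each SyncOperation is written as its enum tag followed by the payload, with the byte order now implied by the writer. A standalone sketch of that tag-then-payload layout (the enum and the 5-byte layout here are illustrative, not the real SyncOperationType):

const std = @import("std");

const endian: std.builtin.Endian = .big;

const Tag = enum(u8) { create, delete };

// Mirrors writer.writeEnum(...) followed by writer.writeInt(u32, ...):
// one tag byte, then a big-endian u32 payload.
fn serializeCreate(buf: *[5]u8, id: u32) void {
	buf[0] = @intFromEnum(Tag.create);
	std.mem.writeInt(u32, buf[1..5], id, endian);
}

test "tag byte precedes big-endian payload" {
	var buf: [5]u8 = undefined;
	serializeCreate(&buf, 7);
	try std.testing.expectEqual(@as(u8, 0), buf[0]); // .create == 0
	try std.testing.expectEqual(@as(u32, 7), std.mem.readInt(u32, buf[1..5], endian));
}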
@@ -125,7 +125,7 @@ pub const Blueprint = struct {
 		}
 	}
 	pub fn load(allocator: NeverFailingAllocator, inputBuffer: []u8) !Blueprint {
-		var compressedReader = BinaryReader.init(inputBuffer, .big);
+		var compressedReader = BinaryReader.init(inputBuffer);
 		const version = try compressedReader.readInt(u16);

 		if(version > blueprintVersion) {

@@ -143,7 +143,7 @@ pub const Blueprint = struct {

 		const decompressedData = try self.decompressBuffer(compressedReader.remaining, blockPaletteSizeBytes, compression);
 		defer main.stackAllocator.free(decompressedData);
-		var decompressedReader = BinaryReader.init(decompressedData, .big);
+		var decompressedReader = BinaryReader.init(decompressedData);

 		const palette = try loadBlockPalette(main.stackAllocator, paletteBlockCount, &decompressedReader);
 		defer main.stackAllocator.free(palette);

@@ -166,7 +166,7 @@ pub const Blueprint = struct {
 		defer gameIdToBlueprintId.deinit();
 		std.debug.assert(gameIdToBlueprintId.count() != 0);

-		var uncompressedWriter = BinaryWriter.init(main.stackAllocator, .big);
+		var uncompressedWriter = BinaryWriter.init(main.stackAllocator);
 		defer uncompressedWriter.deinit();

 		const blockPaletteSizeBytes = storeBlockPalette(gameIdToBlueprintId, &uncompressedWriter);

@@ -179,7 +179,7 @@ pub const Blueprint = struct {
 		const compressed = self.compressOutputBuffer(main.stackAllocator, uncompressedWriter.data.items);
 		defer main.stackAllocator.free(compressed.data);

-		var outputWriter = BinaryWriter.initCapacity(allocator, .big, @sizeOf(i16) + @sizeOf(BlueprintCompression) + @sizeOf(u32) + @sizeOf(u16)*4 + compressed.data.len);
+		var outputWriter = BinaryWriter.initCapacity(allocator, @sizeOf(i16) + @sizeOf(BlueprintCompression) + @sizeOf(u32) + @sizeOf(u16)*4 + compressed.data.len);

 		outputWriter.writeInt(u16, blueprintVersion);
 		outputWriter.writeEnum(BlueprintCompression, compressed.mode);
@@ -117,7 +117,7 @@ pub const ItemDropManager = struct { // MARK: ItemDropManager
 	}

 	pub fn getPositionAndVelocityData(self: *ItemDropManager, allocator: NeverFailingAllocator) []u8 {
-		var writer = utils.BinaryWriter.initCapacity(allocator, main.network.networkEndian, self.size*50);
+		var writer = utils.BinaryWriter.initCapacity(allocator, self.size*50);
 		for(self.indices[0..self.size]) |i| {
 			writer.writeInt(u16, i);
 			writer.writeFloat(f64, self.list.items(.pos)[i][0]);
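getPositionAndVelocityData above streams f64 positions through the same writer; elsewhere in this diff the idiom writer.writeInt(u64, @bitCast(...)) shows that floats travel as their IEEE-754 bit pattern inside a big-endian integer. A self-contained round trip of that idiom (helper names are illustrative):

const std = @import("std");

const endian: std.builtin.Endian = .big;

// A float is serialized as its bit pattern, written as a big-endian integer.
fn writeF64(buf: *[8]u8, value: f64) void {
	std.mem.writeInt(u64, buf, @bitCast(value), endian);
}

fn readF64(buf: *const [8]u8) f64 {
	return @bitCast(std.mem.readInt(u64, buf, endian));
}

test "f64 survives the bit-cast round trip" {
	var buf: [8]u8 = undefined;
	writeF64(&buf, -123.456);
	try std.testing.expectEqual(@as(f64, -123.456), readF64(&buf));
}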
@@ -21,8 +21,6 @@ const Vec3f = vec.Vec3f;
 const Vec3i = vec.Vec3i;
 const NeverFailingAllocator = main.heap.NeverFailingAllocator;

-pub const networkEndian: std.builtin.Endian = .big;
-
 //TODO: Might want to use SSL or something similar to encode the message

 const Socket = struct {
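The networkEndian constant removed here is not renamed but superseded: the utils.zig hunks at the bottom of this diff introduce a file-scope equivalent that BinaryReader and BinaryWriter use internally, so network code no longer names the byte order at all. Both declarations, side by side:

// Removed from network.zig in this hunk:
pub const networkEndian: std.builtin.Endian = .big;

// Added in utils.zig (see the @@ -1408 hunk below), private to the readers and writers:
const endian: std.builtin.Endian = .big;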
@@ -769,7 +767,7 @@ pub const Protocols = struct {
 	}
 	pub fn sendRequest(conn: *Connection, requests: []chunk.ChunkPosition, basePosition: Vec3i, renderDistance: u16) void {
 		if(requests.len == 0) return;
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 14 + 4*requests.len);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 14 + 4*requests.len);
 		defer writer.deinit();
 		writer.writeInt(i32, basePosition[0]);
 		writer.writeInt(i32, basePosition[1]);

@@ -805,7 +803,7 @@ pub const Protocols = struct {
 		const chunkData = main.server.storage.ChunkCompression.compressChunk(main.stackAllocator, &ch.super, ch.super.pos.voxelSize != 1);
 		ch.mutex.unlock();
 		defer main.stackAllocator.free(chunkData);
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, chunkData.len + 16);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, chunkData.len + 16);
 		defer writer.deinit();
 		writer.writeInt(i32, ch.super.pos.wx);
 		writer.writeInt(i32, ch.super.pos.wy);

@@ -840,7 +838,7 @@ pub const Protocols = struct {
 			return; // Only send at most once every 50 ms.
 		}
 		lastPositionSent = time;
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 62);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 62);
 		defer writer.deinit();
 		writer.writeInt(u64, @bitCast(playerPos[0]));
 		writer.writeInt(u64, @bitCast(playerPos[1]));

@@ -889,7 +887,7 @@ pub const Protocols = struct {
 	}
 	pub fn send(conn: *Connection, entityData: []const u8, itemData: []const u8) void {
 		if(entityData.len != 0) {
-			var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, entityData.len + 3);
+			var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, entityData.len + 3);
 			defer writer.deinit();
 			writer.writeInt(u8, type_entity);
 			writer.writeInt(i16, @truncate(std.time.milliTimestamp()));

@@ -898,7 +896,7 @@ pub const Protocols = struct {
 		}

 		if(itemData.len != 0) {
-			var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, itemData.len + 3);
+			var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, itemData.len + 3);
 			defer writer.deinit();
 			writer.writeInt(u8, type_item);
 			writer.writeInt(i16, @truncate(std.time.milliTimestamp()));

@@ -922,7 +920,7 @@ pub const Protocols = struct {
 		}
 	}
 	pub fn send(conn: *Connection, x: i32, y: i32, z: i32, newBlock: Block) void {
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 16);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 16);
 		defer writer.deinit();
 		writer.writeInt(i32, x);
 		writer.writeInt(i32, y);

@@ -1047,7 +1045,7 @@ pub const Protocols = struct {
 	}

 	pub fn sendTPCoordinates(conn: *Connection, pos: Vec3d) void {
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 25);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 25);
 		defer writer.deinit();

 		writer.writeEnum(UpdateType, .teleport);

@@ -1057,7 +1055,7 @@ pub const Protocols = struct {
 	}

 	pub fn sendWorldEditPos(conn: *Connection, posType: WorldEditPosition, maybePos: ?Vec3i) void {
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 25);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 25);
 		defer writer.deinit();

 		writer.writeEnum(UpdateType, .worldEditPos);

@@ -1070,7 +1068,7 @@ pub const Protocols = struct {
 	}

 	pub fn sendTimeAndBiome(conn: *Connection, world: *const main.server.ServerWorld) void {
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 13);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 13);
 		defer writer.deinit();

 		writer.writeEnum(UpdateType, .timeAndBiome);

@@ -1124,7 +1122,7 @@ pub const Protocols = struct {
 	}
 	pub fn sendRequest(conn: *Connection, requests: []main.server.terrain.SurfaceMap.MapFragmentPosition) void {
 		if(requests.len == 0) return;
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 9*requests.len);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 9*requests.len);
 		defer writer.deinit();
 		for(requests) |req| {
 			writer.writeInt(i32, req.wx);

@@ -1154,7 +1152,7 @@ pub const Protocols = struct {
 			std.log.err("Transmission of light map has invalid size: {}. Input data: {any}, After inflate: {any}", .{_inflatedLen, reader.remaining, _inflatedData[0.._inflatedLen]});
 			return error.Invalid;
 		}
-		var ligthMapReader = utils.BinaryReader.init(_inflatedData, networkEndian);
+		var ligthMapReader = utils.BinaryReader.init(_inflatedData);
 		const map = main.globalAllocator.create(main.server.terrain.LightMap.LightMapFragment);
 		map.init(pos.wx, pos.wy, pos.voxelSize);
 		_ = map.refCount.fetchAdd(1, .monotonic);

@@ -1164,14 +1162,14 @@ pub const Protocols = struct {
 		renderer.mesh_storage.updateLightMap(map);
 	}
 	pub fn sendLightMap(conn: *Connection, map: *main.server.terrain.LightMap.LightMapFragment) void {
-		var ligthMapWriter = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, @sizeOf(@TypeOf(map.startHeight)));
+		var ligthMapWriter = utils.BinaryWriter.initCapacity(main.stackAllocator, @sizeOf(@TypeOf(map.startHeight)));
 		defer ligthMapWriter.deinit();
 		for(&map.startHeight) |val| {
 			ligthMapWriter.writeInt(i16, val);
 		}
 		const compressedData = utils.Compression.deflate(main.stackAllocator, ligthMapWriter.data.items, .default);
 		defer main.stackAllocator.free(compressedData);
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, 9 + compressedData.len);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, 9 + compressedData.len);
 		defer writer.deinit();
 		writer.writeInt(i32, map.pos.wx);
 		writer.writeInt(i32, map.pos.wy);

@@ -1200,7 +1198,7 @@ pub const Protocols = struct {
 	}
 	pub fn sendCommand(conn: *Connection, payloadType: items.Inventory.Command.PayloadType, _data: []const u8) void {
 		std.debug.assert(conn.user == null);
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, _data.len + 1);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, _data.len + 1);
 		defer writer.deinit();
 		writer.writeEnum(items.Inventory.Command.PayloadType, payloadType);
 		std.debug.assert(writer.data.items[0] != 0xff);

@@ -1209,7 +1207,7 @@ pub const Protocols = struct {
 	}
 	pub fn sendConfirmation(conn: *Connection, _data: []const u8) void {
 		std.debug.assert(conn.user != null);
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, _data.len + 1);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, _data.len + 1);
 		defer writer.deinit();
 		writer.writeInt(u8, 0xff);
 		writer.writeSlice(_data);

@@ -1221,7 +1219,7 @@ pub const Protocols = struct {
 	}
 	pub fn sendSyncOperation(conn: *Connection, _data: []const u8) void {
 		std.debug.assert(conn.user != null);
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, networkEndian, _data.len + 1);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, _data.len + 1);
 		defer writer.deinit();
 		writer.writeInt(u8, 0);
 		writer.writeSlice(_data);

@@ -1389,7 +1387,7 @@ pub const Connection = struct { // MARK: Connection
 		self.streamBuffer[0] = Protocols.important;
 		const id = self.messageID;
 		self.messageID += 1;
-		std.mem.writeInt(u32, self.streamBuffer[1..5], id, .big); // TODO: Use little endian for better hardware support. Currently the aim is interoperability with the java version which uses big endian.
+		std.mem.writeInt(u32, self.streamBuffer[1..5], id, .big);

 		const packet = UnconfirmedPacket{
 			.data = main.globalAllocator.dupe(u8, self.streamBuffer[0..self.streamPosition]),

@@ -1456,7 +1454,7 @@ pub const Connection = struct { // MARK: Connection
 		self.mutex.lock();
 		defer self.mutex.unlock();

-		var reader = utils.BinaryReader.init(data, .big);
+		var reader = utils.BinaryReader.init(data);

 		self.otherKeepAliveReceived = try reader.readInt(u32);
 		self.lastKeepAliveReceived = try reader.readInt(u32);

@@ -1531,7 +1529,7 @@ pub const Connection = struct { // MARK: Connection
 			self.receivedPackets[0] = putBackToFront;
 			self.receivedPackets[0].clearRetainingCapacity();
 		}
-		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, .big, runLengthEncodingStarts.items.len*8 + 9);
+		var writer = utils.BinaryWriter.initCapacity(main.stackAllocator, runLengthEncodingStarts.items.len*8 + 9);
 		defer writer.deinit();

 		writer.writeInt(u8, Protocols.keepAlive);

@@ -1690,7 +1688,7 @@ pub const Connection = struct { // MARK: Connection
 			if(Protocols.isAsynchronous[protocol]) {
 				ProtocolTask.schedule(self, protocol, data);
 			} else {
-				var reader = utils.BinaryReader.init(data, networkEndian);
+				var reader = utils.BinaryReader.init(data);
 				try prot(self, &reader);
 			}
 		} else {

@@ -1748,7 +1746,7 @@ pub const Connection = struct { // MARK: Connection
 			try self.receiveKeepAlive(data[1..]);
 		} else {
 			if(Protocols.list[protocol]) |prot| {
-				var reader = utils.BinaryReader.init(data[1..], networkEndian);
+				var reader = utils.BinaryReader.init(data[1..]);
 				try prot(self, &reader);
 			} else {
 				std.log.err("Received unknown protocol with id {}", .{protocol});

@@ -1804,7 +1802,7 @@ const ProtocolTask = struct {

 	pub fn run(self: *ProtocolTask) void {
 		defer self.clean();
-		var reader = utils.BinaryReader.init(self.data, networkEndian);
+		var reader = utils.BinaryReader.init(self.data);
 		Protocols.list[self.protocol].?(self.conn, &reader) catch |err| {
 			std.log.err("Got error {s} while executing protocol {} with data {any}", .{@errorName(err), self.protocol, self.data}); // TODO: Maybe disconnect on error
 		};
@@ -405,7 +405,7 @@ fn update() void { // MARK: update()
 	}

 	// Send the entity data:
-	var writer = BinaryWriter.initCapacity(main.stackAllocator, network.networkEndian, (4 + 24 + 12 + 24)*userList.len);
+	var writer = BinaryWriter.initCapacity(main.stackAllocator, (4 + 24 + 12 + 24)*userList.len);
 	defer writer.deinit();

 	const itemData = world.?.itemDropManager.getPositionAndVelocityData(main.stackAllocator);
@@ -54,7 +54,7 @@ pub const RegionFile = struct { // MARK: RegionFile
 	}

 	fn load(self: *RegionFile, path: []const u8, data: []const u8) !void {
-		var reader = BinaryReader.init(data, .big);
+		var reader = BinaryReader.init(data);

 		const fileVersion = try reader.readInt(u32);
 		const fileSize = try reader.readInt(u32);

@@ -129,7 +129,7 @@ pub const RegionFile = struct { // MARK: RegionFile
 			return;
 		}

-		var writer = BinaryWriter.initCapacity(main.stackAllocator, .big, totalSize + headerSize);
+		var writer = BinaryWriter.initCapacity(main.stackAllocator, totalSize + headerSize);
 		defer writer.deinit();

 		writer.writeInt(u32, version);

@@ -261,7 +261,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 	};
 	pub fn compressChunk(allocator: main.heap.NeverFailingAllocator, ch: *chunk.Chunk, allowLossy: bool) []const u8 {
 		if(ch.data.paletteLength == 1) {
-			var writer = BinaryWriter.initCapacity(allocator, .big, @sizeOf(CompressionAlgo) + @sizeOf(u32));
+			var writer = BinaryWriter.initCapacity(allocator, @sizeOf(CompressionAlgo) + @sizeOf(u32));

 			writer.writeEnum(CompressionAlgo, .uniform);
 			writer.writeInt(u32, ch.data.palette[0].toInt());

@@ -300,7 +300,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 			const compressedData = main.utils.Compression.deflate(main.stackAllocator, &uncompressedData, .default);
 			defer main.stackAllocator.free(compressedData);

-			var writer = BinaryWriter.initCapacity(allocator, .big, @sizeOf(CompressionAlgo) + @sizeOf(u8) + @sizeOf(u32)*ch.data.paletteLength + compressedData.len);
+			var writer = BinaryWriter.initCapacity(allocator, @sizeOf(CompressionAlgo) + @sizeOf(u8) + @sizeOf(u32)*ch.data.paletteLength + compressedData.len);

 			writer.writeEnum(CompressionAlgo, .deflate_with_8bit_palette);
 			writer.writeInt(u8, @intCast(ch.data.paletteLength));

@@ -311,7 +311,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 			writer.writeSlice(compressedData);
 			return writer.data.toOwnedSlice();
 		}
-		var uncompressedWriter = BinaryWriter.initCapacity(main.stackAllocator, .big, chunk.chunkVolume*@sizeOf(u32));
+		var uncompressedWriter = BinaryWriter.initCapacity(main.stackAllocator, chunk.chunkVolume*@sizeOf(u32));
 		defer uncompressedWriter.deinit();

 		for(0..chunk.chunkVolume) |i| {

@@ -320,7 +320,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 		const compressedData = main.utils.Compression.deflate(main.stackAllocator, uncompressedWriter.data.items, .default);
 		defer main.stackAllocator.free(compressedData);

-		var compressedWriter = BinaryWriter.initCapacity(allocator, .big, @sizeOf(CompressionAlgo) + compressedData.len);
+		var compressedWriter = BinaryWriter.initCapacity(allocator, @sizeOf(CompressionAlgo) + compressedData.len);

 		compressedWriter.writeEnum(CompressionAlgo, .deflate);
 		compressedWriter.writeSlice(compressedData);

@@ -331,7 +331,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 	pub fn decompressChunk(ch: *chunk.Chunk, _data: []const u8) !void {
 		std.debug.assert(ch.data.paletteLength == 1);

-		var reader = BinaryReader.init(_data, .big);
+		var reader = BinaryReader.init(_data);
 		const compressionAlgorithm = try reader.readEnum(CompressionAlgo);

 		switch(compressionAlgorithm) {

@@ -343,7 +343,7 @@ pub const ChunkCompression = struct { // MARK: ChunkCompression
 			const decompressedLength = try main.utils.Compression.inflateTo(decompressedData, reader.remaining);
 			if(decompressedLength != chunk.chunkVolume*@sizeOf(u32)) return error.corrupted;

-			var decompressedReader = BinaryReader.init(decompressedData, .big);
+			var decompressedReader = BinaryReader.init(decompressedData);

 			for(0..chunk.chunkVolume) |i| {
 				ch.data.setValue(i, main.blocks.Block.fromInt(try decompressedReader.readInt(u32)));
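decompressChunk above reads a CompressionAlgo tag first and then switches on it. A reduced sketch of that read-tag-then-dispatch pattern with the now-implicit big endian (the enum and payload layout are illustrative):

const std = @import("std");

const endian: std.builtin.Endian = .big;

const Algo = enum(u8) { uniform, deflate };

// Mirrors reader.readEnum(CompressionAlgo) followed by switch(...).
fn decodeTag(data: []const u8) !Algo {
	if(data.len < 1) return error.OutOfBounds;
	return std.meta.intToEnum(Algo, data[0]);
}

test "dispatch on the leading tag byte" {
	var buf: [5]u8 = undefined;
	buf[0] = @intFromEnum(Algo.uniform);
	std.mem.writeInt(u32, buf[1..5], 42, endian); // e.g. the single palette entry
	switch(try decodeTag(&buf)) {
		.uniform => try std.testing.expectEqual(@as(u32, 42), std.mem.readInt(u32, buf[1..5], endian)),
		.deflate => unreachable,
	}
}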
@@ -134,7 +134,7 @@ pub const MapFragment = struct { // MARK: MapFragment
 		const fullData = try main.files.read(main.stackAllocator, path);
 		defer main.stackAllocator.free(fullData);

-		var fullReader = BinaryReader.init(fullData, .big);
+		var fullReader = BinaryReader.init(fullData);

 		const header: StorageHeader = .{
 			.version = try fullReader.readInt(u8),

@@ -165,7 +165,7 @@ pub const MapFragment = struct { // MARK: MapFragment
 		defer main.stackAllocator.free(rawData);
 		if(try main.utils.Compression.inflateTo(rawData, fullReader.remaining) != rawData.len) return error.CorruptedFile;

-		var reader = BinaryReader.init(rawData, .big);
+		var reader = BinaryReader.init(rawData);

 		for(0..mapSize) |x| for(0..mapSize) |y| {
 			self.biomeMap[x][y] = main.server.terrain.biomes.getById(biomePalette.palette.items[try reader.readInt(u32)]);

@@ -188,7 +188,7 @@ pub const MapFragment = struct { // MARK: MapFragment
 		const heightDataSize = mapSize*mapSize*@sizeOf(i32);
 		const originalHeightDataSize = mapSize*mapSize*@sizeOf(i32);

-		var writer = BinaryWriter.initCapacity(main.stackAllocator, .big, biomeDataSize + heightDataSize + originalHeightDataSize);
+		var writer = BinaryWriter.initCapacity(main.stackAllocator, biomeDataSize + heightDataSize + originalHeightDataSize);
 		defer writer.deinit();

 		for(0..mapSize) |x| for(0..mapSize) |y| writer.writeInt(u32, self.biomeMap[x][y].paletteId);

@@ -198,7 +198,7 @@ pub const MapFragment = struct { // MARK: MapFragment
 		const compressedData = main.utils.Compression.deflate(main.stackAllocator, writer.data.items, .fast);
 		defer main.stackAllocator.free(compressedData);

-		var outputWriter = BinaryWriter.initCapacity(main.stackAllocator, .big, @sizeOf(StorageHeader) + compressedData.len);
+		var outputWriter = BinaryWriter.initCapacity(main.stackAllocator, @sizeOf(StorageHeader) + compressedData.len);
 		defer outputWriter.deinit();

 		const header: StorageHeader = .{
@@ -43,14 +43,14 @@ pub const Compression = struct { // MARK: Compression
 				main.stackAllocator.free(relPath);
 			};
 			var len: [4]u8 = undefined;
-			std.mem.writeInt(u32, &len, @as(u32, @intCast(relPath.len)), .big);
+			std.mem.writeInt(u32, &len, @as(u32, @intCast(relPath.len)), endian);
 			_ = try comp.write(&len);
 			_ = try comp.write(relPath);

 			const fileData = try sourceDir.readFileAlloc(main.stackAllocator.allocator, relPath, std.math.maxInt(usize));
 			defer main.stackAllocator.free(fileData);

-			std.mem.writeInt(u32, &len, @as(u32, @intCast(fileData.len)), .big);
+			std.mem.writeInt(u32, &len, @as(u32, @intCast(fileData.len)), endian);
 			_ = try comp.write(&len);
 			_ = try comp.write(fileData);
 		}

@@ -66,11 +66,11 @@ pub const Compression = struct { // MARK: Compression
 		defer main.stackAllocator.free(_data);
 		var data = _data;
 		while(data.len != 0) {
-			var len = std.mem.readInt(u32, data[0..4], .big);
+			var len = std.mem.readInt(u32, data[0..4], endian);
 			data = data[4..];
 			const path = data[0..len];
 			data = data[len..];
-			len = std.mem.readInt(u32, data[0..4], .big);
+			len = std.mem.readInt(u32, data[0..4], endian);
 			data = data[4..];
 			const fileData = data[0..len];
 			data = data[len..];
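The pack/unpack hunks above frame every archive entry as a big-endian u32 length followed by the raw bytes, now spelled with the shared endian constant instead of a .big literal. A self-contained round trip of that framing (frame/unframe are illustrative names):

const std = @import("std");

const endian: std.builtin.Endian = .big;

// [4-byte big-endian length][payload bytes], as in Compression.pack above.
fn frame(out: []u8, payload: []const u8) ![]u8 {
	if(out.len < 4 + payload.len) return error.NoSpaceLeft;
	std.mem.writeInt(u32, out[0..4], @intCast(payload.len), endian);
	@memcpy(out[4..][0..payload.len], payload);
	return out[0 .. 4 + payload.len];
}

// Inverse: read the length prefix, then slice out the payload.
fn unframe(data: []const u8) ![]const u8 {
	if(data.len < 4) return error.OutOfBounds;
	const len = std.mem.readInt(u32, data[0..4], endian);
	if(data.len < 4 + len) return error.OutOfBounds;
	return data[4..][0..len];
}

test "length-prefixed round trip" {
	var buf: [64]u8 = undefined;
	const framed = try frame(&buf, "hello");
	try std.testing.expectEqualStrings("hello", try unframe(framed));
}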
@@ -1408,12 +1408,13 @@ pub const Side = enum {
 	server,
 };

+const endian: std.builtin.Endian = .big;
+
 pub const BinaryReader = struct {
 	remaining: []const u8,
-	endian: std.builtin.Endian,

-	pub fn init(data: []const u8, endian: std.builtin.Endian) BinaryReader {
-		return .{.remaining = data, .endian = endian};
+	pub fn init(data: []const u8) BinaryReader {
+		return .{.remaining = data};
 	}

 	pub fn readVec(self: *BinaryReader, T: type) error{OutOfBounds, IntOutOfBounds}!T {

@@ -1443,7 +1444,7 @@ pub const BinaryReader = struct {
 		const bufSize = @divExact(@typeInfo(T).int.bits, 8);
 		if(self.remaining.len < bufSize) return error.OutOfBounds;
 		defer self.remaining = self.remaining[bufSize..];
-		return std.mem.readInt(T, self.remaining[0..bufSize], self.endian);
+		return std.mem.readInt(T, self.remaining[0..bufSize], endian);
 	}

 	pub fn readFloat(self: *BinaryReader, T: type) error{OutOfBounds, IntOutOfBounds}!T {

@@ -1471,14 +1472,13 @@ pub const BinaryReader = struct {

 pub const BinaryWriter = struct {
 	data: main.List(u8),
-	endian: std.builtin.Endian,

-	pub fn init(allocator: NeverFailingAllocator, endian: std.builtin.Endian) BinaryWriter {
-		return .{.data = .init(allocator), .endian = endian};
+	pub fn init(allocator: NeverFailingAllocator) BinaryWriter {
+		return .{.data = .init(allocator)};
 	}

-	pub fn initCapacity(allocator: NeverFailingAllocator, endian: std.builtin.Endian, capacity: usize) BinaryWriter {
-		return .{.data = .initCapacity(allocator, capacity), .endian = endian};
+	pub fn initCapacity(allocator: NeverFailingAllocator, capacity: usize) BinaryWriter {
+		return .{.data = .initCapacity(allocator, capacity)};
 	}

 	pub fn deinit(self: *BinaryWriter) void {

@@ -1507,7 +1507,7 @@ pub const BinaryWriter = struct {
 			return self.writeInt(FullType, value);
 		}
 		const bufSize = @divExact(@typeInfo(T).int.bits, 8);
-		std.mem.writeInt(T, self.data.addMany(bufSize)[0..bufSize], value, self.endian);
+		std.mem.writeInt(T, self.data.addMany(bufSize)[0..bufSize], value, endian);
 	}

 	pub fn writeFloat(self: *BinaryWriter, T: type, value: T) void {
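Taken together, the new API shape: readers and writers are constructed without an endianness, and every readInt/writeInt resolves to the file-scope constant. A compilable miniature of that shape and its call sites (reduced from the BinaryReader hunks above; only fixed-size ints, to stay self-contained):

const std = @import("std");

const endian: std.builtin.Endian = .big;

// Miniature of the new BinaryReader: no endian field, no endian parameter.
const MiniReader = struct {
	remaining: []const u8,

	pub fn init(data: []const u8) MiniReader {
		return .{.remaining = data};
	}

	pub fn readInt(self: *MiniReader, T: type) error{OutOfBounds}!T {
		const bufSize = @divExact(@typeInfo(T).int.bits, 8);
		if(self.remaining.len < bufSize) return error.OutOfBounds;
		defer self.remaining = self.remaining[bufSize..];
		return std.mem.readInt(T, self.remaining[0..bufSize], endian);
	}
};

test "call sites no longer mention the byte order" {
	const data = [_]u8{0x00, 0x00, 0x00, 0x2a, 0x01};
	var reader = MiniReader.init(&data);
	try std.testing.expectEqual(@as(u32, 42), try reader.readInt(u32));
	try std.testing.expectEqual(@as(u8, 1), try reader.readInt(u8));
}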