From f01cf2c76ab807e9beb69bfe627302d97ce54bb3 Mon Sep 17 00:00:00 2001 From: Marcus Holland-Moritz Date: Wed, 14 May 2025 06:43:30 +0200 Subject: [PATCH] fix(filesystem_writer): segfault on bad_compression_ratio_error When recompressing a filesystem where some blocks cannot be compressed using the selected algorithm because of a `bad_compression_ratio_error`, the resulting `block` was left empty after the refactoring done in 06f8728cc. It's a really easy fix, pretty much just doing what the code did before the refactor: initialize `block` with the decompressed data before attempting to compress it. Since `clang-format` reformatted the whole lambda after the change, I've pulled the lambda body out into a separate function, which makes this change much bigger than it is. --- src/writer/filesystem_writer.cpp | 84 +++++++++++++++++--------------- test/tool_main_test.cpp | 10 ++++ 2 files changed, 54 insertions(+), 40 deletions(-) diff --git a/src/writer/filesystem_writer.cpp b/src/writer/filesystem_writer.cpp index 217c7c41..dd6d6563 100644 --- a/src/writer/filesystem_writer.cpp +++ b/src/writer/filesystem_writer.cpp @@ -387,46 +387,10 @@ class rewritten_fsblock : public fsblock::impl { std::promise prom; future_ = prom.get_future(); - wg.add_job([this, prom = std::move(prom), - meta = std::move(meta)]() mutable { - try { - shared_byte_buffer block; - - { - // TODO: we don't have to do this for uncompressed blocks - block_decompressor bd(data_comp_type_, data_); - auto buffer = bd.start_decompression(malloc_byte_buffer::create()); - bd.decompress_frame(bd.uncompressed_size()); - - if (!meta) { - meta = bd.metadata(); - } - - pctx_->bytes_in += buffer.size(); // TODO: data_.size()? 
- - try { - if (meta) { - block = bc_.compress(buffer, *meta); - } else { - block = bc_.compress(buffer); - } - } catch (bad_compression_ratio_error const&) { - comp_type_ = compression_type::NONE; - } - } - - pctx_->bytes_out += block.size(); - - { - std::lock_guard lock(mx_); - block_data_.emplace(std::move(block)); - } - - prom.set_value(); - } catch (...) { - prom.set_exception(std::current_exception()); - } - }); + wg.add_job( + [this, prom = std::move(prom), meta = std::move(meta)]() mutable { + compress_job(std::move(prom), std::move(meta)); + }); } void wait_until_compressed() override { future_.get(); } @@ -480,6 +444,46 @@ class rewritten_fsblock : public fsblock::impl { } private: + void compress_job(std::promise prom, std::optional meta) { + try { + shared_byte_buffer block; + + { + // TODO: we don't have to do this for uncompressed blocks + block_decompressor bd(data_comp_type_, data_); + block = bd.start_decompression(malloc_byte_buffer::create()); + bd.decompress_frame(bd.uncompressed_size()); + + if (!meta) { + meta = bd.metadata(); + } + + pctx_->bytes_in += block.size(); // TODO: data_.size()? + + try { + if (meta) { + block = bc_.compress(block, *meta); + } else { + block = bc_.compress(block); + } + } catch (bad_compression_ratio_error const&) { + comp_type_ = compression_type::NONE; + } + } + + pctx_->bytes_out += block.size(); + + { + std::lock_guard lock(mx_); + block_data_.emplace(std::move(block)); + } + + prom.set_value(); + } catch (...) 
{ + prom.set_exception(std::current_exception()); + } + } + section_type const type_; block_compressor const& bc_; mutable std::recursive_mutex mx_; diff --git a/test/tool_main_test.cpp b/test/tool_main_test.cpp index a13221c0..c90dd54f 100644 --- a/test/tool_main_test.cpp +++ b/test/tool_main_test.cpp @@ -1623,6 +1623,16 @@ TEST_P(mkdwarfs_recompress_test, recompress) { EXPECT_EQ(2, history.size()); } + { + auto t = tester(image); + ASSERT_EQ(0, t.run({"-i", image_file, "-o", "-", "--recompress", "-l1"})) + << t.err(); + auto fs = t.fs_from_stdout(); + EXPECT_TRUE(fs.find("/random")); + auto history = fs.info_as_json(history_opts)["history"]; + EXPECT_EQ(2, history.size()); + } + { auto t = tester(image); EXPECT_NE(0, t.run({"-i", image_file, "-o", "-", "--recompress=foo"}));