mirror of https://github.com/mhx/dwarfs.git, synced 2025-08-03 17:56:12 -04:00
fix(filesystem_writer): segfault on bad_compression_ratio_error
When recompressing a filesystem where some blocks cannot be compressed with the selected algorithm because of a `bad_compression_ratio_error`, the resulting `block` was left empty after the refactoring done in 06f8728cc. It's a really easy fix, pretty much just doing what the code did before the refactor: initialize `block` with the decompressed data before attempting to compress it.

Since `clang-format` reformatted the whole lambda after the change, I've pulled the lambda body out into a separate function, which makes this change look much bigger than it actually is.
This commit is contained in:
parent be8a152a98
commit f01cf2c76a
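In essence, the bug is that `block` was only ever assigned by the compressor, so a `bad_compression_ratio_error` left it empty and the empty buffer was then stored and later dereferenced. The sketch below is a minimal, self-contained analogue of the bug and the fix, not the dwarfs code itself; every name in it (`fake_compress`, `recompress_block`, the `buggy` flag, the toy size-based ratio check) is made up for illustration.

#include <iostream>
#include <stdexcept>
#include <vector>

// Toy error type standing in for dwarfs' bad_compression_ratio_error.
struct bad_compression_ratio_error : std::runtime_error {
  bad_compression_ratio_error() : std::runtime_error("bad compression ratio") {}
};

// Pretend compressor: refuses inputs that would not shrink in this toy model.
std::vector<char> fake_compress(std::vector<char> const& in) {
  if (in.size() < 16) {
    throw bad_compression_ratio_error{};
  }
  return std::vector<char>(in.begin(), in.begin() + in.size() / 2);
}

// buggy == true mirrors the pre-fix behaviour: the output buffer is only
// assigned by the compressor, so it stays empty when the ratio check throws.
std::vector<char> recompress_block(std::vector<char> const& decompressed,
                                   bool buggy) {
  std::vector<char> block;
  if (!buggy) {
    block = decompressed; // the fix: start from the decompressed data
  }
  try {
    block = fake_compress(decompressed);
  } catch (bad_compression_ratio_error const&) {
    // fall back to storing the block uncompressed; with the bug, `block`
    // is still empty here and the empty buffer is what gets written out
  }
  return block;
}

int main() {
  std::vector<char> data(8, 'x'); // small enough to trigger the fallback
  std::cout << "buggy path stores " << recompress_block(data, true).size()
            << " bytes, fixed path stores "
            << recompress_block(data, false).size() << " bytes\n";
}

Running this prints that the buggy path stores 0 bytes while the fixed path stores the 8 decompressed bytes, which is exactly the difference the two hunks below make for incompressible blocks.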
@@ -387,46 +387,10 @@ class rewritten_fsblock : public fsblock::impl {
     std::promise<void> prom;
     future_ = prom.get_future();
 
-    wg.add_job([this, prom = std::move(prom),
-                meta = std::move(meta)]() mutable {
-      try {
-        shared_byte_buffer block;
-
-        {
-          // TODO: we don't have to do this for uncompressed blocks
-          block_decompressor bd(data_comp_type_, data_);
-          auto buffer = bd.start_decompression(malloc_byte_buffer::create());
-          bd.decompress_frame(bd.uncompressed_size());
-
-          if (!meta) {
-            meta = bd.metadata();
-          }
-
-          pctx_->bytes_in += buffer.size(); // TODO: data_.size()?
-
-          try {
-            if (meta) {
-              block = bc_.compress(buffer, *meta);
-            } else {
-              block = bc_.compress(buffer);
-            }
-          } catch (bad_compression_ratio_error const&) {
-            comp_type_ = compression_type::NONE;
-          }
-        }
-
-        pctx_->bytes_out += block.size();
-
-        {
-          std::lock_guard lock(mx_);
-          block_data_.emplace(std::move(block));
-        }
-
-        prom.set_value();
-      } catch (...) {
-        prom.set_exception(std::current_exception());
-      }
-    });
+    wg.add_job(
+        [this, prom = std::move(prom), meta = std::move(meta)]() mutable {
+          compress_job(std::move(prom), std::move(meta));
+        });
   }
 
   void wait_until_compressed() override { future_.get(); }
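The pattern kept by this first hunk, handing a std::promise into the queued job and waiting on the matching std::future in wait_until_compressed(), is plain standard-library plumbing. The sketch below shows that hand-off in isolation; the worker_group here is a trivial stand-in written for this example, not the dwarfs worker group, whose API may differ.

#include <future>
#include <iostream>
#include <thread>
#include <utility>
#include <vector>

// Trivial stand-in for a worker group: runs each job on its own thread and
// joins everything on destruction.
class worker_group {
 public:
  template <typename Job>
  void add_job(Job&& job) {
    threads_.emplace_back(std::forward<Job>(job));
  }
  ~worker_group() {
    for (auto& t : threads_) {
      t.join();
    }
  }

 private:
  std::vector<std::thread> threads_;
};

int main() {
  worker_group wg;
  std::promise<void> prom;
  std::future<void> future = prom.get_future();

  // Same shape as the diff above: move the promise into the job, fulfil it
  // on success, or forward the exception so the waiter sees it.
  wg.add_job([prom = std::move(prom)]() mutable {
    try {
      // ... the compression work would happen here ...
      prom.set_value();
    } catch (...) {
      prom.set_exception(std::current_exception());
    }
  });

  future.get(); // like wait_until_compressed(): blocks and rethrows errors
  std::cout << "job finished\n";
}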
@@ -480,6 +444,46 @@ class rewritten_fsblock : public fsblock::impl {
   }
 
  private:
+  void compress_job(std::promise<void> prom, std::optional<std::string> meta) {
+    try {
+      shared_byte_buffer block;
+
+      {
+        // TODO: we don't have to do this for uncompressed blocks
+        block_decompressor bd(data_comp_type_, data_);
+        block = bd.start_decompression(malloc_byte_buffer::create());
+        bd.decompress_frame(bd.uncompressed_size());
+
+        if (!meta) {
+          meta = bd.metadata();
+        }
+
+        pctx_->bytes_in += block.size(); // TODO: data_.size()?
+
+        try {
+          if (meta) {
+            block = bc_.compress(block, *meta);
+          } else {
+            block = bc_.compress(block);
+          }
+        } catch (bad_compression_ratio_error const&) {
+          comp_type_ = compression_type::NONE;
+        }
+      }
+
+      pctx_->bytes_out += block.size();
+
+      {
+        std::lock_guard lock(mx_);
+        block_data_.emplace(std::move(block));
+      }
+
+      prom.set_value();
+    } catch (...) {
+      prom.set_exception(std::current_exception());
+    }
+  }
+
   section_type const type_;
   block_compressor const& bc_;
   mutable std::recursive_mutex mx_;
 
@@ -1623,6 +1623,16 @@ TEST_P(mkdwarfs_recompress_test, recompress) {
     EXPECT_EQ(2, history.size());
   }
 
+  {
+    auto t = tester(image);
+    ASSERT_EQ(0, t.run({"-i", image_file, "-o", "-", "--recompress", "-l1"}))
+        << t.err();
+    auto fs = t.fs_from_stdout();
+    EXPECT_TRUE(fs.find("/random"));
+    auto history = fs.info_as_json(history_opts)["history"];
+    EXPECT_EQ(2, history.size());
+  }
+
   {
     auto t = tester(image);
     EXPECT_NE(0, t.run({"-i", image_file, "-o", "-", "--recompress=foo"}));