Mirror of https://github.com/mhx/dwarfs.git (synced 2025-09-10 13:04:15 -04:00)
chore: fix more spelling errors

parent 118ce8213e
commit 109769a32f
@@ -113,7 +113,7 @@ class brotli_block_decompressor final : public block_decompressor::impl {
     }
     if (!::BrotliDecoderSetParameter(decoder_.get(),
                                      BROTLI_DECODER_PARAM_LARGE_WINDOW, 1)) {
-      DWARFS_THROW(runtime_error, "could not set brotli decoder paramter");
+      DWARFS_THROW(runtime_error, "could not set brotli decoder parameter");
     }
     try {
       decompressed_.reserve(uncompressed_size_);
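For context, the call touched in this hunk enables Brotli's large-window mode on the decoder. Below is a minimal, self-contained sketch of that setup using the public Brotli C API; it is not the DwarFS wrapper, and it reports failure with std::runtime_error instead of DWARFS_THROW.

```cpp
#include <brotli/decode.h>

#include <memory>
#include <stdexcept>

// Sketch only: create a decoder and opt in to large-window streams,
// mirroring the BrotliDecoderSetParameter() call shown in the hunk above.
int main() {
  std::unique_ptr<BrotliDecoderState, decltype(&::BrotliDecoderDestroyInstance)>
      decoder{::BrotliDecoderCreateInstance(nullptr, nullptr, nullptr),
              &::BrotliDecoderDestroyInstance};

  if (!decoder) {
    throw std::runtime_error("could not create brotli decoder");
  }

  // Streams compressed with BROTLI_PARAM_LARGE_WINDOW can only be decoded
  // when the decoder enables the matching parameter.
  if (!::BrotliDecoderSetParameter(decoder.get(),
                                   BROTLI_DECODER_PARAM_LARGE_WINDOW, 1)) {
    throw std::runtime_error("could not set brotli decoder parameter");
  }

  return 0;
}
```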
@@ -148,7 +148,7 @@ class brotli_block_decompressor final : public block_decompressor::impl {
 
     if (res == BROTLI_DECODER_RESULT_ERROR) {
       DWARFS_THROW(runtime_error,
-                   fmt::format("brotli errro: {}", brotli_error()));
+                   fmt::format("brotli error: {}", brotli_error()));
     }
 
     decompressed_.resize(std::distance(decompressed_.data(), next_out));
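This second hunk sits in the streaming decode path. A hedged sketch of how such a path typically checks the result of BrotliDecoderDecompressStream and trims the output buffer, again using plain Brotli API calls; brotli_error() is DwarFS-specific, so the sketch calls BrotliDecoderErrorString directly.

```cpp
#include <brotli/decode.h>

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Sketch only: decode a complete buffer in one call and translate a
// BROTLI_DECODER_RESULT_ERROR into an exception, similar in spirit to the
// error path shown in the hunk above.
std::vector<uint8_t> brotli_decompress(BrotliDecoderState* decoder,
                                       uint8_t const* data, size_t size,
                                       size_t uncompressed_size) {
  std::vector<uint8_t> out(uncompressed_size);

  size_t avail_in = size;
  uint8_t const* next_in = data;
  size_t avail_out = out.size();
  uint8_t* next_out = out.data();

  auto res = ::BrotliDecoderDecompressStream(decoder, &avail_in, &next_in,
                                             &avail_out, &next_out, nullptr);

  if (res == BROTLI_DECODER_RESULT_ERROR) {
    throw std::runtime_error(
        std::string("brotli error: ") +
        ::BrotliDecoderErrorString(::BrotliDecoderGetErrorCode(decoder)));
  }

  // Keep only the bytes the decoder actually produced.
  out.resize(static_cast<size_t>(next_out - out.data()));
  return out;
}
```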
@@ -1680,7 +1680,7 @@ void metadata_<LoggerPolicy>::access(inode_view iv, int mode,
                                      mode, uid, gid);
 
   if (mode == F_OK) {
-    // easy; we're only interested in the file's existance
+    // easy; we're only interested in the file's existence
     ec.clear();
     return;
   }
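The comment fixed here lives in metadata_<LoggerPolicy>::access(), where F_OK short-circuits the permission check because having the inode already proves the file exists. A hypothetical, simplified sketch of an access()-style check with that early return; the signature and the permission-bit handling are assumptions for illustration, not the DwarFS implementation.

```cpp
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <system_error>

// Hypothetical sketch of an access()-style permission check against
// already-loaded metadata; not DwarFS code.
void check_access(struct ::stat const& st, int mode, ::uid_t uid, ::gid_t gid,
                  std::error_code& ec) {
  if (mode == F_OK) {
    // Easy: we already hold the file's metadata, so it exists.
    ec.clear();
    return;
  }

  // Select the permission triple that applies to the caller.
  int shift = 0; // "other" bits
  if (uid == st.st_uid) {
    shift = 6; // "owner" bits
  } else if (gid == st.st_gid) {
    shift = 3; // "group" bits
  }

  int granted = (st.st_mode >> shift) & 07;

  // R_OK, W_OK and X_OK line up with the read/write/execute permission bits.
  ec = (mode & granted) == mode
           ? std::error_code{}
           : std::make_error_code(std::errc::permission_denied);
}
```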
@@ -65,7 +65,7 @@ namespace {
 /**
  * Segmenter Strategy
  *
- * For each *block*, start new rolling hash. The hashes are associcated
+ * For each *block*, start new rolling hash. The hashes are associated
  * with the block, new hash-offset-pairs will only be added as the block
  * grows. We only need to store a hash-offset-pair every N bytes, with N
  * being configurable (typically half of the window size so we find all
@@ -76,7 +76,7 @@ namespace {
  * will be scanned for matches. Old file data beyond the moving window will
  * be added to the current *block*, causing the rolling *block* hash to also
  * advance. Data needs to be added to the block only in increments at which
- * a new hash valus is computed.
+ * a new hash values is computed.
  *
  * This strategy ensures that we're using a deterministic amount of memory
  * (proportional to block size and history block count).
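As a rough illustration of the strategy this comment block describes (one rolling hash per block, with a hash/offset pair recorded only every N bytes), here is a hypothetical toy sketch; the class, the polynomial hash, and the interface are invented for illustration and are not the DwarFS segmenter.

```cpp
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

// Toy sketch of the documented strategy: one rolling hash per block, and a
// hash -> offset entry remembered only every `lookup_interval` bytes (for
// example half the window size), keeping memory proportional to block size.
class block_hash_index {
 public:
  block_hash_index(size_t window_size, size_t lookup_interval)
      : window_size_{window_size}
      , lookup_interval_{lookup_interval}
      , pow_{power(window_size)} {}

  // Append one byte of block data and advance the rolling hash.
  void append(uint8_t byte) {
    data_.push_back(byte);

    // Simple polynomial rolling hash over the last `window_size_` bytes;
    // a real segmenter would use a proper cyclic hash function.
    hash_ = hash_ * kBase + byte;
    if (data_.size() > window_size_) {
      hash_ -= data_[data_.size() - window_size_ - 1] * pow_;
    }

    // Store a hash/offset pair only every `lookup_interval_` bytes.
    if (data_.size() >= window_size_ &&
        (data_.size() - window_size_) % lookup_interval_ == 0) {
      offsets_.emplace(hash_, data_.size() - window_size_);
    }
  }

  // Offsets whose window may match the given rolling hash value.
  auto candidates(uint64_t hash) const { return offsets_.equal_range(hash); }

 private:
  static constexpr uint64_t kBase = 31;

  static uint64_t power(size_t n) {
    uint64_t p = 1;
    for (size_t i = 0; i < n; ++i) {
      p *= kBase;
    }
    return p;
  }

  size_t window_size_;
  size_t lookup_interval_;
  uint64_t pow_;
  uint64_t hash_{0};
  std::vector<uint8_t> data_;
  std::unordered_multimap<uint64_t, size_t> offsets_;
};
```

In such a scheme, match lookup would compute the same rolling hash over the incoming file's window and probe candidates() for offsets to verify byte by byte, which is why storing a pair every half window is enough to find all matches of at least one window length.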