diff --git a/src/compression/brotli.cpp b/src/compression/brotli.cpp index d06563f0..1bbab824 100644 --- a/src/compression/brotli.cpp +++ b/src/compression/brotli.cpp @@ -113,7 +113,7 @@ class brotli_block_decompressor final : public block_decompressor::impl { } if (!::BrotliDecoderSetParameter(decoder_.get(), BROTLI_DECODER_PARAM_LARGE_WINDOW, 1)) { - DWARFS_THROW(runtime_error, "could not set brotli decoder paramter"); + DWARFS_THROW(runtime_error, "could not set brotli decoder parameter"); } try { decompressed_.reserve(uncompressed_size_); @@ -148,7 +148,7 @@ class brotli_block_decompressor final : public block_decompressor::impl { if (res == BROTLI_DECODER_RESULT_ERROR) { DWARFS_THROW(runtime_error, - fmt::format("brotli errro: {}", brotli_error())); + fmt::format("brotli error: {}", brotli_error())); } decompressed_.resize(std::distance(decompressed_.data(), next_out)); diff --git a/src/reader/internal/metadata_v2.cpp b/src/reader/internal/metadata_v2.cpp index 7f25fdb6..f75a6acd 100644 --- a/src/reader/internal/metadata_v2.cpp +++ b/src/reader/internal/metadata_v2.cpp @@ -1680,7 +1680,7 @@ void metadata_::access(inode_view iv, int mode, mode, uid, gid); if (mode == F_OK) { - // easy; we're only interested in the file's existance + // easy; we're only interested in the file's existence ec.clear(); return; } diff --git a/src/writer/segmenter.cpp b/src/writer/segmenter.cpp index 46df1cdd..2df0b1b8 100644 --- a/src/writer/segmenter.cpp +++ b/src/writer/segmenter.cpp @@ -65,7 +65,7 @@ namespace { /** * Segmenter Strategy * - * For each *block*, start new rolling hash. The hashes are associcated + * For each *block*, start new rolling hash. The hashes are associated * with the block, new hash-offset-pairs will only be added as the block * grows. We only need to store a hash-offset-pair every N bytes, with N * being configurable (typically half of the window size so we find all @@ -76,7 +76,7 @@ namespace { * will be scanned for matches. 
Old file data beyond the moving window will * be added to the current *block*, causing the rolling *block* hash to also * advance. Data needs to be added to the block only in increments at which - * a new hash valus is computed. + * a new hash value is computed. * * This strategy ensures that we're using a deterministic amount of memory * (proportional to block size and history block count).