From d417642e7631633fc864acf172c8c4512b72d6d7 Mon Sep 17 00:00:00 2001
From: Nando Lawson
Date: Sat, 5 Oct 2024 10:09:04 +0200
Subject: [PATCH 1/2] Add a lot of changes that Clippy suggested when enabling some lints

None of the changes affect the behavior of the code
---
 src/alphabet.rs                      |  3 ++-
 src/chunked_encoder.rs               |  2 +-
 src/decode.rs                        | 13 +++++++------
 src/encode.rs                        | 13 +++++++------
 src/engine/general_purpose/decode.rs | 10 +++++-----
 src/engine/general_purpose/mod.rs    | 25 +++++++++++++++----------
 src/engine/mod.rs                    | 18 +++++++++---------
 src/lib.rs                           |  1 -
 src/read/decoder.rs                  |  2 +-
 src/write/encoder.rs                 | 20 ++++++++------------
 src/write/encoder_string_writer.rs   |  8 ++++----
 11 files changed, 59 insertions(+), 56 deletions(-)

diff --git a/src/alphabet.rs b/src/alphabet.rs
index b07bfdf..a6b2466 100644
--- a/src/alphabet.rs
+++ b/src/alphabet.rs
@@ -125,6 +125,7 @@ impl Alphabet {
     }
 
     /// Create a `&str` from the symbols in the `Alphabet`
+    #[must_use]
     pub fn as_str(&self) -> &str {
         core::str::from_utf8(&self.symbols).unwrap()
     }
@@ -198,7 +199,7 @@ pub const IMAP_MUTF7: Alphabet = Alphabet::from_str_unchecked(
     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,",
 );
 
-/// The alphabet used in BinHex 4.0 files.
+/// The alphabet used in `BinHex` 4.0 files.
 ///
 /// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt)
 pub const BIN_HEX: Alphabet = Alphabet::from_str_unchecked(
diff --git a/src/chunked_encoder.rs b/src/chunked_encoder.rs
index 817b339..5ff01e9 100644
--- a/src/chunked_encoder.rs
+++ b/src/chunked_encoder.rs
@@ -7,7 +7,7 @@ use alloc::string::String;
 #[cfg(any(feature = "alloc", test))]
 use core::str;
 
-/// The output mechanism for ChunkedEncoder's encoded bytes.
+/// The output mechanism for `ChunkedEncoder`'s encoded bytes.
 pub trait Sink {
     type Error;
 
diff --git a/src/decode.rs b/src/decode.rs
index 6df8aba..236d30f 100644
--- a/src/decode.rs
+++ b/src/decode.rs
@@ -21,7 +21,7 @@ pub enum DecodeError {
     InvalidLength(usize),
     /// The last non-padding input symbol's encoded 6 bits have nonzero bits that will be discarded.
     /// This is indicative of corrupted or truncated Base64.
-    /// Unlike [DecodeError::InvalidByte], which reports symbols that aren't in the alphabet,
+    /// Unlike [`DecodeError::InvalidByte`], which reports symbols that aren't in the alphabet,
     /// this error is for symbols that are in the alphabet but represent nonsensical encodings.
     InvalidLastSymbol(usize, u8),
     /// The nature of the padding was not as configured: absent or incorrect when it must be
@@ -50,7 +50,7 @@ impl error::Error for DecodeError {}
 /// Errors that can occur while decoding into a slice.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum DecodeSliceError {
-    /// A [DecodeError] occurred
+    /// A [`DecodeError`] occurred
     DecodeError(DecodeError),
     /// The provided slice is too small.
     OutputSliceTooSmall,
@@ -83,7 +83,7 @@ impl From<DecodeError> for DecodeSliceError {
 /// Decode base64 using the [`STANDARD` engine](STANDARD).
 ///
-/// See [Engine::decode].
+/// See [`Engine::decode`].
 #[deprecated(since = "0.21.0", note = "Use Engine::decode")]
 #[cfg(any(feature = "alloc", test))]
 pub fn decode<T: AsRef<[u8]>>(input: T) -> Result<Vec<u8>, DecodeError> {
@@ -92,7 +92,7 @@ pub fn decode<T: AsRef<[u8]>>(input: T) -> Result<Vec<u8>, DecodeError> {
 /// Decode from string reference as octets using the specified [Engine].
 ///
-/// See [Engine::decode].
+/// See [`Engine::decode`].
 ///Returns a `Result` containing a `Vec<u8>`.
 #[deprecated(since = "0.21.0", note = "Use Engine::decode")]
 #[cfg(any(feature = "alloc", test))]
@@ -105,7 +105,7 @@ pub fn decode_engine<E: Engine, T: AsRef<[u8]>>(
     input: T,
 /// Decode from string reference as octets.
 ///
-/// See [Engine::decode_vec].
+/// See [`Engine::decode_vec`].
 #[cfg(any(feature = "alloc", test))]
 #[deprecated(since = "0.21.0", note = "Use Engine::decode_vec")]
 pub fn decode_engine_vec<E: Engine, T: AsRef<[u8]>>(
@@ -118,7 +118,7 @@ pub fn decode_engine_vec<E: Engine, T: AsRef<[u8]>>(
 /// Decode the input into the provided output slice.
 ///
-/// See [Engine::decode_slice].
+/// See [`Engine::decode_slice`].
 #[deprecated(since = "0.21.0", note = "Use Engine::decode_slice")]
 pub fn decode_engine_slice<E: Engine, T: AsRef<[u8]>>(
     input: T,
@@ -146,6 +146,7 @@ pub fn decode_engine_slice<E: Engine, T: AsRef<[u8]>>(
 /// // start of the next quad of encoded symbols
 /// assert_eq!(6, decoded_len_estimate(5));
 /// ```
+#[must_use]
 pub fn decoded_len_estimate(encoded_len: usize) -> usize {
     STANDARD
         .internal_decoded_len_estimate(encoded_len)
diff --git a/src/encode.rs b/src/encode.rs
index ae6d790..a00aa8f 100644
--- a/src/encode.rs
+++ b/src/encode.rs
@@ -11,7 +11,7 @@ use crate::PAD_BYTE;
 /// Encode arbitrary octets as base64 using the [`STANDARD` engine](STANDARD).
 ///
-/// See [Engine::encode].
+/// See [`Engine::encode`].
 #[allow(unused)]
 #[deprecated(since = "0.21.0", note = "Use Engine::encode")]
 #[cfg(any(feature = "alloc", test))]
@@ -21,7 +21,7 @@ pub fn encode<T: AsRef<[u8]>>(input: T) -> String {
 ///Encode arbitrary octets as base64 using the provided `Engine` into a new `String`.
 ///
-/// See [Engine::encode].
+/// See [`Engine::encode`].
 #[allow(unused)]
 #[deprecated(since = "0.21.0", note = "Use Engine::encode")]
 #[cfg(any(feature = "alloc", test))]
@@ -31,7 +31,7 @@ pub fn encode_engine<E: Engine, T: AsRef<[u8]>>(input: T, engine: &E) -> String
 ///Encode arbitrary octets as base64 into a supplied `String`.
 ///
-/// See [Engine::encode_string].
+/// See [`Engine::encode_string`].
 #[allow(unused)]
 #[deprecated(since = "0.21.0", note = "Use Engine::encode_string")]
 #[cfg(any(feature = "alloc", test))]
@@ -40,12 +40,12 @@ pub fn encode_engine_string<E: Engine, T: AsRef<[u8]>>(
     output_buf: &mut String,
     engine: &E,
 ) {
-    engine.encode_string(input, output_buf)
+    engine.encode_string(input, output_buf);
 }
 
 /// Encode arbitrary octets as base64 into a supplied slice.
 ///
-/// See [Engine::encode_slice].
+/// See [`Engine::encode_slice`].
 #[allow(unused)]
 #[deprecated(since = "0.21.0", note = "Use Engine::encode_slice")]
 pub fn encode_engine_slice<E: Engine, T: AsRef<[u8]>>(
     input: T,
@@ -58,7 +58,7 @@ pub fn encode_engine_slice<E: Engine, T: AsRef<[u8]>>(
 /// B64-encode and pad (if configured).
 ///
-/// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short
+/// This helper exists to avoid recalculating `encoded_size`, which is relatively expensive on short
 /// inputs.
 ///
 /// `encoded_size` is the encoded size calculated for `input`.
@@ -94,6 +94,7 @@ pub(crate) fn encode_with_padding(
 ///
 /// Returns `None` if the encoded length can't be represented in `usize`. This will happen for
 /// input lengths in approximately the top quarter of the range of `usize`.
+#[must_use]
 pub const fn encoded_len(bytes_len: usize, padding: bool) -> Option<usize> {
     let rem = bytes_len % 3;
diff --git a/src/engine/general_purpose/decode.rs b/src/engine/general_purpose/decode.rs
index b55d3fc..64d1ae6 100644
--- a/src/engine/general_purpose/decode.rs
+++ b/src/engine/general_purpose/decode.rs
@@ -15,7 +15,7 @@ impl GeneralPurposeEstimate {
         let rem = encoded_len % 4;
         Self {
             rem,
-            conservative_decoded_len: (encoded_len / 4 + (rem > 0) as usize) * 3,
+            conservative_decoded_len: (encoded_len / 4 + usize::from(rem > 0)) * 3,
         }
     }
 }
@@ -26,7 +26,7 @@ impl DecodeEstimate for GeneralPurposeEstimate {
     }
 }
 
-/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs.
+/// Helper to avoid duplicating `num_chunks` calculation, which is costly on short inputs.
 /// Returns the decode metadata, or an error.
 // We're on the fragile edge of compiler heuristics here. If this is not inlined, slow. If this is
 // inlined(always), a different slow. plain ol' inline makes the benchmarks happiest at the moment,
 #[inline]
 pub(crate) fn decode_helper(
     input: &[u8],
-    estimate: GeneralPurposeEstimate,
+    estimate: &GeneralPurposeEstimate,
     output: &mut [u8],
     decode_table: &[u8; 256],
     decode_allow_trailing_bits: bool,
@@ -150,7 +150,7 @@ pub(crate) fn complete_quads_len(
         .len()
         .saturating_sub(input_len_rem)
         // if rem was 0, subtract 4 to avoid padding
-        .saturating_sub((input_len_rem == 0) as usize * 4);
+        .saturating_sub(usize::from(input_len_rem == 0) * 4);
     debug_assert!(
         input.is_empty() || (1..=4).contains(&(input.len() - input_complete_nonterminal_quads_len))
     );
@@ -251,7 +251,7 @@ fn decode_chunk_8(
     Ok(())
 }
 
-/// Like [decode_chunk_8] but for 4 bytes of input and 3 bytes of output.
+/// Like [`decode_chunk_8`] but for 4 bytes of input and 3 bytes of output.
 #[inline(always)]
 fn decode_chunk_4(
     input: &[u8],
diff --git a/src/engine/general_purpose/mod.rs b/src/engine/general_purpose/mod.rs
index 6fe9580..9b60029 100644
--- a/src/engine/general_purpose/mod.rs
+++ b/src/engine/general_purpose/mod.rs
@@ -1,4 +1,4 @@
-//! Provides the [GeneralPurpose] engine and associated config types.
+//! Provides the [`GeneralPurpose`] engine and associated config types.
 use crate::{
     alphabet,
     alphabet::Alphabet,
@@ -32,6 +32,7 @@ impl GeneralPurpose {
     ///
     /// While not very expensive to initialize, ideally these should be cached
     /// if the engine will be used repeatedly.
+    #[must_use]
     pub const fn new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self {
         Self {
             encode_table: encode_table(alphabet),
@@ -176,7 +177,7 @@ impl super::Engine for GeneralPurpose {
     ) -> Result<DecodeMetadata, DecodeSliceError> {
         decode::decode_helper(
             input,
-            estimate,
+            &estimate,
             output,
             &self.decode_table,
             self.config.decode_allow_trailing_bits,
@@ -206,7 +207,7 @@ pub(crate) const fn encode_table(alphabet: &Alphabet) -> [u8; 64] {
 }
 
 /// Returns a table mapping base64 bytes as the lookup index to either:
-/// - [INVALID_VALUE] for bytes that aren't members of the alphabet
+/// - [`INVALID_VALUE`] for bytes that aren't members of the alphabet
 /// - a byte whose lower 6 bits are the value that was encoded into the index byte
 pub(crate) const fn decode_table(alphabet: &Alphabet) -> [u8; 256] {
     let mut decode_table = [INVALID_VALUE; 256];
@@ -238,7 +239,7 @@ fn read_u64(s: &[u8]) -> u64 {
 /// // further customize using `.with_*` methods as needed
 /// ```
 ///
-/// The constants [PAD] and [NO_PAD] cover most use cases.
+/// The constants [PAD] and [`NO_PAD`] cover most use cases.
 ///
 /// To specify the characters used, see [Alphabet].
 #[derive(Clone, Copy, Debug)]
@@ -254,6 +255,7 @@ impl GeneralPurposeConfig {
     ///
     /// This probably matches most people's expectations, but consider disabling padding to save
     /// a few bytes unless you specifically need it for compatibility with some legacy system.
+    #[must_use]
     pub const fn new() -> Self {
         Self {
             // RFC states that padding must be applied by default
@@ -273,6 +275,7 @@ impl GeneralPurposeConfig {
     ///
     /// For new applications, consider not using padding if the decoders you're using don't require
     /// padding to be present.
+    #[must_use]
     pub const fn with_encode_padding(self, padding: bool) -> Self {
         Self {
             encode_padding: padding,
@@ -287,6 +290,7 @@ impl GeneralPurposeConfig {
     /// character as per [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode).
     /// If invalid trailing bits are present and this is `true`, those bits will
     /// be silently ignored, else `DecodeError::InvalidLastSymbol` will be emitted.
+    #[must_use]
     pub const fn with_decode_allow_trailing_bits(self, allow: bool) -> Self {
         Self {
             decode_allow_trailing_bits: allow,
@@ -307,6 +311,7 @@ impl GeneralPurposeConfig {
     ///
     /// Or, if "canonical" in your circumstance means _no_ padding rather than padding to the
     /// next multiple of four, there's `DecodePaddingMode::RequireNoPadding`.
+    #[must_use]
     pub const fn with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self {
         Self {
             decode_padding_mode: mode,
@@ -316,7 +321,7 @@ impl GeneralPurposeConfig {
 }
 
 impl Default for GeneralPurposeConfig {
-    /// Delegates to [GeneralPurposeConfig::new].
+    /// Delegates to [`GeneralPurposeConfig::new`].
     fn default() -> Self {
         Self::new()
     }
@@ -328,21 +333,21 @@ impl Config for GeneralPurposeConfig {
     }
 }
 
-/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [PAD] config.
+/// A [`GeneralPurpose`] engine using the [`alphabet::STANDARD`] base64 alphabet and [PAD] config.
 pub const STANDARD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, PAD);
 
-/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [NO_PAD] config.
+/// A [`GeneralPurpose`] engine using the [`alphabet::STANDARD`] base64 alphabet and [`NO_PAD`] config.
 pub const STANDARD_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
 
-/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [PAD] config.
+/// A [`GeneralPurpose`] engine using the [`alphabet::URL_SAFE`] base64 alphabet and [PAD] config.
 pub const URL_SAFE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, PAD);
 
-/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [NO_PAD] config.
+/// A [`GeneralPurpose`] engine using the [`alphabet::URL_SAFE`] base64 alphabet and [`NO_PAD`] config.
 pub const URL_SAFE_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
 
 /// Include padding bytes when encoding, and require that they be present when decoding.
 ///
-/// This is the standard per the base64 RFC, but consider using [NO_PAD] instead as padding serves
+/// This is the standard per the base64 RFC, but consider using [`NO_PAD`] instead as padding serves
 /// little purpose in practice.
 pub const PAD: GeneralPurposeConfig = GeneralPurposeConfig::new();
diff --git a/src/engine/mod.rs b/src/engine/mod.rs
index f2cc33f..93ae5d9 100644
--- a/src/engine/mod.rs
+++ b/src/engine/mod.rs
@@ -24,10 +24,10 @@ pub use general_purpose::{GeneralPurpose, GeneralPurposeConfig};
 /// An `Engine` provides low-level encoding and decoding operations that all other higher-level parts of the API use. Users of the library will generally not need to implement this.
 ///
 /// Different implementations offer different characteristics. The library currently ships with
-/// [GeneralPurpose] that offers good speed and works on any CPU, with more choices
+/// [`GeneralPurpose`] that offers good speed and works on any CPU, with more choices
 /// coming later, like a constant-time one when side channel resistance is called for, and vendor-specific vectorized ones for more speed.
 ///
-/// See [general_purpose::STANDARD_NO_PAD] if you just want standard base64. Otherwise, when possible, it's
+/// See [`general_purpose::STANDARD_NO_PAD`] if you just want standard base64. Otherwise, when possible, it's
 /// recommended to store the engine in a `const` so that references to it won't pose any lifetime
 /// issues, and to avoid repeating the cost of engine setup.
 ///
@@ -164,7 +164,7 @@ pub trait Engine: Send + Sync {
                 .expect("Writing to a String shouldn't fail");
         }
 
-        inner(self, input.as_ref(), output_buf)
+        inner(self, input.as_ref(), output_buf);
     }
 
     /// Encode arbitrary octets as base64 into a supplied slice.
@@ -345,9 +345,9 @@ pub trait Engine: Send + Sync {
     ///
     /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
     ///
-    /// See [crate::decoded_len_estimate] for calculating buffer sizes.
+    /// See [`crate::decoded_len_estimate`] for calculating buffer sizes.
     ///
-    /// See [Engine::decode_slice_unchecked] for a version that panics instead of returning an error
+    /// See [`Engine::decode_slice_unchecked`] for a version that panics instead of returning an error
     /// if the output buffer is too small.
     #[inline]
     fn decode_slice<T: AsRef<[u8]>>(
@@ -381,9 +381,9 @@ pub trait Engine: Send + Sync {
     ///
     /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
     ///
-    /// See [crate::decoded_len_estimate] for calculating buffer sizes.
+    /// See [`crate::decoded_len_estimate`] for calculating buffer sizes.
     ///
-    /// See [Engine::decode_slice] for a version that returns an error instead of panicking if the output
+    /// See [`Engine::decode_slice`] for a version that returns an error instead of panicking if the output
     /// buffer is too small.
     ///
     /// # Panics
@@ -422,7 +422,7 @@ pub trait Engine: Send + Sync {
 pub trait Config {
     /// Returns `true` if padding should be added after the encoded output.
     ///
-    /// Padding is added outside the engine's encode() since the engine may be used
+    /// Padding is added outside the engine's `encode()` since the engine may be used
     /// to encode only a chunk of the overall output, so it can't always know when
     /// the output is "done" and would therefore need padding (if configured).
     // It could be provided as a separate parameter when encoding, but that feels like
@@ -448,7 +448,7 @@ pub trait DecodeEstimate {
 /// Controls how pad bytes are handled when decoding.
 ///
 /// Each [Engine] must support at least the behavior indicated by
-/// [DecodePaddingMode::RequireCanonical], and may support other modes.
+/// [`DecodePaddingMode::RequireCanonical`], and may support other modes.
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum DecodePaddingMode {
     /// Canonical padding is allowed, but any fewer padding bytes than that is also allowed.
diff --git a/src/lib.rs b/src/lib.rs
index 73821e4..50dac7a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -247,7 +247,6 @@
 //!
 //! If length calculations result in overflowing `usize`, a panic will result.
 
-#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
 #![deny(
     missing_docs,
     trivial_casts,
diff --git a/src/read/decoder.rs b/src/read/decoder.rs
index 781f6f8..b3ba10b 100644
--- a/src/read/decoder.rs
+++ b/src/read/decoder.rs
@@ -222,7 +222,7 @@ impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> {
     /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
     /// written in `buf`.
     ///
-    /// Where possible, this function buffers base64 to minimize the number of read() calls to the
+    /// Where possible, this function buffers base64 to minimize the number of `read()` calls to the
     /// delegate reader.
     ///
     /// # Errors
diff --git a/src/write/encoder.rs b/src/write/encoder.rs
index 1c19bb4..6534400 100644
--- a/src/write/encoder.rs
+++ b/src/write/encoder.rs
@@ -63,7 +63,7 @@ const MIN_ENCODE_CHUNK_SIZE: usize = 3;
 pub struct EncoderWriter<'e, E: Engine, W: io::Write> {
     engine: &'e E,
     /// Where encoded data is written to. It's an Option as it's None immediately before Drop is
-    /// called so that finish() can return the underlying writer. None implies that finish() has
+    /// called so that `finish()` can return the underlying writer. None implies that `finish()` has
     /// been called successfully.
     delegate: Option<W>,
     /// Holds a partial chunk, if any, after the last `write()`, so that we may then fill the chunk
@@ -126,9 +126,7 @@ impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> {
     pub fn finish(&mut self) -> Result<W> {
         // If we could consume self in finish(), we wouldn't have to worry about this case, but
         // finish() is retryable in the face of I/O errors, so we can't consume here.
-        if self.delegate.is_none() {
-            panic!("Encoder has already had finish() called");
-        };
+        assert!(self.delegate.is_some(), "Encoder has already had finish() called");
 
         self.write_final_leftovers()?;
 
@@ -168,7 +166,7 @@ impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> {
     }
 
     /// Write as much of the encoded output to the delegate writer as it will accept, and store the
-    /// leftovers to be attempted at the next write() call. Updates `self.output_occupied_len`.
+    /// leftovers to be attempted at the next `write()` call. Updates `self.output_occupied_len`.
     ///
     /// # Errors
     ///
@@ -201,7 +199,7 @@ impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> {
 
     /// Write all buffered encoded output. If this returns `Ok`, `self.output_occupied_len` is `0`.
     ///
-    /// This is basically write_all for the remaining buffered data but without the undesirable
+    /// This is basically `write_all` for the remaining buffered data but without the undesirable
    /// abort-on-`Ok(0)` behavior.
     ///
     /// # Errors
     ///
@@ -217,7 +215,7 @@ impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> {
                 // other errors return
                 Err(e) => return Err(e),
                 // success no-ops because remaining length is already updated
-                Ok(_) => {}
+                Ok(()) => {}
             };
         }
 
@@ -263,9 +261,7 @@ impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> {
     ///
     /// Any errors emitted by the delegate writer are returned.
     fn write(&mut self, input: &[u8]) -> Result<usize> {
-        if self.delegate.is_none() {
-            panic!("Cannot write more after calling finish()");
-        }
+        assert!(self.delegate.is_some(), "Cannot write more after calling finish()");
 
         if input.is_empty() {
             return Ok(0);
         }
@@ -283,7 +279,7 @@ impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> {
             return self
                 .write_to_delegate(current_len)
                 // did not read any input
-                .map(|_| 0);
+                .map(|()| 0);
         }
 
         debug_assert_eq!(0, self.output_occupied_len);
@@ -374,7 +370,7 @@ impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> {
         self.write_to_delegate(encoded_size)
             // no matter whether we wrote the full encoded buffer or not, we consumed the same
             // input
-            .map(|_| extra_input_read_len + input_chunks_to_encode_len)
+            .map(|()| extra_input_read_len + input_chunks_to_encode_len)
             .map_err(|e| {
                 // in case we filled and encoded `extra`, reset extra_len
                 self.extra_input_occupied_len = orig_extra_len;
diff --git a/src/write/encoder_string_writer.rs b/src/write/encoder_string_writer.rs
index 9c02bcd..83d4082 100644
--- a/src/write/encoder_string_writer.rs
+++ b/src/write/encoder_string_writer.rs
@@ -3,7 +3,7 @@ use crate::engine::Engine;
 use std::io;
 
 /// A `Write` implementation that base64-encodes data using the provided config and accumulates the
-/// resulting base64 utf8 `&str` in a [StrConsumer] implementation (typically `String`), which is
+/// resulting base64 utf8 `&str` in a [`StrConsumer`] implementation (typically `String`), which is
 /// then exposed via `into_inner()`.
 ///
 /// # Examples
@@ -53,7 +53,7 @@ pub struct EncoderStringWriter<'e, E: Engine, S: StrConsumer> {
 }
 
 impl<'e, E: Engine, S: StrConsumer> EncoderStringWriter<'e, E, S> {
-    /// Create a EncoderStringWriter that will append to the provided `StrConsumer`.
+    /// Create a `EncoderStringWriter` that will append to the provided `StrConsumer`.
     pub fn from_consumer(str_consumer: S, engine: &'e E) -> Self {
         EncoderStringWriter {
             encoder: EncoderWriter::new(Utf8SingleCodeUnitWriter { str_consumer }, engine),
@@ -73,7 +73,7 @@ impl<'e, E: Engine, S: StrConsumer> EncoderStringWriter<'e, E, S> {
 }
 
 impl<'e, E: Engine> EncoderStringWriter<'e, E, String> {
-    /// Create a EncoderStringWriter that will encode into a new `String` with the provided config.
+    /// Create a `EncoderStringWriter` that will encode into a new `String` with the provided config.
     pub fn new(engine: &'e E) -> Self {
         EncoderStringWriter::from_consumer(String::new(), engine)
     }
@@ -95,7 +95,7 @@ pub trait StrConsumer {
     fn consume(&mut self, buf: &str);
 }
 
-/// As for io::Write, `StrConsumer` is implemented automatically for `&mut S`.
+/// As for `io::Write`, `StrConsumer` is implemented automatically for `&mut S`.
 impl StrConsumer for &mut S {
     fn consume(&mut self, buf: &str) {
         (**self).consume(buf);

From 5a575c27e9a694f429347c741bd4eccd3c164737 Mon Sep 17 00:00:00 2001
From: Nando Lawson
Date: Sat, 5 Oct 2024 10:29:06 +0200
Subject: [PATCH 2/2] Format file via rustfmt

---
 src/write/encoder.rs | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/write/encoder.rs b/src/write/encoder.rs
index 6534400..3df8c56 100644
--- a/src/write/encoder.rs
+++ b/src/write/encoder.rs
@@ -126,7 +126,10 @@ impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> {
     pub fn finish(&mut self) -> Result<W> {
         // If we could consume self in finish(), we wouldn't have to worry about this case, but
         // finish() is retryable in the face of I/O errors, so we can't consume here.
-        assert!(self.delegate.is_some(), "Encoder has already had finish() called");
+        assert!(
+            self.delegate.is_some(),
+            "Encoder has already had finish() called"
+        );
 
         self.write_final_leftovers()?;
 
@@ -261,7 +264,10 @@ impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> {
     ///
     /// Any errors emitted by the delegate writer are returned.
     fn write(&mut self, input: &[u8]) -> Result<usize> {
-        assert!(self.delegate.is_some(), "Cannot write more after calling finish()");
+        assert!(
+            self.delegate.is_some(),
+            "Cannot write more after calling finish()"
+        );
 
         if input.is_empty() {
             return Ok(0);
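
Note for reviewers who want to reproduce these warnings locally: the edits in this series are the kind reported by Clippy's pedantic-level lints (`doc_markdown` for the backticked items in doc comments, `must_use_candidate` for the added `#[must_use]` attributes, `cast_lossless` for the `usize::from(...)` conversions, `manual_assert` for replacing `if`/`panic!` with `assert!`, and `semicolon_if_nothing_returned` / `ignored_unit_patterns` for the trailing semicolons and the `Ok(())`/`|()|` patterns). The snippet below is only a sketch of how such lints could be enabled at the crate root; the patches themselves do not record which lint configuration was actually used, so treat the exact selection as an assumption.

// Illustrative lint setup (assumed, not part of the patches): placing these
// inner attributes at the top of src/lib.rs surfaces the classes of warnings
// addressed by this series.
#![warn(
    clippy::doc_markdown,                  // backtick item names referenced in doc comments
    clippy::must_use_candidate,            // suggest #[must_use] on side-effect-free fns
    clippy::cast_lossless,                 // prefer usize::from(flag) over `flag as usize`
    clippy::manual_assert,                 // prefer assert!(cond, "msg") over if { panic!("msg") }
    clippy::semicolon_if_nothing_returned, // end unit-returning statements with `;`
    clippy::ignored_unit_patterns          // prefer Ok(()) and |()| over Ok(_) and |_|
)]

With attributes along those lines in place, running `cargo clippy --all-targets` should flag the same call sites this series touches.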