From 1fbf83816b60fbaa2f9e85585338138c3efa949f Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Fri, 22 May 2020 13:17:30 +0900
Subject: [PATCH] Format with rustfmt (#389)

* Format with rustfmt

* Add rustfmt check to CI
---
 azure-pipelines.yml           |   9 +-
 benches/buf.rs                |   2 +-
 benches/bytes.rs              |   5 +-
 benches/bytes_mut.rs          |  39 +++++--
 ci/azure-rustfmt.yml          |  13 +++
 src/buf/buf_impl.rs           | 195 +++++++++++++++++-----------------
 src/buf/buf_mut.rs            | 168 +++++++++++++++--------------
 src/buf/ext/chain.rs          |  21 ++--
 src/buf/ext/limit.rs          |   5 +-
 src/buf/ext/mod.rs            |  24 +++--
 src/buf/ext/reader.rs         |   2 +-
 src/buf/ext/take.rs           |   5 +-
 src/buf/iter.rs               |   3 +-
 src/buf/mod.rs                |   3 +-
 src/bytes.rs                  |  57 +++++-----
 src/bytes_mut.rs              | 111 +++++++++++--------
 src/fmt/debug.rs              |   2 +-
 src/fmt/hex.rs                |   2 +-
 src/lib.rs                    |  17 +--
 src/serde.rs                  |  29 +++--
 tests/test_buf_mut.rs         |   4 +-
 tests/test_bytes.rs           |  12 +--
 tests/test_bytes_vec_alloc.rs |   6 +-
 tests/test_chain.rs           |   2 +-
 tests/test_iter.rs            |   1 -
 tests/test_reader.rs          |   2 +-
 tests/test_serde.rs           |   2 +-
 27 files changed, 410 insertions(+), 331 deletions(-)
 create mode 100644 ci/azure-rustfmt.yml

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 18b59745d..a92c3c4bb 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -3,9 +3,10 @@ pr: ["master"]

 jobs:
 # Check formatting
-# - template: ci/azure-rustfmt.yml
-#   parameters:
-#     name: rustfmt
+- template: ci/azure-rustfmt.yml
+  parameters:
+    name: rustfmt
+    rust_version: stable

 # Apply clippy lints
 # - template: ci/azure-clippy.yml
@@ -60,7 +61,7 @@ jobs:
 - template: ci/azure-deploy-docs.yml
   parameters:
     dependsOn:
-      # - rustfmt
+      - rustfmt
       # - clippy
       - stable
       - nightly
diff --git a/benches/buf.rs b/benches/buf.rs
index 0c9a1d955..8f14aec20 100644
--- a/benches/buf.rs
+++ b/benches/buf.rs
@@ -3,8 +3,8 @@

 extern crate test;

-use test::Bencher;
 use bytes::Buf;
+use test::Bencher;

 /// Dummy Buf implementation
 struct TestBuf {
diff --git a/benches/bytes.rs b/benches/bytes.rs
index 9c36e6081..1741ba0a1 100644
--- a/benches/bytes.rs
+++ b/benches/bytes.rs
@@ -3,8 +3,8 @@

 extern crate test;

-use test::Bencher;
 use bytes::Bytes;
+use test::Bencher;

 #[bench]
 fn deref_unique(b: &mut Bencher) {
@@ -42,7 +42,8 @@ fn deref_static(b: &mut Bencher) {

 #[bench]
 fn clone_static(b: &mut Bencher) {
-    let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes());
+    let bytes =
+        Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes());

     b.iter(|| {
         for _ in 0..1024 {
diff --git a/benches/bytes_mut.rs b/benches/bytes_mut.rs
index ded1d1486..8e0226c76 100644
--- a/benches/bytes_mut.rs
+++ b/benches/bytes_mut.rs
@@ -3,8 +3,8 @@

 extern crate test;

-use test::Bencher;
 use bytes::{BufMut, BytesMut};
+use test::Bencher;

 #[bench]
 fn alloc_small(b: &mut Bencher) {
@@ -29,7 +29,6 @@ fn alloc_big(b: &mut Bencher) {
     })
 }

-
 #[bench]
 fn deref_unique(b: &mut Bencher) {
     let mut buf = BytesMut::with_capacity(4096);
@@ -92,7 +91,9 @@ fn deref_two(b: &mut Bencher) {

 #[bench]
 fn clone_frozen(b: &mut Bencher) {
-    let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..]).split().freeze();
+    let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..])
+        .split()
+        .freeze();

     b.iter(|| {
         for _ in 0..1024 {
@@ -137,7 +138,9 @@ fn fmt_write(b: &mut Bencher) {
     b.iter(|| {
         let _ = write!(buf, "{}", s);
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     })
 }

@@ -152,7 +155,9 @@ fn bytes_mut_extend(b: &mut Bencher) {
             buf.extend(&data);
         }
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     });
 }

@@ -169,7 +174,9 @@ fn put_slice_bytes_mut(b: &mut Bencher) {
             buf.put_slice(&data);
         }
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     });
 }

@@ -184,7 +191,9 @@ fn put_u8_bytes_mut(b: &mut Bencher) {
             buf.put_u8(b'x');
         }
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     });
 }

@@ -199,7 +208,9 @@ fn put_slice_vec(b: &mut Bencher) {
             buf.put_slice(&data);
         }
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     });
 }

@@ -214,7 +225,9 @@ fn put_u8_vec(b: &mut Bencher) {
             buf.put_u8(b'x');
         }
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     });
 }

@@ -229,7 +242,9 @@ fn put_slice_vec_extend(b: &mut Bencher) {
             buf.extend_from_slice(&data);
        }
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     });
 }

@@ -244,6 +259,8 @@ fn put_u8_vec_push(b: &mut Bencher) {
             buf.push(b'x');
         }
         test::black_box(&buf);
-        unsafe { buf.set_len(0); }
+        unsafe {
+            buf.set_len(0);
+        }
     });
 }
diff --git a/ci/azure-rustfmt.yml b/ci/azure-rustfmt.yml
new file mode 100644
index 000000000..a0a133890
--- /dev/null
+++ b/ci/azure-rustfmt.yml
@@ -0,0 +1,13 @@
+jobs:
+# Check formatting
+- job: ${{ parameters.name }}
+  displayName: Check rustfmt
+  pool:
+    vmImage: ubuntu-16.04
+  steps:
+    - template: azure-install-rust.yml
+      parameters:
+        rust_version: ${{ parameters.rust_version }}
+    - script: |
+        cargo fmt --all -- --check
+      displayName: Check formatting
diff --git a/src/buf/buf_impl.rs b/src/buf/buf_impl.rs
index 843db718f..5cd7c686e 100644
--- a/src/buf/buf_impl.rs
+++ b/src/buf/buf_impl.rs
@@ -1,22 +1,23 @@
-use core::{cmp, ptr, mem};
+use core::{cmp, mem, ptr};

 #[cfg(feature = "std")]
 use std::io::IoSlice;

-use alloc::{boxed::Box};
+use alloc::boxed::Box;

 macro_rules! buf_get_impl {
-    ($this:ident, $typ:tt::$conv:tt) => ({
+    ($this:ident, $typ:tt::$conv:tt) => {{
         const SIZE: usize = mem::size_of::<$typ>();
-        // try to convert directly from the bytes
-        // this Option trick is to avoid keeping a borrow on self
-        // when advance() is called (mut borrow) and to call bytes() only once
-        let ret = $this.bytes().get(..SIZE).map(|src| unsafe {
-            $typ::$conv(*(src as *const _ as *const [_; SIZE]))
-        });
+        // try to convert directly from the bytes
+        // this Option trick is to avoid keeping a borrow on self
+        // when advance() is called (mut borrow) and to call bytes() only once
+        let ret = $this
+            .bytes()
+            .get(..SIZE)
+            .map(|src| unsafe { $typ::$conv(*(src as *const _ as *const [_; SIZE])) });

         if let Some(ret) = ret {
-            // if the direct conversion was possible, advance and return
+            // if the direct conversion was possible, advance and return
             $this.advance(SIZE);
             return ret;
         } else {
@@ -25,8 +26,8 @@ macro_rules! buf_get_impl {
             $this.copy_to_slice(&mut buf); // (do the advance)
             return $typ::$conv(buf);
         }
-    });
-    (le => $this:ident, $typ:tt, $len_to_read:expr) => ({
+    }};
+    (le => $this:ident, $typ:tt, $len_to_read:expr) => {{
         debug_assert!(mem::size_of::<$typ>() >= $len_to_read);

         // The same trick as above does not improve the best case speed.
@@ -34,12 +35,12 @@ macro_rules! buf_get_impl {
         let mut buf = [0; (mem::size_of::<$typ>())];
         $this.copy_to_slice(&mut buf[..($len_to_read)]);
         return $typ::from_le_bytes(buf);
-    });
+    }};
     (be => $this:ident, $typ:tt, $len_to_read:expr) => {{
         debug_assert!(mem::size_of::<$typ>() >= $len_to_read);

         let mut buf = [0; (mem::size_of::<$typ>())];
-        $this.copy_to_slice(&mut buf[mem::size_of::<$typ>()-($len_to_read)..]);
+        $this.copy_to_slice(&mut buf[mem::size_of::<$typ>() - ($len_to_read)..]);
         return $typ::from_be_bytes(buf);
     }};
 }
@@ -251,8 +252,7 @@ pub trait Buf {
             let src = self.bytes();
             cnt = cmp::min(src.len(), dst.len() - off);

-            ptr::copy_nonoverlapping(
-                src.as_ptr(), dst[off..].as_mut_ptr(), cnt);
+            ptr::copy_nonoverlapping(src.as_ptr(), dst[off..].as_mut_ptr(), cnt);

             off += cnt;
         }
@@ -810,109 +810,108 @@ pub trait Buf {
 }

 macro_rules! deref_forward_buf {
-    () => (
-    fn remaining(&self) -> usize {
-        (**self).remaining()
-    }
-
-    fn bytes(&self) -> &[u8] {
-        (**self).bytes()
-    }
+    () => {
+        fn remaining(&self) -> usize {
+            (**self).remaining()
+        }

-    #[cfg(feature = "std")]
-    fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
-        (**self).bytes_vectored(dst)
-    }
+        fn bytes(&self) -> &[u8] {
+            (**self).bytes()
+        }

-    fn advance(&mut self, cnt: usize) {
-        (**self).advance(cnt)
-    }
+        #[cfg(feature = "std")]
+        fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
+            (**self).bytes_vectored(dst)
+        }

-    fn has_remaining(&self) -> bool {
-        (**self).has_remaining()
-    }
+        fn advance(&mut self, cnt: usize) {
+            (**self).advance(cnt)
+        }

-    fn copy_to_slice(&mut self, dst: &mut [u8]) {
-        (**self).copy_to_slice(dst)
-    }
+        fn has_remaining(&self) -> bool {
+            (**self).has_remaining()
+        }

-    fn get_u8(&mut self) -> u8 {
-        (**self).get_u8()
-    }
+        fn copy_to_slice(&mut self, dst: &mut [u8]) {
+            (**self).copy_to_slice(dst)
+        }

-    fn get_i8(&mut self) -> i8 {
-        (**self).get_i8()
-    }
+        fn get_u8(&mut self) -> u8 {
+            (**self).get_u8()
+        }

-    fn get_u16(&mut self) -> u16 {
-        (**self).get_u16()
-    }
+        fn get_i8(&mut self) -> i8 {
+            (**self).get_i8()
+        }

-    fn get_u16_le(&mut self) -> u16 {
-        (**self).get_u16_le()
-    }
+        fn get_u16(&mut self) -> u16 {
+            (**self).get_u16()
+        }

-    fn get_i16(&mut self) -> i16 {
-        (**self).get_i16()
-    }
+        fn get_u16_le(&mut self) -> u16 {
+            (**self).get_u16_le()
+        }

-    fn get_i16_le(&mut self) -> i16 {
-        (**self).get_i16_le()
-    }
+        fn get_i16(&mut self) -> i16 {
+            (**self).get_i16()
+        }

-    fn get_u32(&mut self) -> u32 {
-        (**self).get_u32()
-    }
+        fn get_i16_le(&mut self) -> i16 {
+            (**self).get_i16_le()
+        }

-    fn get_u32_le(&mut self) -> u32 {
-        (**self).get_u32_le()
-    }
+        fn get_u32(&mut self) -> u32 {
+            (**self).get_u32()
+        }

-    fn get_i32(&mut self) -> i32 {
-        (**self).get_i32()
-    }
+        fn get_u32_le(&mut self) -> u32 {
+            (**self).get_u32_le()
+        }

-    fn get_i32_le(&mut self) -> i32 {
-        (**self).get_i32_le()
-    }
+        fn get_i32(&mut self) -> i32 {
+            (**self).get_i32()
+        }

-    fn get_u64(&mut self) -> u64 {
-        (**self).get_u64()
-    }
+        fn get_i32_le(&mut self) -> i32 {
+            (**self).get_i32_le()
+        }

-    fn get_u64_le(&mut self) -> u64 {
-        (**self).get_u64_le()
-    }
+        fn get_u64(&mut self) -> u64 {
+            (**self).get_u64()
+        }

-    fn get_i64(&mut self) -> i64 {
-        (**self).get_i64()
-    }
+        fn get_u64_le(&mut self) -> u64 {
+            (**self).get_u64_le()
+        }

-    fn get_i64_le(&mut self) -> i64 {
-        (**self).get_i64_le()
-    }
+        fn get_i64(&mut self) -> i64 {
+            (**self).get_i64()
+        }

-    fn get_uint(&mut self, nbytes: usize) -> u64 {
-        (**self).get_uint(nbytes)
-    }
+        fn get_i64_le(&mut self) -> i64 {
+            (**self).get_i64_le()
+        }

-    fn get_uint_le(&mut self, nbytes: usize) -> u64 {
-        (**self).get_uint_le(nbytes)
-    }
+        fn get_uint(&mut self, nbytes: usize) -> u64 {
+            (**self).get_uint(nbytes)
+        }

-    fn get_int(&mut self, nbytes: usize) -> i64 {
-        (**self).get_int(nbytes)
-    }
+        fn get_uint_le(&mut self, nbytes: usize) -> u64 {
+            (**self).get_uint_le(nbytes)
+        }

-    fn get_int_le(&mut self, nbytes: usize) -> i64 {
-        (**self).get_int_le(nbytes)
-    }
+        fn get_int(&mut self, nbytes: usize) -> i64 {
+            (**self).get_int(nbytes)
+        }

-    fn to_bytes(&mut self) -> crate::Bytes {
-        (**self).to_bytes()
-    }
+        fn get_int_le(&mut self, nbytes: usize) -> i64 {
+            (**self).get_int_le(nbytes)
+        }

-    )
+        fn to_bytes(&mut self) -> crate::Bytes {
+            (**self).to_bytes()
+        }
+    };
 }

 impl<T: Buf + ?Sized> Buf for &mut T {
@@ -950,7 +949,8 @@ impl Buf for Option<[u8; 1]> {
     }

     fn bytes(&self) -> &[u8] {
-        self.as_ref().map(AsRef::as_ref)
+        self.as_ref()
+            .map(AsRef::as_ref)
             .unwrap_or(Default::default())
     }

@@ -994,7 +994,8 @@ impl<T: AsRef<[u8]>> Buf for std::io::Cursor<T> {

     fn advance(&mut self, cnt: usize) {
         let pos = (self.position() as usize)
-            .checked_add(cnt).expect("overflow");
+            .checked_add(cnt)
+            .expect("overflow");

         assert!(pos <= self.get_ref().as_ref().len());
         self.set_position(pos as u64);
diff --git a/src/buf/buf_mut.rs b/src/buf/buf_mut.rs
index ab9ad1844..628b240a3 100644
--- a/src/buf/buf_mut.rs
+++ b/src/buf/buf_mut.rs
@@ -1,9 +1,13 @@
-use core::{cmp, mem::{self, MaybeUninit}, ptr, usize};
+use core::{
+    cmp,
+    mem::{self, MaybeUninit},
+    ptr, usize,
+};

 #[cfg(feature = "std")]
 use std::fmt;

-use alloc::{vec::Vec, boxed::Box};
+use alloc::{boxed::Box, vec::Vec};

 /// A trait for values that provide sequential write access to bytes.
 ///
@@ -226,7 +230,10 @@ pub trait BufMut {
     /// # Panics
     ///
     /// Panics if `self` does not have enough capacity to contain `src`.
-    fn put<T: crate::Buf>(&mut self, mut src: T) where Self: Sized {
+    fn put<T: crate::Buf>(&mut self, mut src: T)
+    where
+        Self: Sized,
+    {
         assert!(self.remaining_mut() >= src.remaining());

         while src.has_remaining() {
@@ -237,14 +244,13 @@ pub trait BufMut {
                 let d = self.bytes_mut();
                 l = cmp::min(s.len(), d.len());

-                ptr::copy_nonoverlapping(
-                    s.as_ptr(),
-                    d.as_mut_ptr() as *mut u8,
-                    l);
+                ptr::copy_nonoverlapping(s.as_ptr(), d.as_mut_ptr() as *mut u8, l);
             }

             src.advance(l);
-            unsafe { self.advance_mut(l); }
+            unsafe {
+                self.advance_mut(l);
+            }
         }
     }

@@ -270,7 +276,12 @@ pub trait BufMut {
     fn put_slice(&mut self, src: &[u8]) {
         let mut off = 0;

-        assert!(self.remaining_mut() >= src.len(), "buffer overflow; remaining = {}; src = {}", self.remaining_mut(), src.len());
+        assert!(
+            self.remaining_mut() >= src.len(),
+            "buffer overflow; remaining = {}; src = {}",
+            self.remaining_mut(),
+            src.len()
+        );

         while off < src.len() {
             let cnt;
@@ -279,16 +290,14 @@ pub trait BufMut {
                 let dst = self.bytes_mut();
                 cnt = cmp::min(dst.len(), src.len() - off);

-                ptr::copy_nonoverlapping(
-                    src[off..].as_ptr(),
-                    dst.as_mut_ptr() as *mut u8,
-                    cnt);
+                ptr::copy_nonoverlapping(src[off..].as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);

                 off += cnt;
-
             }

-            unsafe { self.advance_mut(cnt); }
+            unsafe {
+                self.advance_mut(cnt);
+            }
         }
     }
@@ -872,84 +881,84 @@ pub trait BufMut {
 }

 macro_rules! deref_forward_bufmut {
-    () => (
-    fn remaining_mut(&self) -> usize {
-        (**self).remaining_mut()
-    }
+    () => {
+        fn remaining_mut(&self) -> usize {
+            (**self).remaining_mut()
+        }

-    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
-        (**self).bytes_mut()
-    }
+        fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+            (**self).bytes_mut()
+        }

-    #[cfg(feature = "std")]
-    fn bytes_vectored_mut<'b>(&'b mut self, dst: &mut [IoSliceMut<'b>]) -> usize {
-        (**self).bytes_vectored_mut(dst)
-    }
+        #[cfg(feature = "std")]
+        fn bytes_vectored_mut<'b>(&'b mut self, dst: &mut [IoSliceMut<'b>]) -> usize {
+            (**self).bytes_vectored_mut(dst)
+        }

-    unsafe fn advance_mut(&mut self, cnt: usize) {
-        (**self).advance_mut(cnt)
-    }
+        unsafe fn advance_mut(&mut self, cnt: usize) {
+            (**self).advance_mut(cnt)
+        }

-    fn put_slice(&mut self, src: &[u8]) {
-        (**self).put_slice(src)
-    }
+        fn put_slice(&mut self, src: &[u8]) {
+            (**self).put_slice(src)
+        }

-    fn put_u8(&mut self, n: u8) {
-        (**self).put_u8(n)
-    }
+        fn put_u8(&mut self, n: u8) {
+            (**self).put_u8(n)
+        }

-    fn put_i8(&mut self, n: i8) {
-        (**self).put_i8(n)
-    }
+        fn put_i8(&mut self, n: i8) {
+            (**self).put_i8(n)
+        }

-    fn put_u16(&mut self, n: u16) {
-        (**self).put_u16(n)
-    }
+        fn put_u16(&mut self, n: u16) {
+            (**self).put_u16(n)
+        }

-    fn put_u16_le(&mut self, n: u16) {
-        (**self).put_u16_le(n)
-    }
+        fn put_u16_le(&mut self, n: u16) {
+            (**self).put_u16_le(n)
+        }

-    fn put_i16(&mut self, n: i16) {
-        (**self).put_i16(n)
-    }
+        fn put_i16(&mut self, n: i16) {
+            (**self).put_i16(n)
+        }

-    fn put_i16_le(&mut self, n: i16) {
-        (**self).put_i16_le(n)
-    }
+        fn put_i16_le(&mut self, n: i16) {
+            (**self).put_i16_le(n)
+        }

-    fn put_u32(&mut self, n: u32) {
-        (**self).put_u32(n)
-    }
+        fn put_u32(&mut self, n: u32) {
+            (**self).put_u32(n)
+        }

-    fn put_u32_le(&mut self, n: u32) {
-        (**self).put_u32_le(n)
-    }
+        fn put_u32_le(&mut self, n: u32) {
+            (**self).put_u32_le(n)
+        }

-    fn put_i32(&mut self, n: i32) {
-        (**self).put_i32(n)
-    }
+        fn put_i32(&mut self, n: i32) {
+            (**self).put_i32(n)
+        }

-    fn put_i32_le(&mut self, n: i32) {
-        (**self).put_i32_le(n)
-    }
+        fn put_i32_le(&mut self, n: i32) {
+            (**self).put_i32_le(n)
+        }

-    fn put_u64(&mut self, n: u64) {
-        (**self).put_u64(n)
-    }
+        fn put_u64(&mut self, n: u64) {
+            (**self).put_u64(n)
+        }

-    fn put_u64_le(&mut self, n: u64) {
-        (**self).put_u64_le(n)
-    }
+        fn put_u64_le(&mut self, n: u64) {
+            (**self).put_u64_le(n)
+        }

-    fn put_i64(&mut self, n: i64) {
-        (**self).put_i64(n)
-    }
+        fn put_i64(&mut self, n: i64) {
+            (**self).put_i64(n)
+        }

-    fn put_i64_le(&mut self, n: i64) {
-        (**self).put_i64_le(n)
-    }
-    )
+        fn put_i64_le(&mut self, n: i64) {
+            (**self).put_i64_le(n)
+        }
+    };
 }

 impl<T: BufMut + ?Sized> BufMut for &mut T {
@@ -1013,15 +1022,16 @@ impl BufMut for Vec<u8> {
         let len = self.len();
         let ptr = self.as_mut_ptr() as *mut MaybeUninit<u8>;
-        unsafe {
-            &mut slice::from_raw_parts_mut(ptr, cap)[len..]
-        }
+        unsafe { &mut slice::from_raw_parts_mut(ptr, cap)[len..] }
     }

     // Specialize these methods so they can skip checking `remaining_mut`
     // and `advance_mut`.

-    fn put<T: crate::Buf>(&mut self, mut src: T) where Self: Sized {
+    fn put<T: crate::Buf>(&mut self, mut src: T)
+    where
+        Self: Sized,
+    {
         // In case the src isn't contiguous, reserve upfront
         self.reserve(src.remaining());

diff --git a/src/buf/ext/chain.rs b/src/buf/ext/chain.rs
index a1ec597df..e62e2f1b9 100644
--- a/src/buf/ext/chain.rs
+++ b/src/buf/ext/chain.rs
@@ -1,12 +1,12 @@
-use crate::{Buf, BufMut};
 use crate::buf::IntoIter;
+use crate::{Buf, BufMut};

 use core::mem::MaybeUninit;

-#[cfg(feature = "std")]
-use std::io::{IoSlice};
 #[cfg(feature = "std")]
 use crate::buf::IoSliceMut;
+#[cfg(feature = "std")]
+use std::io::IoSlice;

 /// A `Chain` sequences two buffers.
 ///
@@ -41,10 +41,7 @@ pub struct Chain<T, U> {
 impl<T, U> Chain<T, U> {
     /// Creates a new `Chain` sequencing the provided values.
     pub fn new(a: T, b: U) -> Chain<T, U> {
-        Chain {
-            a,
-            b,
-        }
+        Chain { a, b }
     }

     /// Gets a reference to the first underlying `Buf`.
@@ -137,8 +134,9 @@ impl<T, U> Chain<T, U> {
 }

 impl<T, U> Buf for Chain<T, U>
-    where T: Buf,
-          U: Buf,
+where
+    T: Buf,
+    U: Buf,
 {
     fn remaining(&self) -> usize {
         self.a.remaining() + self.b.remaining()
@@ -179,8 +177,9 @@ impl<T, U> Buf for Chain<T, U>
 }

 impl<T, U> BufMut for Chain<T, U>
-    where T: BufMut,
-          U: BufMut,
+where
+    T: BufMut,
+    U: BufMut,
 {
     fn remaining_mut(&self) -> usize {
         self.a.remaining_mut() + self.b.remaining_mut()
diff --git a/src/buf/ext/limit.rs b/src/buf/ext/limit.rs
index f86e01151..a36eceeef 100644
--- a/src/buf/ext/limit.rs
+++ b/src/buf/ext/limit.rs
@@ -11,10 +11,7 @@ pub struct Limit<T> {
 }

 pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> {
-    Limit {
-        inner,
-        limit,
-    }
+    Limit { inner, limit }
 }

 impl<T> Limit<T> {
diff --git a/src/buf/ext/mod.rs b/src/buf/ext/mod.rs
index 7b0bdab20..7d6181438 100644
--- a/src/buf/ext/mod.rs
+++ b/src/buf/ext/mod.rs
@@ -10,9 +10,9 @@ mod take;
 #[cfg(feature = "std")]
 mod writer;

+pub use self::chain::Chain;
 pub use self::limit::Limit;
 pub use self::take::Take;
-pub use self::chain::Chain;

 #[cfg(feature = "std")]
 pub use self::{reader::Reader, writer::Writer};
@@ -41,7 +41,8 @@ pub trait BufExt: Buf {
     ///     assert_eq!(dst, b" world");
     /// ```
     fn take(self, limit: usize) -> Take<Self>
-        where Self: Sized
+    where
+        Self: Sized,
     {
         take::new(self, limit)
     }
@@ -62,7 +63,8 @@ pub trait BufExt: Buf {
     ///     assert_eq!(full.bytes(), b"hello world");
     /// ```
     fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
-        where Self: Sized
+    where
+        Self: Sized,
     {
         Chain::new(self, next)
     }
@@ -91,7 +93,10 @@ pub trait BufExt: Buf {
     ///     assert_eq!(&dst[..11], &b"hello world"[..]);
     /// ```
     #[cfg(feature = "std")]
-    fn reader(self) -> Reader<Self> where Self: Sized {
+    fn reader(self) -> Reader<Self>
+    where
+        Self: Sized,
+    {
         reader::new(self)
     }
 }
@@ -114,7 +119,8 @@ pub trait BufMutExt: BufMut {
     ///     assert_eq!(dst.remaining_mut(), 10);
     /// ```
     fn limit(self, limit: usize) -> Limit<Self>
-        where Self: Sized
+    where
+        Self: Sized,
     {
         limit::new(self, limit)
     }
@@ -142,7 +148,10 @@ pub trait BufMutExt: BufMut {
     ///     assert_eq!(*buf, b"hello world"[..]);
     /// ```
     #[cfg(feature = "std")]
-    fn writer(self) -> Writer<Self> where Self: Sized {
+    fn writer(self) -> Writer<Self>
+    where
+        Self: Sized,
+    {
         writer::new(self)
     }

@@ -167,7 +176,8 @@ pub trait BufMutExt: BufMut {
     ///     assert_eq!(&b[..], b" world");
     /// ```
     fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
-        where Self: Sized
+    where
+        Self: Sized,
     {
         Chain::new(self, next)
     }
diff --git a/src/buf/ext/reader.rs b/src/buf/ext/reader.rs
index e38103b1d..bc171add1 100644
--- a/src/buf/ext/reader.rs
+++ b/src/buf/ext/reader.rs
@@ -1,4 +1,4 @@
-use crate::{Buf};
+use crate::Buf;

 use std::{cmp, io};

diff --git a/src/buf/ext/take.rs b/src/buf/ext/take.rs
index 6fc4ffc72..6247165d7 100644
--- a/src/buf/ext/take.rs
+++ b/src/buf/ext/take.rs
@@ -13,10 +13,7 @@ pub struct Take<T> {
 }

 pub fn new<T>(inner: T, limit: usize) -> Take<T> {
-    Take {
-        inner,
-        limit,
-    }
+    Take { inner, limit }
 }

 impl<T> Take<T> {
diff --git a/src/buf/iter.rs b/src/buf/iter.rs
index 1af421a8d..5a3f26a2e 100644
--- a/src/buf/iter.rs
+++ b/src/buf/iter.rs
@@ -109,7 +109,6 @@ impl<T> IntoIter<T> {
     }
 }

-
 impl<T: Buf> Iterator for IntoIter<T> {
     type Item = u8;

@@ -130,4 +129,4 @@ impl<T: Buf> Iterator for IntoIter<T> {
     }
 }

-impl<T: Buf> ExactSizeIterator for IntoIter<T> { }
+impl<T: Buf> ExactSizeIterator for IntoIter<T> {}
diff --git a/src/buf/mod.rs b/src/buf/mod.rs
index d4538f21e..1d7292c9e 100644
--- a/src/buf/mod.rs
+++ b/src/buf/mod.rs
@@ -24,8 +24,7 @@ mod vec_deque;

 pub use self::buf_impl::Buf;
 pub use self::buf_mut::BufMut;
-pub use self::ext::{BufExt, BufMutExt};
 #[cfg(feature = "std")]
 pub use self::buf_mut::IoSliceMut;
+pub use self::ext::{BufExt, BufMutExt};
 pub use self::iter::IntoIter;
-
diff --git a/src/bytes.rs b/src/bytes.rs
index eb75e1b5c..08bc9b3f3 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -1,15 +1,14 @@
-use core::{cmp, fmt, hash, mem, ptr, slice, usize};
-use core::iter::{FromIterator};
+use core::iter::FromIterator;
 use core::ops::{Deref, RangeBounds};
+use core::{cmp, fmt, hash, mem, ptr, slice, usize};

-use alloc::{vec::Vec, string::String, boxed::Box, borrow::Borrow};
+use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};

-use crate::Buf;
 use crate::buf::IntoIter;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
 #[allow(unused)]
 use crate::loom::sync::atomic::AtomicMut;
-
+use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::Buf;

 /// A reference counted contiguous slice of memory.
 ///
@@ -176,7 +175,6 @@ impl Bytes {
         self.len == 0
     }

-
     ///Creates `Bytes` instance from slice, by copying it.
     pub fn copy_from_slice(data: &[u8]) -> Self {
         data.to_vec().into()
@@ -238,7 +236,6 @@ impl Bytes {
             return Bytes::new();
         }

-
         let mut ret = self.clone();

         ret.len = end - begin;
@@ -394,7 +391,6 @@ impl Bytes {
             return Bytes::new();
         }

-
         let mut ret = self.clone();

         unsafe { self.inc_start(at) };
@@ -429,8 +425,9 @@ impl Bytes {
         // The Vec "promotable" vtables do not store the capacity,
         // so we cannot truncate while using this repr. We *have* to
         // promote using `split_off` so the capacity can be stored.
-        if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE ||
-            self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE {
+        if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
+            || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
+        {
             drop(self.split_off(len));
         } else {
             self.len = len;
@@ -455,7 +452,12 @@ impl Bytes {
     }

     #[inline]
-    pub(crate) unsafe fn with_vtable(ptr: *const u8, len: usize, data: AtomicPtr<()>, vtable: &'static Vtable) -> Bytes {
+    pub(crate) unsafe fn with_vtable(
+        ptr: *const u8,
+        len: usize,
+        data: AtomicPtr<()>,
+        vtable: &'static Vtable,
+    ) -> Bytes {
         Bytes {
             ptr,
             len,
@@ -468,9 +470,7 @@ impl Bytes {

     #[inline]
     fn as_slice(&self) -> &[u8] {
-        unsafe {
-            slice::from_raw_parts(self.ptr, self.len)
-        }
+        unsafe { slice::from_raw_parts(self.ptr, self.len) }
     }

     #[inline]
@@ -489,18 +489,14 @@ unsafe impl Sync for Bytes {}
 impl Drop for Bytes {
     #[inline]
     fn drop(&mut self) {
-        unsafe {
-            (self.vtable.drop)(&mut self.data, self.ptr, self.len)
-        }
+        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
     }
 }

 impl Clone for Bytes {
     #[inline]
     fn clone(&self) -> Bytes {
-        unsafe {
-            (self.vtable.clone)(&self.data, self.ptr, self.len)
-        }
+        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
     }
 }

@@ -551,7 +547,10 @@ impl AsRef<[u8]> for Bytes {
 }

 impl hash::Hash for Bytes {
-    fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
+    fn hash<H>(&self, state: &mut H)
+    where
+        H: hash::Hasher,
+    {
         self.as_slice().hash(state);
     }
 }
@@ -729,7 +728,8 @@ impl PartialOrd<Bytes> for &str {
 }

 impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
-    where Bytes: PartialEq<T>
+where
+    Bytes: PartialEq<T>,
 {
     fn eq(&self, other: &&'a T) -> bool {
         *self == **other
@@ -737,7 +737,8 @@ impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
 }

 impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
-    where Bytes: PartialOrd<T>
+where
+    Bytes: PartialOrd<T>,
 {
     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
         self.partial_cmp(&**other)
@@ -953,7 +954,13 @@ unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) ->
 }

 #[cold]
-unsafe fn shallow_clone_vec(atom: &AtomicPtr<()>, ptr: *const (), buf: *mut u8, offset: *const u8, len: usize) -> Bytes {
+unsafe fn shallow_clone_vec(
+    atom: &AtomicPtr<()>,
+    ptr: *const (),
+    buf: *mut u8,
+    offset: *const u8,
+    len: usize,
+) -> Bytes {
     // If the buffer is still tracked in a `Vec`. It is time to
     // promote the vec to an `Arc`. This could potentially be called
     // concurrently, so some care must be taken.
diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs
index e0630cb9a..5178198f0 100644
--- a/src/bytes_mut.rs
+++ b/src/bytes_mut.rs
@@ -1,18 +1,22 @@
-use core::{cmp, fmt, hash, isize, slice, usize};
+use core::iter::{FromIterator, Iterator};
 use core::mem::{self, ManuallyDrop};
 use core::ops::{Deref, DerefMut};
 use core::ptr::{self, NonNull};
-use core::iter::{FromIterator, Iterator};
+use core::{cmp, fmt, hash, isize, slice, usize};

-use alloc::{vec::Vec, string::String, boxed::Box, borrow::{Borrow, BorrowMut}};
+use alloc::{
+    borrow::{Borrow, BorrowMut},
+    boxed::Box,
+    string::String,
+    vec::Vec,
+};

-use crate::{Bytes, Buf, BufMut};
-use crate::bytes::Vtable;
 use crate::buf::IntoIter;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::bytes::Vtable;
 #[allow(unused)]
 use crate::loom::sync::atomic::AtomicMut;
-
+use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::{Buf, BufMut, Bytes};

 /// A unique reference to a contiguous slice of memory.
 ///
@@ -247,9 +251,7 @@ impl BytesMut {
             let len = self.len;
             let data = AtomicPtr::new(self.data as _);
             mem::forget(self);
-            unsafe {
-                Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
-            }
+            unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
         }
     }

@@ -391,7 +393,9 @@ impl BytesMut {
     /// [`split_off`]: #method.split_off
     pub fn truncate(&mut self, len: usize) {
         if len <= self.len() {
-            unsafe { self.set_len(len); }
+            unsafe {
+                self.set_len(len);
+            }
         }
     }

@@ -572,7 +576,8 @@ impl BytesMut {
                 self.cap += off;
             } else {
                 // No space - allocate more
-                let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+                let mut v =
+                    ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
                 v.reserve(additional);

                 // Update the info
@@ -588,7 +593,6 @@ impl BytesMut {
         debug_assert_eq!(kind, KIND_ARC);

         let shared: *mut Shared = self.data as _;
-
         // Reserving involves abandoning the currently shared buffer and
         // allocating a new vector with the requested capacity.
         //
@@ -632,9 +636,7 @@ impl BytesMut {
                 // check.
                 let double = v.capacity().checked_shl(1).unwrap_or(new_cap);

-                new_cap = cmp::max(
-                    cmp::max(double, new_cap),
-                    original_capacity);
+                new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
             } else {
                 new_cap = cmp::max(new_cap, original_capacity);
             }
@@ -683,14 +685,12 @@ impl BytesMut {
             // Reserved above
             debug_assert!(dst.len() >= cnt);

-            ptr::copy_nonoverlapping(
-                extend.as_ptr(),
-                dst.as_mut_ptr() as *mut u8,
-                cnt);
-
+            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
         }

-        unsafe { self.advance_mut(cnt); }
+        unsafe {
+            self.advance_mut(cnt);
+        }
     }

     /// Absorbs a `BytesMut` that was previously split off.
@@ -755,16 +755,12 @@ impl BytesMut {

     #[inline]
     fn as_slice(&self) -> &[u8] {
-        unsafe {
-            slice::from_raw_parts(self.ptr.as_ptr(), self.len)
-        }
+        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
     }

     #[inline]
     fn as_slice_mut(&mut self) -> &mut [u8] {
-        unsafe {
-            slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
-        }
+        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
     }

     unsafe fn set_start(&mut self, start: usize) {
@@ -793,7 +789,7 @@ impl BytesMut {
             // on 64 bit systems and will only happen on 32 bit systems
             // when shifting past 134,217,727 bytes. As such, we don't
             // worry too much about performance here.
-            self.promote_to_shared(/*ref_count = */1);
+            self.promote_to_shared(/*ref_count = */ 1);
         }
     }

@@ -825,10 +821,10 @@ impl BytesMut {
         }

         let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
-        if ptr == other.ptr.as_ptr() &&
-            self.kind() == KIND_ARC &&
-            other.kind() == KIND_ARC &&
-            self.data == other.data
+        if ptr == other.ptr.as_ptr()
+            && self.kind() == KIND_ARC
+            && other.kind() == KIND_ARC
+            && self.data == other.data
         {
             // Contiguous blocks, just combine directly
             self.len += other.len;
@@ -889,7 +885,7 @@ impl BytesMut {
             increment_shared(self.data);
             ptr::read(self)
         } else {
-            self.promote_to_shared(/*ref_count = */2);
+            self.promote_to_shared(/*ref_count = */ 2);
             ptr::read(self)
         }
     }
@@ -957,7 +953,9 @@ impl Buf for BytesMut {
             cnt,
             self.remaining(),
         );
-        unsafe { self.set_start(cnt); }
+        unsafe {
+            self.set_start(cnt);
+        }
     }

     fn to_bytes(&mut self) -> crate::Bytes {
@@ -974,7 +972,12 @@ impl BufMut for BytesMut {
     #[inline]
     unsafe fn advance_mut(&mut self, cnt: usize) {
         let new_len = self.len() + cnt;
-        assert!(new_len <= self.cap, "new_len = {}; capacity = {}", new_len, self.cap);
+        assert!(
+            new_len <= self.cap,
+            "new_len = {}; capacity = {}",
+            new_len,
+            self.cap
+        );
         self.len = new_len;
     }

@@ -989,7 +992,10 @@ impl BufMut for BytesMut {
     // Specialize these methods so they can skip checking `remaining_mut`
     // and `advance_mut`.

-    fn put<T: crate::Buf>(&mut self, mut src: T) where Self: Sized {
+    fn put<T: crate::Buf>(&mut self, mut src: T)
+    where
+        Self: Sized,
+    {
         while src.has_remaining() {
             let s = src.bytes();
             let l = s.len();
@@ -1068,8 +1074,7 @@ impl Ord for BytesMut {
     }
 }

-impl Eq for BytesMut {
-}
+impl Eq for BytesMut {}

 impl Default for BytesMut {
     #[inline]
@@ -1079,7 +1084,10 @@ impl Default for BytesMut {
 }

 impl hash::Hash for BytesMut {
-    fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
+    fn hash<H>(&self, state: &mut H)
+    where
+        H: hash::Hasher,
+    {
         let s: &[u8] = self.as_ref();
         s.hash(state);
     }
@@ -1139,7 +1147,10 @@ impl<'a> IntoIterator for &'a BytesMut {
 }

 impl Extend<u8> for BytesMut {
-    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = u8>,
+    {
         let iter = iter.into_iter();

         let (lower, _) = iter.size_hint();
@@ -1156,7 +1167,10 @@ impl Extend<u8> for BytesMut {
 }

 impl<'a> Extend<&'a u8> for BytesMut {
-    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = &'a u8>,
+    {
         self.extend(iter.into_iter().map(|b| *b))
     }
 }
@@ -1234,7 +1248,10 @@ impl Shared {
 fn original_capacity_to_repr(cap: usize) -> usize {
     let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);

-    cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
+    cmp::min(
+        width,
+        MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
+    )
 }

 fn original_capacity_from_repr(repr: usize) -> usize {
@@ -1402,7 +1419,8 @@ impl PartialOrd<BytesMut> for String {
 }

 impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
-    where BytesMut: PartialEq<T>
+where
+    BytesMut: PartialEq<T>,
 {
     fn eq(&self, other: &&'a T) -> bool {
         *self == **other
@@ -1410,7 +1428,8 @@ impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
 }

 impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
-    where BytesMut: PartialOrd<T>
+where
+    BytesMut: PartialOrd<T>,
 {
     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
         self.partial_cmp(*other)
@@ -1528,8 +1547,8 @@ mod fuzz {
     use loom::sync::Arc;
     use loom::thread;

-    use crate::Bytes;
     use super::BytesMut;
+    use crate::Bytes;

     #[test]
     fn bytes_mut_cloning_frozen() {
diff --git a/src/fmt/debug.rs b/src/fmt/debug.rs
index f6a08b863..a8545514e 100644
--- a/src/fmt/debug.rs
+++ b/src/fmt/debug.rs
@@ -1,7 +1,7 @@
 use core::fmt::{Debug, Formatter, Result};

-use crate::{Bytes, BytesMut};
 use super::BytesRef;
+use crate::{Bytes, BytesMut};

 /// Alternative implementation of `std::fmt::Debug` for byte slice.
 ///
diff --git a/src/fmt/hex.rs b/src/fmt/hex.rs
index 09170ae1a..97a749a33 100644
--- a/src/fmt/hex.rs
+++ b/src/fmt/hex.rs
@@ -1,7 +1,7 @@
 use core::fmt::{Formatter, LowerHex, Result, UpperHex};

-use crate::{Bytes, BytesMut};
 use super::BytesRef;
+use crate::{Bytes, BytesMut};

 impl LowerHex for BytesRef<'_> {
     fn fmt(&self, f: &mut Formatter<'_>) -> Result {
diff --git a/src/lib.rs b/src/lib.rs
index a61e3476a..3cc1fd711 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,9 @@
-#![deny(warnings, missing_docs, missing_debug_implementations, rust_2018_idioms)]
+#![deny(
+    warnings,
+    missing_docs,
+    missing_debug_implementations,
+    rust_2018_idioms
+)]
 #![doc(html_root_url = "https://docs.rs/bytes/0.5.4")]
 #![no_std]

@@ -72,24 +77,20 @@
 //! perform a syscall, which has the potential of failing. Operations on `Buf`
 //! and `BufMut` are infallible.

-
 extern crate alloc;

 #[cfg(feature = "std")]
 extern crate std;

 pub mod buf;
-pub use crate::buf::{
-    Buf,
-    BufMut,
-};
+pub use crate::buf::{Buf, BufMut};

-mod bytes_mut;
 mod bytes;
+mod bytes_mut;
 mod fmt;
 mod loom;
-pub use crate::bytes_mut::BytesMut;
 pub use crate::bytes::Bytes;
+pub use crate::bytes_mut::BytesMut;

 // Optional Serde support
 #[cfg(feature = "serde")]
diff --git a/src/serde.rs b/src/serde.rs
index 11020ae7f..0a5bd144a 100644
--- a/src/serde.rs
+++ b/src/serde.rs
@@ -1,15 +1,16 @@
+use super::{Bytes, BytesMut};
 use alloc::string::String;
 use alloc::vec::Vec;
 use core::{cmp, fmt};
-use serde::{Serialize, Serializer, Deserialize, Deserializer, de};
-use super::{Bytes, BytesMut};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

 macro_rules! serde_impl {
-    ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => (
+    ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => {
         impl Serialize for $ty {
             #[inline]
             fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-                where S: Serializer
+            where
+                S: Serializer,
             {
                 serializer.serialize_bytes(&self)
             }
@@ -26,7 +27,8 @@ macro_rules! serde_impl {

             #[inline]
             fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
-                where V: de::SeqAccess<'de>
+            where
+                V: de::SeqAccess<'de>,
             {
                 let len = cmp::min(seq.size_hint().unwrap_or(0), 4096);
                 let mut values: Vec<u8> = Vec::with_capacity(len);
@@ -40,28 +42,32 @@ macro_rules! serde_impl {

             #[inline]
             fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
-                where E: de::Error
+            where
+                E: de::Error,
             {
                 Ok($ty::$from_slice(v))
             }

             #[inline]
             fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
-                where E: de::Error
+            where
+                E: de::Error,
             {
                 Ok($ty::$from_vec(v))
             }

             #[inline]
             fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
-                where E: de::Error
+            where
+                E: de::Error,
             {
                 Ok($ty::$from_slice(v.as_bytes()))
             }

             #[inline]
             fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
-                where E: de::Error
+            where
+                E: de::Error,
             {
                 Ok($ty::$from_vec(v.into_bytes()))
             }
@@ -70,12 +76,13 @@ macro_rules! serde_impl {
         impl<'de> Deserialize<'de> for $ty {
             #[inline]
             fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error>
-                where D: Deserializer<'de>
+            where
+                D: Deserializer<'de>,
             {
                 deserializer.deserialize_byte_buf($visitor_ty)
             }
         }
-    );
+    };
 }

 serde_impl!(Bytes, BytesVisitor, copy_from_slice, from);
diff --git a/tests/test_buf_mut.rs b/tests/test_buf_mut.rs
index d2f9b8e4c..c70e20928 100644
--- a/tests/test_buf_mut.rs
+++ b/tests/test_buf_mut.rs
@@ -1,10 +1,10 @@
 #![deny(warnings, rust_2018_idioms)]

-use bytes::{BufMut, BytesMut};
 #[cfg(feature = "std")]
 use bytes::buf::IoSliceMut;
-use core::usize;
+use bytes::{BufMut, BytesMut};
 use core::fmt::Write;
+use core::usize;

 #[test]
 fn test_vec_as_mut_buf() {
diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs
index 47757733c..106fa6f4f 100644
--- a/tests/test_bytes.rs
+++ b/tests/test_bytes.rs
@@ -1,6 +1,6 @@
 #![deny(warnings, rust_2018_idioms)]

-use bytes::{Bytes, BytesMut, Buf, BufMut};
+use bytes::{Buf, BufMut, Bytes, BytesMut};

 use std::usize;

@@ -44,7 +44,6 @@ fn test_layout() {
         mem::size_of::<Option<BytesMut>>(),
         "BytesMut should be same size as Option<BytesMut>",
     );
-
 }

 #[test]
@@ -87,13 +86,11 @@ fn fmt_write() {
     write!(a, "{}", &s[..64]).unwrap();
     assert_eq!(a, s[..64].as_bytes());

-
     let mut b = BytesMut::with_capacity(64);
     write!(b, "{}", &s[..32]).unwrap();
     write!(b, "{}", &s[32..64]).unwrap();
     assert_eq!(b, s[..64].as_bytes());

-
     let mut c = BytesMut::with_capacity(64);
     write!(c, "{}", s).unwrap();
     assert_eq!(c, s[..].as_bytes());
@@ -305,11 +302,13 @@ fn split_off_to_at_gt_len() {

     assert!(panic::catch_unwind(move || {
         let _ = make_bytes().split_to(5);
-    }).is_err());
+    })
+    .is_err());

     assert!(panic::catch_unwind(move || {
         let _ = make_bytes().split_off(5);
-    }).is_err());
+    })
+    .is_err());
 }

 #[test]
@@ -864,7 +863,6 @@ fn slice_ref_works() {
     test_slice_ref(&bytes, 9, 9, b"");
 }

-
 #[test]
 fn slice_ref_empty() {
     let bytes = Bytes::from(&b""[..]);
diff --git a/tests/test_bytes_vec_alloc.rs b/tests/test_bytes_vec_alloc.rs
index dc007cfd4..418a9cd64 100644
--- a/tests/test_bytes_vec_alloc.rs
+++ b/tests/test_bytes_vec_alloc.rs
@@ -39,7 +39,11 @@ unsafe impl GlobalAlloc for Ledger {
         let off_ptr = (ptr as *mut usize).offset(-1);
         let orig_size = off_ptr.read();
         if orig_size != layout.size() {
-            panic!("bad dealloc: alloc size was {}, dealloc size is {}", orig_size, layout.size());
+            panic!(
+                "bad dealloc: alloc size was {}, dealloc size is {}",
+                orig_size,
+                layout.size()
+            );
         }

         let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
diff --git a/tests/test_chain.rs b/tests/test_chain.rs
index df354bf0f..82de7fcec 100644
--- a/tests/test_chain.rs
+++ b/tests/test_chain.rs
@@ -1,7 +1,7 @@
 #![deny(warnings, rust_2018_idioms)]

-use bytes::{Buf, BufMut, Bytes};
 use bytes::buf::{BufExt, BufMutExt};
+use bytes::{Buf, BufMut, Bytes};

 #[cfg(feature = "std")]
 use std::io::IoSlice;
diff --git a/tests/test_iter.rs b/tests/test_iter.rs
index 13b86cdad..2302a69d6 100644
--- a/tests/test_iter.rs
+++ b/tests/test_iter.rs
@@ -11,7 +11,6 @@ fn iter_len() {
     assert_eq!(iter.len(), 11);
 }

-
 #[test]
 fn empty_iter_len() {
     let buf = Bytes::from_static(b"");
diff --git a/tests/test_reader.rs b/tests/test_reader.rs
index 608d97410..b5da2c963 100644
--- a/tests/test_reader.rs
+++ b/tests/test_reader.rs
@@ -3,7 +3,7 @@

 use std::io::{BufRead, Read};

-use bytes::buf::{BufExt};
+use bytes::buf::BufExt;

 #[test]
 fn read() {
diff --git a/tests/test_serde.rs b/tests/test_serde.rs
index 18b135692..36b87f28e 100644
--- a/tests/test_serde.rs
+++ b/tests/test_serde.rs
@@ -1,7 +1,7 @@
 #![cfg(feature = "serde")]
 #![deny(warnings, rust_2018_idioms)]

-use serde_test::{Token, assert_tokens};
+use serde_test::{assert_tokens, Token};

 #[test]
 fn test_ser_de_empty() {
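
To reproduce the new CI check locally before pushing, the same invocation that
the ci/azure-rustfmt.yml step runs can be used directly. This is a minimal
sketch assuming a rustup-managed toolchain, where the rustfmt component may
still need to be added:

    # install the rustfmt component if it is not already present (assumes rustup)
    rustup component add rustfmt

    # the exact invocation the CI step runs; exits non-zero on any
    # formatting drift and modifies nothing
    cargo fmt --all -- --check

    # rewrite the sources in place to fix reported drift
    cargo fmt --all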