
Commit

build: set MSRV to 1.56; remove dev-dependencies; comment out bench code, not worth keeping as the only reason MSRV went up is because rayon bumped
m4b committed Jan 1, 2024
1 parent 7d095ad commit ef7fd99
Showing 3 changed files with 161 additions and 150 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -15,7 +15,7 @@ jobs:
include:
- build: MSRV # Minimum supported Rust version, ensure it matches the rust-version in Cargo.toml
os: ubuntu-latest
-rust: 1.63
+rust: 1.56
- build: stable
os: ubuntu-latest
rust: stable
6 changes: 1 addition & 5 deletions Cargo.toml
@@ -10,7 +10,7 @@ license = "MIT"
documentation = "https://docs.rs/scroll"
description = "A suite of powerful, extensible, generic, endian-aware Read/Write traits for byte buffers"
include = ["src/**/*", "Cargo.toml", "LICENSE", "README.md"]
rust-version = "1.63"
rust-version = "1.56"

[features]
default = ["std"]
@@ -19,7 +19,3 @@ derive = ["dep:scroll_derive"]

[dependencies]
scroll_derive = { version = "0.11", optional = true, path = "scroll_derive" }

-[dev-dependencies]
-rayon = "1"
-byteorder = "1"
303 changes: 159 additions & 144 deletions benches/bench.rs
@@ -1,157 +1,172 @@
#![feature(test)]
extern crate test;
// last numbers before this was disabled for MSRV reasons

use scroll::{Cread, Pread, LE};
use test::black_box;
// test bench_byteorder ... bench: 26,971 ns/iter (+/- 334) = 7415 MB/s
// test bench_byteorder_vec ... bench: 327,029 ns/iter (+/- 4,127) = 3057 MB/s
// test bench_cread ... bench: 26,978 ns/iter (+/- 183) = 7413 MB/s
// test bench_cread_vec ... bench: 220,327 ns/iter (+/- 880) = 4538 MB/s
// test bench_gread_unwrap ... bench: 27,044 ns/iter (+/- 161) = 7395 MB/s
// test bench_gread_vec ... bench: 394,277 ns/iter (+/- 7,979) = 2536 MB/s
// test bench_parallel_cread_with ... bench: 84,137 ns/iter (+/- 2,095) = 11885 MB/s
// test bench_parallel_pread_with ... bench: 115,520 ns/iter (+/- 4,162) = 8656 MB/s
// test bench_pread_ctx_vec ... bench: 411,731 ns/iter (+/- 11,635) = 2428 MB/s
// test bench_pread_unwrap ... bench: 51,681 ns/iter (+/- 583) = 3869 MB/s
// test bench_pread_vec ... bench: 406,224 ns/iter (+/- 6,717) = 2461 MB/s
// test bench_pread_with_unwrap ... bench: 27,197 ns/iter (+/- 152) = 7353 MB/s

#[bench]
fn bench_parallel_cread_with(b: &mut test::Bencher) {
use rayon::prelude::*;
let vec = vec![0u8; 1_000_000];
let nums = vec![0usize; 500_000];
b.iter(|| {
let data = black_box(&vec[..]);
nums.par_iter().for_each(|offset| {
let _: u16 = black_box(data.cread_with(*offset, LE));
});
});
b.bytes = vec.len() as u64;
}
// #![feature(test)]
// extern crate test;

#[bench]
fn bench_cread_vec(b: &mut test::Bencher) {
let vec = vec![0u8; 1_000_000];
b.iter(|| {
let data = black_box(&vec[..]);
for val in data.chunks(2) {
let _: u16 = black_box(val.cread_with(0, LE));
}
});
b.bytes = vec.len() as u64;
}
// use scroll::{Cread, Pread, LE};
// use test::black_box;

#[bench]
fn bench_cread(b: &mut test::Bencher) {
const NITER: i32 = 100_000;
b.iter(|| {
for _ in 1..NITER {
let data = black_box([1, 2]);
let _: u16 = black_box(data.cread(0));
}
});
b.bytes = 2 * NITER as u64;
}
// #[bench]
// fn bench_parallel_cread_with(b: &mut test::Bencher) {
// use rayon::prelude::*;
// let vec = vec![0u8; 1_000_000];
// let nums = vec![0usize; 500_000];
// b.iter(|| {
// let data = black_box(&vec[..]);
// nums.par_iter().for_each(|offset| {
// let _: u16 = black_box(data.cread_with(*offset, LE));
// });
// });
// b.bytes = vec.len() as u64;
// }

#[bench]
fn bench_pread_ctx_vec(b: &mut test::Bencher) {
let vec = vec![0u8; 1_000_000];
b.iter(|| {
let data = black_box(&vec[..]);
for val in data.chunks(2) {
let _: Result<u16, _> = black_box(val.pread(0));
}
});
b.bytes = vec.len() as u64;
}
// #[bench]
// fn bench_cread_vec(b: &mut test::Bencher) {
// let vec = vec![0u8; 1_000_000];
// b.iter(|| {
// let data = black_box(&vec[..]);
// for val in data.chunks(2) {
// let _: u16 = black_box(val.cread_with(0, LE));
// }
// });
// b.bytes = vec.len() as u64;
// }

#[bench]
fn bench_pread_with_unwrap(b: &mut test::Bencher) {
const NITER: i32 = 100_000;
b.iter(|| {
for _ in 1..NITER {
let data: &[u8] = &black_box([1, 2]);
let _: u16 = black_box(data.pread_with(0, LE).unwrap());
}
});
b.bytes = 2 * NITER as u64;
}
// #[bench]
// fn bench_cread(b: &mut test::Bencher) {
// const NITER: i32 = 100_000;
// b.iter(|| {
// for _ in 1..NITER {
// let data = black_box([1, 2]);
// let _: u16 = black_box(data.cread(0));
// }
// });
// b.bytes = 2 * NITER as u64;
// }

#[bench]
fn bench_pread_vec(b: &mut test::Bencher) {
let vec = vec![0u8; 1_000_000];
b.iter(|| {
let data = black_box(&vec[..]);
for val in data.chunks(2) {
let _: Result<u16, _> = black_box(val.pread_with(0, LE));
}
});
b.bytes = vec.len() as u64;
}
// #[bench]
// fn bench_pread_ctx_vec(b: &mut test::Bencher) {
// let vec = vec![0u8; 1_000_000];
// b.iter(|| {
// let data = black_box(&vec[..]);
// for val in data.chunks(2) {
// let _: Result<u16, _> = black_box(val.pread(0));
// }
// });
// b.bytes = vec.len() as u64;
// }

#[bench]
fn bench_pread_unwrap(b: &mut test::Bencher) {
const NITER: i32 = 100_000;
b.iter(|| {
for _ in 1..NITER {
let data = black_box([1, 2]);
let _: u16 = black_box(data.pread(0)).unwrap();
}
});
b.bytes = 2 * NITER as u64;
}
// #[bench]
// fn bench_pread_with_unwrap(b: &mut test::Bencher) {
// const NITER: i32 = 100_000;
// b.iter(|| {
// for _ in 1..NITER {
// let data: &[u8] = &black_box([1, 2]);
// let _: u16 = black_box(data.pread_with(0, LE).unwrap());
// }
// });
// b.bytes = 2 * NITER as u64;
// }

#[bench]
fn bench_gread_vec(b: &mut test::Bencher) {
let vec = vec![0u8; 1_000_000];
b.iter(|| {
let data = black_box(&vec[..]);
for val in data.chunks(2) {
let mut offset = 0;
let _: Result<u16, _> = black_box(val.gread(&mut offset));
}
});
b.bytes = vec.len() as u64;
}
// #[bench]
// fn bench_pread_vec(b: &mut test::Bencher) {
// let vec = vec![0u8; 1_000_000];
// b.iter(|| {
// let data = black_box(&vec[..]);
// for val in data.chunks(2) {
// let _: Result<u16, _> = black_box(val.pread_with(0, LE));
// }
// });
// b.bytes = vec.len() as u64;
// }

#[bench]
fn bench_gread_unwrap(b: &mut test::Bencher) {
const NITER: i32 = 100_000;
b.iter(|| {
for _ in 1..NITER {
let data = black_box([1, 2]);
let mut offset = 0;
let _: u16 = black_box(data.gread_with(&mut offset, LE).unwrap());
}
});
b.bytes = 2 * NITER as u64;
}
// #[bench]
// fn bench_pread_unwrap(b: &mut test::Bencher) {
// const NITER: i32 = 100_000;
// b.iter(|| {
// for _ in 1..NITER {
// let data = black_box([1, 2]);
// let _: u16 = black_box(data.pread(0)).unwrap();
// }
// });
// b.bytes = 2 * NITER as u64;
// }

#[bench]
fn bench_parallel_pread_with(b: &mut test::Bencher) {
use rayon::prelude::*;
let vec = vec![0u8; 1_000_000];
let nums = vec![0usize; 500_000];
b.iter(|| {
let data = black_box(&vec[..]);
nums.par_iter().for_each(|offset| {
let _: Result<u16, _> = black_box(data.pread_with(*offset, LE));
});
});
b.bytes = vec.len() as u64;
}
// #[bench]
// fn bench_gread_vec(b: &mut test::Bencher) {
// let vec = vec![0u8; 1_000_000];
// b.iter(|| {
// let data = black_box(&vec[..]);
// for val in data.chunks(2) {
// let mut offset = 0;
// let _: Result<u16, _> = black_box(val.gread(&mut offset));
// }
// });
// b.bytes = vec.len() as u64;
// }

#[bench]
fn bench_byteorder_vec(b: &mut test::Bencher) {
use byteorder::ReadBytesExt;
let vec = vec![0u8; 1_000_000];
b.iter(|| {
let data = black_box(&vec[..]);
for mut val in data.chunks(2) {
let _: Result<u16, _> = black_box(val.read_u16::<byteorder::LittleEndian>());
}
});
b.bytes = vec.len() as u64;
}
// #[bench]
// fn bench_gread_unwrap(b: &mut test::Bencher) {
// const NITER: i32 = 100_000;
// b.iter(|| {
// for _ in 1..NITER {
// let data = black_box([1, 2]);
// let mut offset = 0;
// let _: u16 = black_box(data.gread_with(&mut offset, LE).unwrap());
// }
// });
// b.bytes = 2 * NITER as u64;
// }

#[bench]
fn bench_byteorder(b: &mut test::Bencher) {
use byteorder::ByteOrder;
const NITER: i32 = 100_000;
b.iter(|| {
for _ in 1..NITER {
let data = black_box([1, 2]);
let _: u16 = black_box(byteorder::LittleEndian::read_u16(&data));
}
});
b.bytes = 2 * NITER as u64;
}
// #[bench]
// fn bench_parallel_pread_with(b: &mut test::Bencher) {
// use rayon::prelude::*;
// let vec = vec![0u8; 1_000_000];
// let nums = vec![0usize; 500_000];
// b.iter(|| {
// let data = black_box(&vec[..]);
// nums.par_iter().for_each(|offset| {
// let _: Result<u16, _> = black_box(data.pread_with(*offset, LE));
// });
// });
// b.bytes = vec.len() as u64;
// }

// #[bench]
// fn bench_byteorder_vec(b: &mut test::Bencher) {
// use byteorder::ReadBytesExt;
// let vec = vec![0u8; 1_000_000];
// b.iter(|| {
// let data = black_box(&vec[..]);
// for mut val in data.chunks(2) {
// let _: Result<u16, _> = black_box(val.read_u16::<byteorder::LittleEndian>());
// }
// });
// b.bytes = vec.len() as u64;
// }

// #[bench]
// fn bench_byteorder(b: &mut test::Bencher) {
// use byteorder::ByteOrder;
// const NITER: i32 = 100_000;
// b.iter(|| {
// for _ in 1..NITER {
// let data = black_box([1, 2]);
// let _: u16 = black_box(byteorder::LittleEndian::read_u16(&data));
// }
// });
// b.bytes = 2 * NITER as u64;
// }
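
For reference, the traits these disabled benchmarks exercise (Pread, Cread, and the offset-advancing gread_* methods) are all part of scroll's stable API; only the #[bench] harness itself (#![feature(test)]) needs nightly. Below is a minimal stand-alone sketch of the three read styles being measured; it is not part of this commit, and the byte values and the main wrapper are only for illustration.

use scroll::{Cread, Pread, LE};

fn main() -> Result<(), scroll::Error> {
    let data: &[u8] = &[0xad, 0xde, 0xef, 0xbe];

    // pread_with: bounds-checked read at a fixed offset, returns a Result.
    let a: u16 = data.pread_with(0, LE)?;

    // gread_with: like pread_with, but advances the offset cursor for sequential reads.
    let mut offset = 0;
    let b: u16 = data.gread_with(&mut offset, LE)?;
    let c: u16 = data.gread_with(&mut offset, LE)?;

    // cread_with: no error handling; the caller keeps the offset in bounds.
    let d: u16 = data.cread_with(2, LE);

    assert_eq!((a, b, c, d), (0xdead, 0xdead, 0xbeef, 0xbeef));
    Ok(())
}

The parallel variants above only wrap these same calls in rayon's par_iter, so dropping rayon loses nothing but the parallel timing numbers recorded in the comment block at the top of the file.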
