Replace deprecated std library constants for floats and integers #1420

Merged
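All of the changes below follow one pattern: the legacy `min_value()` / `max_value()` functions and the `std::{f64, i64, u32, u64, u16, usize}` module-level constants are swapped for the associated constants on the primitive types, which have been stable since Rust 1.43. A minimal before/after sketch of that pattern (illustrative only, not taken from the diff):

fn main() {
    // Before: legacy functions and module-level constants.
    let old_max: u32 = u32::max_value();
    let old_eps: f64 = std::f64::EPSILON;

    // After: associated constants on the primitive types (Rust 1.43+).
    let new_max: u32 = u32::MAX;
    let new_eps: f64 = f64::EPSILON;

    assert_eq!(old_max, new_max);
    assert_eq!(old_eps, new_eps);
}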
10 changes: 4 additions & 6 deletions common/src/lib.rs
@@ -104,8 +104,6 @@ pub fn u64_to_f64(val: u64) -> f64 {
#[cfg(test)]
pub mod test {

-use std::f64;
-
use proptest::prelude::*;

use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
@@ -135,11 +133,11 @@ pub mod test {

#[test]
fn test_i64_converter() {
-assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());
-assert_eq!(i64_to_u64(i64::max_value()), u64::max_value());
+assert_eq!(i64_to_u64(i64::MIN), u64::MIN);
+assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
test_i64_converter_helper(0i64);
-test_i64_converter_helper(i64::min_value());
-test_i64_converter_helper(i64::max_value());
+test_i64_converter_helper(i64::MIN);
+test_i64_converter_helper(i64::MAX);
for i in -1000i64..1000i64 {
test_i64_converter_helper(i);
}
4 changes: 2 additions & 2 deletions common/src/serialize.rs
@@ -229,7 +229,7 @@ pub mod test {
fixed_size_test::<u32>();
assert_eq!(4, serialize_test(3u32));
assert_eq!(4, serialize_test(5u32));
-assert_eq!(4, serialize_test(u32::max_value()));
+assert_eq!(4, serialize_test(u32::MAX));
}

#[test]
@@ -277,6 +277,6 @@ pub mod test {
assert_eq!(serialize_test(VInt(1234u64)), 2);
assert_eq!(serialize_test(VInt(16_383u64)), 2);
assert_eq!(serialize_test(VInt(16_384u64)), 3);
-assert_eq!(serialize_test(VInt(u64::max_value())), 10);
+assert_eq!(serialize_test(VInt(u64::MAX)), 10);
}
}
4 changes: 2 additions & 2 deletions common/src/vint.rs
@@ -199,7 +199,7 @@ mod tests {
aux_test_vint(0);
aux_test_vint(1);
aux_test_vint(5);
-aux_test_vint(u64::max_value());
+aux_test_vint(u64::MAX);
for i in 1..9 {
let power_of_128 = 1u64 << (7 * i);
aux_test_vint(power_of_128 - 1u64);
@@ -228,6 +228,6 @@ mod tests {
aux_test_serialize_vint_u32(power_of_128);
aux_test_serialize_vint_u32(power_of_128 + 1u32);
}
-aux_test_serialize_vint_u32(u32::max_value());
+aux_test_serialize_vint_u32(u32::MAX);
}
}
2 changes: 1 addition & 1 deletion query-grammar/src/query_grammar.rs
@@ -299,7 +299,7 @@ fn boost<'a>() -> impl Parser<&'a str, Output = f64> {

fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAst> {
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
-Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
+Some(boost) if (boost - 1.0).abs() > f64::EPSILON => {
UserInputAst::Boost(Box::new(leaf), boost)
}
_ => leaf,
2 changes: 1 addition & 1 deletion src/docset.rs
@@ -7,7 +7,7 @@ use crate::DocId;
///
/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
/// to compare [u32; 4].
-pub const TERMINATED: DocId = std::i32::MAX as u32;
+pub const TERMINATED: DocId = i32::MAX as u32;

/// Represents an iterable set of sorted doc ids.
pub trait DocSet: Send {
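The doc comment in this hunk explains the choice: SSE2 only provides signed 32-bit comparisons, so the sentinel has to stay positive when reinterpreted as `i32`, which is why it is `i32::MAX as u32` rather than `u32::MAX`. A small illustrative check (not part of the PR):

fn main() {
    const TERMINATED: u32 = i32::MAX as u32; // 2_147_483_647
    // Still positive under a signed view, so packed signed comparisons
    // keep treating it as greater than any real doc id.
    assert!((TERMINATED as i32) > 0);
    // u32::MAX would reinterpret as -1 and compare smaller than every doc id.
    assert_eq!(u32::MAX as i32, -1);
}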
2 changes: 1 addition & 1 deletion src/fastfield/writer.rs
@@ -268,7 +268,7 @@ impl IntFastFieldWriter {
vals: BlockedBitpacker::new(),
val_count: 0,
val_if_missing: 0u64,
-val_min: u64::max_value(),
+val_min: u64::MAX,
val_max: 0,
}
}
2 changes: 1 addition & 1 deletion src/fieldnorm/code.rs
@@ -294,7 +294,7 @@ mod tests {

#[test]
fn test_u32_max() {
-assert_eq!(fieldnorm_to_id(u32::max_value()), u8::max_value());
+assert_eq!(fieldnorm_to_id(u32::MAX), u8::MAX);
}

#[test]
2 changes: 1 addition & 1 deletion src/indexer/index_writer.rs
@@ -29,7 +29,7 @@ pub const MARGIN_IN_BYTES: usize = 1_000_000;

// We impose the memory per thread to be at least 3 MB.
pub const MEMORY_ARENA_NUM_BYTES_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
-pub const MEMORY_ARENA_NUM_BYTES_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;
+pub const MEMORY_ARENA_NUM_BYTES_MAX: usize = u32::MAX as usize - MARGIN_IN_BYTES;

// We impose the number of index writter thread to be at most this.
pub const MAX_NUM_THREAD: usize = 8;
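For reference, the two arena bounds in this hunk work out as follows (a quick check using the `MARGIN_IN_BYTES = 1_000_000` shown above; illustrative only):

fn main() {
    const MARGIN_IN_BYTES: usize = 1_000_000;
    let min = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
    let max = u32::MAX as usize - MARGIN_IN_BYTES;
    assert_eq!(min, 3_000_000); // the "at least 3 MB" per thread from the comment
    assert_eq!(max, 4_293_967_295); // u32::MAX (4_294_967_295) minus the margin
}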
4 changes: 2 additions & 2 deletions src/indexer/merger.rs
@@ -688,8 +688,8 @@ impl IndexMerger {
let offsets =
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;

-let mut min_value = u64::max_value();
-let mut max_value = u64::min_value();
+let mut min_value = u64::MAX;
+let mut max_value = u64::MIN;
let mut num_vals = 0;

let mut vals = Vec::with_capacity(100);
4 changes: 2 additions & 2 deletions src/positions/reader.rs
@@ -47,7 +47,7 @@ impl PositionReader {
bit_widths: bit_widths.clone(),
positions: positions.clone(),
block_decoder: BlockDecoder::default(),
-block_offset: std::i64::MAX as u64,
+block_offset: i64::MAX as u64,
anchor_offset: 0u64,
original_bit_widths: bit_widths,
original_positions: positions,
@@ -57,7 +57,7 @@ impl PositionReader {
fn reset(&mut self) {
self.positions = self.original_positions.clone();
self.bit_widths = self.original_bit_widths.clone();
-self.block_offset = std::i64::MAX as u64;
+self.block_offset = i64::MAX as u64;
self.anchor_offset = 0u64;
}

4 changes: 2 additions & 2 deletions src/postings/block_segment_postings.rs
@@ -99,7 +99,7 @@ impl BlockSegmentPostings {

let mut block_segment_postings = BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
-loaded_offset: std::usize::MAX,
+loaded_offset: usize::MAX,
freq_decoder: BlockDecoder::with_val(1),
freq_reading_option,
block_max_score_cache: None,
@@ -169,7 +169,7 @@ impl BlockSegmentPostings {
split_into_skips_and_postings(doc_freq, postings_data)?;
self.data = postings_data;
self.block_max_score_cache = None;
-self.loaded_offset = std::usize::MAX;
+self.loaded_offset = usize::MAX;
if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data, doc_freq);
} else {
6 changes: 3 additions & 3 deletions src/postings/recorder.rs
@@ -92,7 +92,7 @@ impl Default for NothingRecorder {
fn default() -> Self {
NothingRecorder {
stack: ExpUnrolledLinkedList::new(),
-current_doc: u32::max_value(),
+current_doc: u32::MAX,
}
}
}
@@ -230,7 +230,7 @@ impl Default for TfAndPositionRecorder {
fn default() -> Self {
TfAndPositionRecorder {
stack: ExpUnrolledLinkedList::new(),
-current_doc: u32::max_value(),
+current_doc: u32::MAX,
term_doc_freq: 0u32,
}
}
@@ -339,7 +339,7 @@ mod tests {
#[test]
fn test_vint_u32() {
let mut buffer = vec![];
-let vals = [0, 1, 324_234_234, u32::max_value()];
+let vals = [0, 1, 324_234_234, u32::MAX];
for &i in &vals {
assert!(write_u32_vint(i, &mut buffer).is_ok());
}
2 changes: 1 addition & 1 deletion src/postings/skip.rs
@@ -250,7 +250,7 @@ impl SkipReader {
BlockInfo::VInt { num_docs } => {
debug_assert_eq!(num_docs, self.remaining_docs);
self.remaining_docs = 0;
-self.byte_offset = std::usize::MAX;
+self.byte_offset = usize::MAX;
}
}
self.last_doc_in_previous_block = self.last_doc_in_block;
4 changes: 2 additions & 2 deletions src/postings/stacker/memory_arena.rs
@@ -42,7 +42,7 @@ pub struct Addr(u32);
impl Addr {
/// Creates a null pointer.
pub fn null_pointer() -> Addr {
-Addr(u32::max_value())
+Addr(u32::MAX)
}

/// Returns the `Addr` object for `addr + offset`
@@ -64,7 +64,7 @@ impl Addr {

/// Returns true if and only if the `Addr` is null.
pub fn is_null(self) -> bool {
-self.0 == u32::max_value()
+self.0 == u32::MAX
}
}

4 changes: 2 additions & 2 deletions src/postings/term_info.rs
@@ -19,13 +19,13 @@ pub struct TermInfo {
impl TermInfo {
pub(crate) fn posting_num_bytes(&self) -> u32 {
let num_bytes = self.postings_range.len();
-assert!(num_bytes <= std::u32::MAX as usize);
+assert!(num_bytes <= u32::MAX as usize);
num_bytes as u32
}

pub(crate) fn positions_num_bytes(&self) -> u32 {
let num_bytes = self.positions_range.len();
-assert!(num_bytes <= std::u32::MAX as usize);
+assert!(num_bytes <= u32::MAX as usize);
num_bytes as u32
}
}
2 changes: 1 addition & 1 deletion src/termdict/fst_termdict/mod.rs
@@ -10,7 +10,7 @@
//! For `u64`-terms, tantivy explicitely uses a `BigEndian` representation to ensure that the
//! lexicographical order matches the natural order of integers.
//!
-//! `i64`-terms are transformed to `u64` using a continuous mapping `val ⟶ val - i64::min_value()`
+//! `i64`-terms are transformed to `u64` using a continuous mapping `val ⟶ val - i64::MIN`
//! and then treated as a `u64`.
//!
//! `f64`-terms are transformed to `u64` using a mapping that preserve order, and are then treated
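The module docs above describe the order-preserving integer mapping. A standalone sketch of what that mapping does (tantivy's real helpers are `i64_to_u64` / `u64_to_i64` in `common/src/lib.rs`, also touched by this PR; this version is only illustrative):

fn main() {
    // `val - i64::MIN`, done as a sign-bit flip: the resulting u64 values
    // sort in the same order as the original i64 values.
    fn i64_to_u64(val: i64) -> u64 {
        (val as u64) ^ (1u64 << 63)
    }
    fn u64_to_i64(val: u64) -> i64 {
        (val ^ (1u64 << 63)) as i64
    }
    assert_eq!(i64_to_u64(i64::MIN), u64::MIN);
    assert_eq!(i64_to_u64(i64::MAX), u64::MAX);
    assert!(i64_to_u64(-1) < i64_to_u64(0)); // order preserved across zero
    assert_eq!(u64_to_i64(i64_to_u64(-42)), -42); // round-trips
}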
2 changes: 1 addition & 1 deletion src/termdict/mod.rs
@@ -10,7 +10,7 @@
//! For `u64`-terms, tantivy explicitely uses a `BigEndian` representation to ensure that the
//! lexicographical order matches the natural order of integers.
//!
-//! `i64`-terms are transformed to `u64` using a continuous mapping `val ⟶ val - i64::min_value()`
+//! `i64`-terms are transformed to `u64` using a continuous mapping `val ⟶ val - i64::MIN`
//! and then treated as a `u64`.
//!
//! `f64`-terms are transformed to `u64` using a mapping that preserve order, and are then treated
2 changes: 1 addition & 1 deletion src/tokenizer/mod.rs
@@ -155,7 +155,7 @@ pub use self::whitespace_tokenizer::WhitespaceTokenizer;
/// Tokenizer are in charge of not emitting tokens larger than this value.
/// Currently, if a faulty tokenizer implementation emits tokens with a length larger than
/// `2^16 - 1 - 5`, the token will simply be ignored downstream.
-pub const MAX_TOKEN_LEN: usize = u16::max_value() as usize - 5;
+pub const MAX_TOKEN_LEN: usize = u16::MAX as usize - 5;

#[cfg(test)]
pub mod tests {
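The `2^16 - 1 - 5` mentioned in the doc comment is exactly the value of the rewritten constant; a trivial check (illustrative only):

fn main() {
    const MAX_TOKEN_LEN: usize = u16::MAX as usize - 5;
    assert_eq!(MAX_TOKEN_LEN, 65_530);
    assert_eq!(MAX_TOKEN_LEN, (1 << 16) - 1 - 5);
}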
2 changes: 1 addition & 1 deletion src/tokenizer/tokenizer.rs
@@ -30,7 +30,7 @@ impl Default for Token {
Token {
offset_from: 0,
offset_to: 0,
-position: usize::max_value(),
+position: usize::MAX,
text: String::with_capacity(200),
position_length: 1,
}