diff --git a/src/core/searcher.rs b/src/core/searcher.rs
index ab23a73d00..497e6767a4 100644
--- a/src/core/searcher.rs
+++ b/src/core/searcher.rs
@@ -249,7 +249,7 @@ impl SearcherInner {
         index: Index,
         segment_readers: Vec<SegmentReader>,
         generation: TrackedObject<SearcherGeneration>,
-        doc_store_cache_size: usize,
+        doc_store_cache_num_blocks: usize,
     ) -> io::Result<SearcherInner> {
         assert_eq!(
             &segment_readers
@@ -261,7 +261,7 @@
         );
         let store_readers: Vec<StoreReader> = segment_readers
             .iter()
-            .map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
+            .map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_num_blocks))
             .collect::<io::Result<Vec<_>>>()?;

         Ok(SearcherInner {
diff --git a/src/core/segment_reader.rs b/src/core/segment_reader.rs
index c19bbffcb5..8c82cff6f4 100644
--- a/src/core/segment_reader.rs
+++ b/src/core/segment_reader.rs
@@ -134,9 +134,12 @@ impl SegmentReader {
         &self.fieldnorm_readers
     }

-    /// Accessor to the segment's `StoreReader`.
-    pub fn get_store_reader(&self, cache_size: usize) -> io::Result<StoreReader> {
-        StoreReader::open(self.store_file.clone(), cache_size)
+    /// Accessor to the segment's [`StoreReader`](crate::store::StoreReader).
+    ///
+    /// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
+    /// The size of blocks is configurable, and should be taken into account when sizing the cache.
+    pub fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader> {
+        StoreReader::open(self.store_file.clone(), cache_num_blocks)
     }

     /// Open a new segment for reading.
diff --git a/src/reader/mod.rs b/src/reader/mod.rs
index 1b6b700847..a66bc4d4ab 100644
--- a/src/reader/mod.rs
+++ b/src/reader/mod.rs
@@ -151,7 +151,7 @@ impl TryInto<IndexReader> for IndexReaderBuilder {
 }

 struct InnerIndexReader {
-    doc_store_cache_size: usize,
+    doc_store_cache_num_blocks: usize,
     index: Index,
     warming_state: WarmingState,
     searcher: arc_swap::ArcSwap<SearcherInner>,
@@ -178,7 +178,7 @@
             &searcher_generation_inventory,
         )?;
         Ok(InnerIndexReader {
-            doc_store_cache_size,
+            doc_store_cache_num_blocks: doc_store_cache_size,
             index,
             warming_state,
             searcher: ArcSwap::from(searcher),
@@ -214,7 +214,7 @@

     fn create_searcher(
         index: &Index,
-        doc_store_cache_size: usize,
+        doc_store_cache_num_blocks: usize,
         warming_state: &WarmingState,
         searcher_generation_counter: &Arc<AtomicU64>,
         searcher_generation_inventory: &Inventory<SearcherGeneration>,
@@ -232,7 +232,7 @@
             index.clone(),
             segment_readers,
             searcher_generation,
-            doc_store_cache_size,
+            doc_store_cache_num_blocks,
         )?);

         warming_state.warm_new_searcher_generation(&searcher.clone().into())?;
@@ -242,7 +242,7 @@
     fn reload(&self) -> crate::Result<()> {
         let searcher = Self::create_searcher(
             &self.index,
-            self.doc_store_cache_size,
+            self.doc_store_cache_num_blocks,
             &self.warming_state,
             &self.searcher_generation_counter,
             &self.searcher_generation_inventory,
diff --git a/src/store/mod.rs b/src/store/mod.rs
index dd82c9370b..cd59a9bcbd 100644
--- a/src/store/mod.rs
+++ b/src/store/mod.rs
@@ -4,8 +4,8 @@
 //! order to be handled in the `Store`.
 //!
 //! Internally, documents (or rather their stored fields) are serialized to a buffer.
-//! When the buffer exceeds 16K, the buffer is compressed using `brotli`, `LZ4` or `snappy`
-//! and the resulting block is written to disk.
+//! When the buffer exceeds `block_size` (defaults to 16K), the buffer is compressed using
+//! `brotli`, `LZ4` or `snappy` and the resulting block is written to disk.
 //!
 //! One can then request for a specific `DocId`.
 //! A skip list helps navigating to the right block,
@@ -29,7 +29,6 @@
 //! [`SegmentReader`'s `doc` method](../struct.SegmentReader.html#method.doc)
 //! - at the index level, the [`Searcher::doc()`](crate::Searcher::doc) method
 //!
-//! !

 mod compressors;
 mod decompressors;
diff --git a/src/store/reader.rs b/src/store/reader.rs
index 32319c8c18..9b9ea1647d 100644
--- a/src/store/reader.rs
+++ b/src/store/reader.rs
@@ -114,7 +114,10 @@ impl Sum for CacheStats {

 impl StoreReader {
     /// Opens a store reader
-    pub fn open(store_file: FileSlice, cache_size: usize) -> io::Result<StoreReader> {
+    ///
+    /// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
+    /// The size of blocks is configurable, and should be taken into account when sizing the cache.
+    pub fn open(store_file: FileSlice, cache_num_blocks: usize) -> io::Result<StoreReader> {
         let (footer, data_and_offset) = DocStoreFooter::extract_footer(store_file)?;

         let (data_file, offset_index_file) = data_and_offset.split(footer.offset as usize);
@@ -125,8 +128,8 @@
             decompressor: footer.decompressor,
             data: data_file,
             cache: BlockCache {
-                cache: NonZeroUsize::new(cache_size)
-                    .map(|cache_size| Mutex::new(LruCache::new(cache_size))),
+                cache: NonZeroUsize::new(cache_num_blocks)
+                    .map(|cache_num_blocks| Mutex::new(LruCache::new(cache_num_blocks))),
                 cache_hits: Default::default(),
                 cache_misses: Default::default(),
             },
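
Note for reviewers: the rename only changes what the value means (a count of decompressed blocks rather than an opaque "size"), not how it flows through the reader. Below is a minimal sketch of how a caller might size the cache after this change. It assumes the matching `IndexReaderBuilder::doc_store_cache_num_blocks` setter is renamed in the same series (the builder method itself is not shown in this diff); everything else uses the public tantivy API.

```rust
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, DocAddress, Index, IndexReader};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(title => "The Old Man and the Sea"))?;
    writer.commit()?;

    // The cache is sized in blocks, not bytes: with the default 16K
    // `block_size`, 100 blocks bound the decompressed doc store cache
    // at roughly 1.6 MB per segment's `StoreReader`.
    let reader: IndexReader = index
        .reader_builder()
        .doc_store_cache_num_blocks(100) // assumed renamed builder setter
        .try_into()?;

    // Fetching a stored document decompresses the block holding it and
    // puts it in the LRU; a second fetch from the same block is a hit.
    let searcher = reader.searcher();
    let _doc = searcher.doc(DocAddress::new(0, 0))?;
    Ok(())
}
```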