diff --git a/fuzz/fuzz_targets/fuzz_redb.rs b/fuzz/fuzz_targets/fuzz_redb.rs
index 7e2ec3cc..74b11ffd 100644
--- a/fuzz/fuzz_targets/fuzz_redb.rs
+++ b/fuzz/fuzz_targets/fuzz_redb.rs
@@ -511,6 +511,23 @@ fn exec_table_crash_support(config: &FuzzConfig, apply: fn(WriteTransa
         .set_region_size(config.region_size.value as u64)
         .create_with_backend(backend).unwrap();
 
+    // Disable IO error simulation while we get a baseline number of allocated pages
+    let old_countdown = countdown.swap(u64::MAX, Ordering::SeqCst);
+    let txn = db.begin_write().unwrap();
+    // Touch the savepoints tables to be sure they get created, so that they occupy pages
+    let id = txn.persistent_savepoint().unwrap();
+    txn.delete_persistent_savepoint(id).unwrap();
+    #[allow(unused_must_use)]
+    {
+        txn.list_persistent_savepoints().unwrap();
+    }
+    txn.commit().unwrap();
+    db.begin_write().unwrap().commit().unwrap();
+    let txn = db.begin_write().unwrap();
+    let baseline_allocated_pages = txn.stats().unwrap().allocated_pages();
+    txn.abort().unwrap();
+    countdown.store(old_countdown, Ordering::SeqCst);
+
     let txn = db.begin_write().unwrap();
     let mut table = txn.open_table(COUNTER_TABLE).unwrap();
     table.insert((), 0)?;
@@ -626,6 +643,52 @@ fn exec_table_crash_support(config: &FuzzConfig, apply: fn(WriteTransa
         }
     }
 
+    // Repair the database, if needed, and disable IO error simulation
+    countdown.swap(u64::MAX, Ordering::SeqCst);
+    drop(db);
+    let backend = FuzzerBackend::new(FileBackend::new(redb_file.as_file().try_clone().unwrap()).unwrap());
+    db = Database::builder()
+        .set_page_size(config.page_size.value)
+        .set_cache_size(config.cache_size.value)
+        .set_region_size(config.region_size.value as u64)
+        .create_with_backend(backend)
+        .unwrap();
+
+    // Check for leaked pages
+    let read_txn = db.begin_read().unwrap();
+    let txn = db.begin_write().unwrap();
+    for table in read_txn.list_tables().unwrap() {
+        assert!(txn.delete_table(table).unwrap());
+    }
+    for table in read_txn.list_multimap_tables().unwrap() {
+        assert!(txn.delete_multimap_table(table).unwrap());
+    }
+    savepoint_manager.savepoints.clear();
+    for id in txn.list_persistent_savepoints().unwrap() {
+        txn.delete_persistent_savepoint(id).unwrap();
+    }
+    drop(read_txn);
+    txn.commit().unwrap();
+
+    // Clear out the freed table
+    let mut allocated_pages = db.begin_write().unwrap().stats().unwrap().allocated_pages();
+    loop {
+        db.begin_write().unwrap().commit().unwrap();
+        let new_allocated_pages = db.begin_write().unwrap().stats().unwrap().allocated_pages();
+        if new_allocated_pages == allocated_pages {
+            break;
+        } else {
+            allocated_pages = new_allocated_pages;
+        }
+    }
+
+    let txn = db.begin_write().unwrap();
+    let allocated_pages = txn.stats().unwrap().allocated_pages();
+    txn.abort().unwrap();
+    assert_eq!(allocated_pages, baseline_allocated_pages, "Found {} allocated pages at shutdown, expected {}", allocated_pages, baseline_allocated_pages);
+
+    assert!(db.check_integrity().unwrap());
+
     Ok(())
 }
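The shutdown check added above follows a pattern worth spelling out: record a baseline page count, run the workload, delete everything, then drain the freed table to a fixpoint (pages freed by one commit are only returned to the allocator by later commits) before asserting the count is back at the baseline. Below is a minimal sketch of that same pattern against redb's public API; the table name, value types, and error handling are illustrative and not part of the patch.

```rust
use redb::{Database, TableDefinition};

// Hypothetical table, for illustration only
const TABLE: TableDefinition<u64, u64> = TableDefinition::new("demo");

fn check_for_leaks(db: &Database) -> Result<(), Box<dyn std::error::Error>> {
    // Baseline: allocated pages before doing any work
    let txn = db.begin_write()?;
    let baseline = txn.stats()?.allocated_pages();
    txn.abort()?;

    // Do some work, then undo all of it
    let txn = db.begin_write()?;
    {
        let mut table = txn.open_table(TABLE)?;
        table.insert(1, 1)?;
    }
    txn.commit()?;
    let txn = db.begin_write()?;
    txn.delete_table(TABLE)?;
    txn.commit()?;

    // Drain the freed table: commit empty transactions until the
    // allocated-page count stops changing (the fuzzer's fixpoint loop)
    let mut allocated = db.begin_write()?.stats()?.allocated_pages();
    loop {
        db.begin_write()?.commit()?;
        let now = db.begin_write()?.stats()?.allocated_pages();
        if now == allocated {
            break;
        }
        allocated = now;
    }

    assert_eq!(allocated, baseline, "found {allocated} allocated pages, expected {baseline}");
    Ok(())
}
```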
diff --git a/src/transactions.rs b/src/transactions.rs
index b9681782..f761506d 100644
--- a/src/transactions.rs
+++ b/src/transactions.rs
@@ -24,6 +24,8 @@ use std::collections::{HashMap, HashSet};
 use std::fmt::{Debug, Display, Formatter};
 use std::marker::PhantomData;
 use std::ops::RangeBounds;
+#[cfg(any(test, fuzzing))]
+use std::ops::RangeFull;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex};
 use std::{panic, thread};
@@ -506,6 +508,82 @@ impl WriteTransaction {
         })
     }
 
+    #[cfg(any(test, fuzzing))]
+    pub fn print_allocated_page_debug(&self) {
+        let mut all_allocated: HashSet<PageNumber> =
+            HashSet::from_iter(self.mem.all_allocated_pages());
+
+        let tracker = self.mem.tracker_page();
+        all_allocated.remove(&tracker);
+        println!("Tracker page");
+        println!("{tracker:?}");
+
+        let table_allocators = self
+            .tables
+            .lock()
+            .unwrap()
+            .table_tree
+            .all_referenced_pages()
+            .unwrap();
+        let mut table_pages = vec![];
+        for (i, allocator) in table_allocators.iter().enumerate() {
+            allocator.get_allocated_pages(i.try_into().unwrap(), &mut table_pages);
+        }
+        println!("Tables");
+        for p in table_pages {
+            all_allocated.remove(&p);
+            println!("{p:?}")
+        }
+
+        let system_table_allocators = self
+            .system_tables
+            .lock()
+            .unwrap()
+            .table_tree
+            .all_referenced_pages()
+            .unwrap();
+        let mut system_table_pages = vec![];
+        for (i, allocator) in system_table_allocators.iter().enumerate() {
+            allocator.get_allocated_pages(i.try_into().unwrap(), &mut system_table_pages);
+        }
+        println!("System tables");
+        for p in system_table_pages {
+            all_allocated.remove(&p);
+            println!("{p:?}")
+        }
+
+        println!("Free table");
+        if let Some(freed_iter) = self.freed_tree.lock().unwrap().all_pages_iter().unwrap() {
+            for p in freed_iter {
+                let p = p.unwrap();
+                all_allocated.remove(&p);
+                println!("{p:?}")
+            }
+        }
+        println!("Pending free (i.e. in freed table)");
+        for entry in self
+            .freed_tree
+            .lock()
+            .unwrap()
+            .range::<RangeFull>(&(..))
+            .unwrap()
+        {
+            let entry = entry.unwrap();
+            let value = entry.value();
+            for i in 0..value.len() {
+                let p = value.get(i);
+                all_allocated.remove(&p);
+                println!("{p:?}")
+            }
+        }
+        if !all_allocated.is_empty() {
+            println!("Leaked pages");
+            for p in all_allocated {
+                println!("{p:?}");
+            }
+        }
+    }
+
     /// Creates a snapshot of the current database state, which can be used to rollback the database.
     /// This savepoint will exist until it is deleted with `[delete_savepoint()]`.
     ///
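`print_allocated_page_debug` accounts for every live page by owner: the region-tracker page, user table trees, system table trees, pages owned by the freed tree itself, and pages still queued in it as pending-free. Anything that survives all the removals has no owner and is reported as leaked. One plausible way to wire it into the fuzz target's shutdown assertion (hypothetical placement, assuming `db` and `baseline_allocated_pages` from the hunk above are in scope; the patch only adds the helper itself):

```rust
let txn = db.begin_write().unwrap();
let allocated_pages = txn.stats().unwrap().allocated_pages();
if allocated_pages != baseline_allocated_pages {
    // Dump every live page grouped by its owner so the leaked ones stand out
    txn.print_allocated_page_debug();
}
txn.abort().unwrap();
```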
diff --git a/src/tree_store/page_store/bitmap.rs b/src/tree_store/page_store/bitmap.rs
index 8b7bb192..26c52e3f 100644
--- a/src/tree_store/page_store/bitmap.rs
+++ b/src/tree_store/page_store/bitmap.rs
@@ -346,6 +346,7 @@ impl U64GroupedBitmap {
         U64GroupedBitmapDifference::new(&self.data, &exclusion.data)
     }
 
+    #[allow(dead_code)]
     pub fn iter(&self) -> U64GroupedBitmapIter {
         U64GroupedBitmapIter::new(self.len, &self.data)
     }
diff --git a/src/tree_store/page_store/buddy_allocator.rs b/src/tree_store/page_store/buddy_allocator.rs
index 284649f6..8c967e29 100644
--- a/src/tree_store/page_store/buddy_allocator.rs
+++ b/src/tree_store/page_store/buddy_allocator.rs
@@ -245,6 +245,7 @@ impl BuddyAllocator {
         }
     }
 
+    #[cfg(any(test, fuzzing))]
     pub(crate) fn get_allocated_pages(&self, region: u32, output: &mut Vec<PageNumber>) {
         for order in 0..=self.max_order {
             let allocated = self.get_order_allocated(order);
@@ -494,7 +495,12 @@ impl BuddyAllocator {
     /// data must have been initialized by Self::init_new()
     pub(crate) fn free(&mut self, page_number: u32, order: u8) {
         debug_assert!(self.get_order_free_mut(order).get(page_number));
-        debug_assert!(self.get_order_allocated(order).get(page_number));
+        debug_assert!(
+            self.get_order_allocated(order).get(page_number),
+            "Attempted to free page {}, order {}, which is not allocated",
+            page_number,
+            order
+        );
 
         self.get_order_allocated_mut(order).clear(page_number);
diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs
index d00c030c..190bf952 100644
--- a/src/tree_store/page_store/page_manager.rs
+++ b/src/tree_store/page_store/page_manager.rs
@@ -261,6 +261,16 @@ impl TransactionalMemory {
         })
     }
 
+    #[cfg(any(test, fuzzing))]
+    pub(crate) fn all_allocated_pages(&self) -> Vec<PageNumber> {
+        self.state.lock().unwrap().allocators.all_allocated()
+    }
+
+    #[cfg(any(test, fuzzing))]
+    pub(crate) fn tracker_page(&self) -> PageNumber {
+        self.state.lock().unwrap().header.region_tracker()
+    }
+
     pub(crate) fn clear_read_cache(&self) {
         self.storage.invalidate_cache_all()
     }
diff --git a/src/tree_store/page_store/region.rs b/src/tree_store/page_store/region.rs
index e746877c..67993bac 100644
--- a/src/tree_store/page_store/region.rs
+++ b/src/tree_store/page_store/region.rs
@@ -151,14 +151,13 @@ impl Allocators {
         }
     }
 
-    // TODO: remove this at some point. It is useful for debugging though.
-    #[allow(dead_code)]
-    pub(super) fn print_all_allocated(&self) {
+    #[cfg(any(test, fuzzing))]
+    pub(super) fn all_allocated(&self) -> Vec<PageNumber> {
         let mut pages = vec![];
         for (i, allocator) in self.region_allocators.iter().enumerate() {
             allocator.get_allocated_pages(i.try_into().unwrap(), &mut pages);
         }
-        println!("Allocated pages: {pages:?}");
+        pages
     }
 
     pub(crate) fn xxh3_hash(&self) -> u128 {
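Taken together, the accessors above (`all_allocated`, `all_allocated_pages`, `get_allocated_pages`, `tracker_page`) exist to support one computation: the set of pages the allocators consider live, minus the pages each owner can account for, equals the leaked pages. A self-contained sketch of that set difference, with page numbers reduced to `u64` (redb's real `PageNumber` also encodes region and order):

```rust
use std::collections::HashSet;

// Everything the allocators consider live, minus every page some owner
// (tracker page, table trees, system trees, freed tree) accounts for.
fn find_leaked(all_allocated: &[u64], owners: &[&[u64]]) -> Vec<u64> {
    let mut remaining: HashSet<u64> = all_allocated.iter().copied().collect();
    for owner in owners {
        for page in *owner {
            remaining.remove(page);
        }
    }
    let mut leaked: Vec<u64> = remaining.into_iter().collect();
    leaked.sort_unstable();
    leaked
}

fn main() {
    let all = [1, 2, 3, 4, 5];
    let tracker: &[u64] = &[1];
    let tables: &[u64] = &[2, 3];
    let freed: &[u64] = &[4];
    // Page 5 is allocated but claimed by no owner: it leaked
    assert_eq!(find_leaked(&all, &[tracker, tables, freed]), vec![5]);
}
```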