From 45cdf31055b1b6a629bdb8032adaa6dd5a8e32b9 Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Fri, 6 Sep 2024 16:40:10 +0800
Subject: [PATCH] Require ObjectReference to point inside object (#1195)

Require the raw address of `ObjectReference` to be within the address range of the object it refers to. The raw address is now used directly for side metadata access and SFT dispatching. This makes the "in-object address" unnecessary, so we remove the concept of "in-object address" and the related constants and methods.

Methods which use the "in-object address" for SFT dispatching or side-metadata access used to have a `<VM>` type parameter. This PR removes that type parameter.

Because `ObjectReference` is now both within an object and word-aligned, the algorithm for searching for VO bits from internal pointers is slightly simplified.

The method `is_mmtk_object` now has undefined behavior for arguments that are zero or misaligned, because they are obviously illegal addresses for `ObjectReference` and the user should have filtered them out in the first place.

Fixes: https://github.com/mmtk/mmtk-core/issues/1170
---
 benches/mock_bench/internal_pointer.rs        | 10 +-
 benches/mock_bench/sft.rs                     |  2 +-
 docs/dummyvm/src/api.rs                       |  8 +-
 docs/dummyvm/src/object_model.rs              |  9 +-
 docs/userguide/src/migration/prefix.md        | 48 +++++
 docs/userguide/src/portingguide/howto/nogc.md | 54 +++++-
 src/memory_manager.rs                         | 50 ++---
 src/plan/barriers.rs                          |  2 +-
 src/policy/copyspace.rs                       |  8 +-
 src/policy/immix/immixspace.rs                | 18 +-
 src/policy/immortalspace.rs                   |  6 +-
 src/policy/largeobjectspace.rs                | 21 +--
 src/policy/lockfreeimmortalspace.rs           |  4 +-
 src/policy/markcompactspace.rs                | 12 +-
 src/policy/marksweepspace/malloc_ms/global.rs | 10 +-
 .../marksweepspace/malloc_ms/metadata.rs      | 21 +--
 src/policy/marksweepspace/native_ms/block.rs  |  2 +-
 src/policy/marksweepspace/native_ms/global.rs |  6 +-
 src/policy/space.rs                           |  2 +-
 src/policy/vmspace.rs                         |  6 +-
 src/scheduler/gc_work.rs                      |  2 +-
 src/util/address.rs                           | 177 +++++++++++-------
 src/util/alloc/free_list_allocator.rs         |  2 +-
 src/util/finalizable_processor.rs             |  2 +-
 src/util/is_mmtk_object.rs                    |  5 +
 src/util/linear_scan.rs                       | 18 +-
 src/util/metadata/global.rs                   | 20 +-
 src/util/metadata/log_bit.rs                  |  2 +-
 src/util/metadata/vo_bit/helper.rs            |  8 +-
 src/util/metadata/vo_bit/mod.rs               | 81 ++++----
 src/util/object_enum.rs                       |  2 +-
 src/util/reference_processor.rs               | 30 +--
 src/util/sanity/sanity_checker.rs             |  4 +-
 src/util/test_util/mock_vm.rs                 |  3 -
 src/vm/active_plan.rs                         |  2 +
 src/vm/object_model.rs                        | 29 +--
 .../mock_tests/mock_test_conservatism.rs      | 33 ++--
 ...ock_test_internal_ptr_before_object_ref.rs | 18 +-
 .../mock_test_internal_ptr_invalid.rs         |  6 +-
 ...st_internal_ptr_large_object_multi_page.rs | 38 ++--
 ...est_internal_ptr_large_object_same_page.rs | 21 +--
 .../mock_test_internal_ptr_normal_object.rs   | 19 +-
 .../mock_tests/mock_test_is_in_mmtk_spaces.rs | 10 +-
 .../mock_tests/mock_test_vm_layout_default.rs |  2 +-
 44 files changed, 447 insertions(+), 386 deletions(-)

diff --git a/benches/mock_bench/internal_pointer.rs b/benches/mock_bench/internal_pointer.rs
index fee21856d3..edc22f8ef3 100644
--- a/benches/mock_bench/internal_pointer.rs
+++ b/benches/mock_bench/internal_pointer.rs
@@ -42,10 +42,7 @@ pub fn bench(c: &mut Criterion) { ); let obj_end = addr + NORMAL_OBJECT_SIZE; _b.iter(|| { - memory_manager::find_object_from_internal_pointer::<MockVM>( - obj_end - 1, - NORMAL_OBJECT_SIZE, - ); + memory_manager::find_object_from_internal_pointer(obj_end - 1, NORMAL_OBJECT_SIZE); }) } #[cfg(not(feature = "is_mmtk_object"))] @@ -83,10 +80,7 @@ pub fn bench(c: &mut Criterion) { ); let obj_end = addr + LARGE_OBJECT_SIZE; _b.iter(|| { - memory_manager::find_object_from_internal_pointer::<MockVM>( - obj_end - 1, - LARGE_OBJECT_SIZE, - ); + memory_manager::find_object_from_internal_pointer(obj_end - 1, LARGE_OBJECT_SIZE); }) } #[cfg(not(feature = "is_mmtk_object"))]

diff --git a/benches/mock_bench/sft.rs b/benches/mock_bench/sft.rs
index d8a432d69f..7f58f4aa5f 100644
--- a/benches/mock_bench/sft.rs
+++ b/benches/mock_bench/sft.rs
@@ -12,6 +12,6 @@ pub fn bench(c: &mut Criterion) { let obj = MockVM::object_start_to_ref(addr); c.bench_function("sft read", |b| { - b.iter(|| memory_manager::is_in_mmtk_spaces::<MockVM>(black_box(obj))) + b.iter(|| memory_manager::is_in_mmtk_spaces(black_box(obj))) }); }

diff --git a/docs/dummyvm/src/api.rs b/docs/dummyvm/src/api.rs
index 27f7b1f87a..4fad312be1 100644
--- a/docs/dummyvm/src/api.rs
+++ b/docs/dummyvm/src/api.rs
@@ -142,23 +142,23 @@ pub extern "C" fn mmtk_total_bytes() -> usize { #[no_mangle] pub extern "C" fn mmtk_is_live_object(object: ObjectReference) -> bool { - memory_manager::is_live_object::<DummyVM>(object) + memory_manager::is_live_object(object) } #[no_mangle] pub extern "C" fn mmtk_will_never_move(object: ObjectReference) -> bool { - !object.is_movable::<DummyVM>() + !object.is_movable() } #[cfg(feature = "is_mmtk_object")] #[no_mangle] pub extern "C" fn mmtk_is_mmtk_object(addr: Address) -> bool { - memory_manager::is_mmtk_object(addr) + memory_manager::is_mmtk_object(addr).is_some() } #[no_mangle] pub extern "C" fn mmtk_is_in_mmtk_spaces(object: ObjectReference) -> bool { - memory_manager::is_in_mmtk_spaces::<DummyVM>(object) + memory_manager::is_in_mmtk_spaces(object) } #[no_mangle]

diff --git a/docs/dummyvm/src/object_model.rs b/docs/dummyvm/src/object_model.rs
index d1747dbf19..e63a4e85ff 100644
--- a/docs/dummyvm/src/object_model.rs
+++ b/docs/dummyvm/src/object_model.rs
@@ -6,15 +6,10 @@ use mmtk::vm::*; pub struct VMObjectModel {} /// This is the offset from the allocation result to the object reference for the object. -/// For bindings that this offset is not a constant, you can implement the calculation in the method `ref_to_object_start``, and +/// For bindings where this offset is not a constant, you can implement the calculation in the method `ref_to_object_start`, and /// remove this constant. pub const OBJECT_REF_OFFSET: usize = 0; -/// This is the offset from the object reference to an in-object address. The binding needs -/// to guarantee the in-object address is inside the storage associated with the object. -/// It has to be a constant offset. See `ObjectModel::IN_OBJECT_ADDRESS_OFFSET`. -pub const IN_OBJECT_ADDRESS_OFFSET: isize = 0; - // This is the offset from the object reference to the object header. // This value is used in `ref_to_header` where MMTk loads header metadata from.
pub const OBJECT_HEADER_OFFSET: usize = 0;
@@ -86,8 +81,6 @@ impl ObjectModel<DummyVM> for VMObjectModel { object.to_raw_address().sub(OBJECT_HEADER_OFFSET) } - const IN_OBJECT_ADDRESS_OFFSET: isize = IN_OBJECT_ADDRESS_OFFSET; - fn dump_object(_object: ObjectReference) { unimplemented!() }

diff --git a/docs/userguide/src/migration/prefix.md b/docs/userguide/src/migration/prefix.md
index c6765bd779..fc25b775c3 100644
--- a/docs/userguide/src/migration/prefix.md
+++ b/docs/userguide/src/migration/prefix.md
@@ -30,6 +30,54 @@ Notes for the mmtk-core developers: +## 0.28.0 + +### `ObjectReference` must point inside an object + +```admonish tldr +`ObjectReference` is now required to be an address within an object. The concept of "in-object +address" and related methods are removed. Some methods which used to depend on the "in-object +address" no longer need the `<VM>` type argument. +``` + +API changes: + +- struct `ObjectReference` + + Its "raw address" must be within an object now. + + The following methods which were used to access the in-object address are removed. + * `from_address` + * `to_address` + * When accessing side metadata, the "raw address" should be used instead. + + The following methods no longer have the `<VM>` type argument. + * `get_forwarded_object` + * `is_in_any_space` + * `is_live` + * `is_movable` + * `is_reachable` +- module `memory_manager` + + `is_mmtk_object`: It now requires the address parameter to be non-zero and word-aligned. + * Otherwise it will not be a legal `ObjectReference` in the first place. The user should + filter out such illegal values. + + The following functions no longer have the `<VM>` type argument. + * `find_object_from_internal_pointer` + * `is_in_mmtk_spaces` + * `is_live_object` + * `is_pinned` + * `pin_object` + * `unpin_object` +- struct `Region` + + The following methods no longer have the `<VM>` type argument. + * `containing` +- trait `ObjectModel` + + `IN_OBJECT_ADDRESS_OFFSET`: removed because it is no longer needed. + +See also: + +- PR: <https://github.com/mmtk/mmtk-core/pull/1195> +- Examples: + + https://github.com/mmtk/mmtk-openjdk/pull/286: a simple case + + https://github.com/mmtk/mmtk-jikesrvm/issues/178: a VM that needs extensive changes for this + ## 0.27.0 ### `is_mmtk_object` returns `Option<ObjectReference>`

diff --git a/docs/userguide/src/portingguide/howto/nogc.md b/docs/userguide/src/portingguide/howto/nogc.md
index a7721a9c6a..4d68041e8d 100644
--- a/docs/userguide/src/portingguide/howto/nogc.md
+++ b/docs/userguide/src/portingguide/howto/nogc.md
@@ -95,13 +95,39 @@ We recommend going through the [list of metadata specifications](https://docs.mm #### `ObjectReference` vs `Address` -A key principle in MMTk is the distinction between [`ObjectReference`](https://docs.mmtk.io/api/mmtk/util/address/struct.ObjectReference.html) and [`Address`](https://docs.mmtk.io/api/mmtk/util/address/struct.Address.html). The idea is that very few operations are allowed on an `ObjectReference`. For example, MMTk does not allow address arithmetic on `ObjectReference`s. This allows us to preserve memory-safety, only performing unsafe operations when required, and gives us a cleaner and more flexible abstraction to work with as it can allow object handles or offsets etc. `Address`, on the other hand, represents an arbitrary machine address. You might be interested in reading the *Demystifying Magic: High-level Low-level Programming* paper[^3] which describes the above in more detail. - -In MMTk, `ObjectReference` is a special address that represents an object. A binding may use tagged references, compressed pointers, etc.
-They need to deal with the encoding and the decoding in their [`Slot`](https://docs.mmtk.io/api/mmtk/vm/slot/trait.Slot.html) implementation, -and always present plain `ObjectReference`s to MMTk. See [this test](https://github.com/mmtk/mmtk-core/blob/master/src/vm/tests/mock_tests/mock_test_slots.rs) for some `Slot` implementation examples. - -[^3]: https://users.cecs.anu.edu.au/~steveb/pubs/papers/vmmagic-vee-2009.pdf +A key principle in MMTk is the distinction between [`ObjectReference`](https://docs.mmtk.io/api/mmtk/util/address/struct.ObjectReference.html) and [`Address`](https://docs.mmtk.io/api/mmtk/util/address/struct.Address.html). The idea is that very few operations are allowed on an `ObjectReference`. For example, MMTk does not allow address arithmetic on `ObjectReference`s. This allows us to preserve memory-safety, only performing unsafe operations when required, and gives us a cleaner and more flexible abstraction to work with as it can allow object handles or offsets etc. `Address`, on the other hand, represents an arbitrary machine address. You might be interested in reading the [*Demystifying Magic: High-level Low-level Programming*][FBC09] paper which describes the above in more detail. + +In MMTk, `ObjectReference` is a special address that represents an object. It is required to be +within the address range of the object it refers to, and must be word-aligned. This address is used +by MMTk to access side metadata, and to find the space or region (chunk, block, line, etc.) that +contains the object. It must also be efficient to locate the object header (where in-header MMTk +metadata are held) and the object's VM-specific metadata, such as type information, from a given +`ObjectReference`. MMTk needs to access that information, either directly or indirectly via +traits implemented by the binding, during tracing, which is performance-critical. + +The address used as `ObjectReference` is nominated by the VM binding when an object is allocated (or +moved by a moving GC, which we can ignore for now when supporting NoGC). VMs usually have their own +concepts of "object reference" which refer to objects. Some of them, including OpenJDK and CRuby, +use an address within the object (the starting address or an address at an offset inside the object) +to refer to an object. Such VMs can directly use their "object reference" as the address of MMTk's +`ObjectReference`. + +Some VMs, such as JikesRVM, refer to an object by an address at a constant offset after the header, +which can be outside the object. This does not satisfy the requirement of MMTk's `ObjectReference`, +and the VM binding needs to make a clear distinction between the VM-level object reference and +MMTk's `ObjectReference` type. A detailed example for supporting such a VM can be found +[here][jikesrvm-objref]. + +Other VMs may use tagged references, compressed pointers, etc. They need to convert them to plain +addresses to be used as MMTk's `ObjectReference`. Specifically, if the VM uses such representations +in object fields, the VM binding can deal with the encoding and the decoding in its +[`Slot`][slot-trait] implementation, and always present plain `ObjectReference`s to MMTk. See [this +test][slot-test] for some `Slot` implementation examples.
+ +[FBC09]: https://users.cecs.anu.edu.au/~steveb/pubs/papers/vmmagic-vee-2009.pdf +[jikesrvm-objref]: https://github.com/mmtk/mmtk-jikesrvm/issues/178 +[slot-trait]: https://docs.mmtk.io/api/mmtk/vm/slot/trait.Slot.html +[slot-test]: https://github.com/mmtk/mmtk-core/blob/master/src/vm/tests/mock_tests/mock_test_slots.rs #### Miscellaneous configuration options @@ -261,7 +287,7 @@ void *mmtk_alloc(MmtkMutator mutator, size_t size, size_t align, * Set relevant object metadata * * @param mutator the mutator instance that is requesting the allocation - * @param object the returned address of the allocated object + * @param object the ObjectReference address chosen by the VM binding * @param size the size of the allocated object * @param allocator the allocation semantics to use for the allocation */ @@ -274,13 +300,21 @@ In order to perform allocations, you will need to know what object alignment the Now that MMTk is aware of each mutator thread, you have to change the runtime's allocation functions to call into MMTk to allocate using `mmtk_alloc` and set object metadata using `mmtk_post_alloc`. Note that there may be multiple allocation functions in the runtime so make sure that you edit them all! -You should use the saved `Mutator` pointer as the first parameter, the requested object size as the next parameter, and any alignment requirements the runtimes has as the third parameter. +When calling `mmtk_alloc`, you should use the saved `Mutator` pointer as the first parameter, the requested object size as the next parameter, and any alignment requirements the runtime has as the third parameter. If your runtime requires a non-zero allocation offset (i.e. the alignment requirements are for the offset address, not the returned address) then you have to provide the required value as the fourth parameter. Note that you ***must*** also update the [`USE_ALLOCATION_OFFSET`](https://docs.mmtk.io/api/mmtk/vm/trait.VMBinding.html#associatedconstant.USE_ALLOCATION_OFFSET) constant in the `VMBinding` implementation if your runtime requires a non-zero allocation offset. For the time-being, you can ignore the `allocator` parameter in both these functions and always pass a value of `0` which means MMTk will pick the default allocator for your collector (a bump pointer allocator in the case of NoGC). -Finally, you need to call `mmtk_post_alloc` with the object address returned from the previous `mmtk_alloc` call in order to initialize object metadata. +The return value of `mmtk_alloc` is the starting address of the allocated object. + +Then you should nominate a word-aligned address within the allocated bytes to be the +`ObjectReference` used to refer to that object from now on. It doesn't have to be the starting +address. + +Finally, you need to call `mmtk_post_alloc` with your chosen `ObjectReference` in order to +initialize MMTk-level object metadata, such as logging bits, valid-object (VO) bits, etc. As a VM +binding developer, you can ignore the details for now. **Note:** Currently MMTk assumes object sizes are multiples of the `MIN_ALIGNMENT`. If you encounter errors with alignment, a simple workaround would be to align the requested object size up to the `MIN_ALIGNMENT`. See [here](https://github.com/mmtk/mmtk-core/issues/730) for the tracking issue to fix this bug.
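[Editor's illustration, not part of the patch] To make the allocation sequence described above concrete, here is a minimal Rust sketch of the same flow expressed against mmtk-core's `memory_manager` API (a binding's C symbols `mmtk_alloc` and `mmtk_post_alloc` are typically thin wrappers over these calls). The generic parameter `VM` stands for the binding's `VMBinding` type, and the constant `OBJECT_REF_OFFSET` is an assumption for illustration; a real binding chooses its own offset, which may be zero, as long as the nominated address stays inside the object.

```rust
use mmtk::util::{Address, ObjectReference};
use mmtk::vm::VMBinding;
use mmtk::{memory_manager, AllocationSemantics, Mutator};

/// Hypothetical binding-specific offset from the allocation start to the
/// word-aligned address nominated as the ObjectReference (an assumption for
/// this sketch; it must be smaller than the object size).
const OBJECT_REF_OFFSET: usize = 8;

fn allocate_object<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
) -> ObjectReference {
    // 1. Allocate raw storage. The result is the starting address.
    let start: Address =
        memory_manager::alloc(mutator, size, align, 0, AllocationSemantics::Default);
    // 2. Nominate a word-aligned address *within* the allocated bytes as the
    //    ObjectReference; it does not have to be the starting address.
    let object = ObjectReference::from_raw_address(start + OBJECT_REF_OFFSET)
        .expect("a successful allocation never yields a zero address");
    // 3. Initialize MMTk-level metadata (log bits, VO bit, etc.) for the
    //    chosen reference.
    memory_manager::post_alloc(mutator, object, size, AllocationSemantics::Default);
    object
}
```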
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 75ba10c73b..a0ae84a06c 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -579,16 +579,17 @@ pub fn handle_user_collection_request<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) /// /// Arguments: /// * `object`: The object reference to query. -pub fn is_live_object<VM: VMBinding>(object: ObjectReference) -> bool { - object.is_live::<VM>() +pub fn is_live_object(object: ObjectReference) -> bool { + object.is_live() } -/// Check if `addr` is the address of an object reference to an MMTk object. +/// Check if `addr` is the raw address of an object reference to an MMTk object. /// /// Concretely: -/// 1. Return true if `ObjectReference::from_raw_address(addr)` is a valid object reference to an -/// object in any space in MMTk. -/// 2. Return false otherwise. +/// 1. Return `Some(object)` if `ObjectReference::from_raw_address(addr)` is a valid object +/// reference to an object in any space in MMTk. `object` is the result of +/// `ObjectReference::from_raw_address(addr)`. +/// 2. Return `None` otherwise. /// /// This function is useful for conservative root scanning. The VM can iterate through all words in /// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses /// is present. See `crate::plan::global::BasePlan::vm_space`. /// /// Argument: -/// * `addr`: An arbitrary address. +/// * `addr`: A non-zero word-aligned address. Because the raw address of an `ObjectReference` +/// cannot be zero and must be word-aligned, the caller must filter out zero and misaligned +/// addresses before calling this function. Otherwise the behavior is undefined. #[cfg(feature = "is_mmtk_object")] pub fn is_mmtk_object(addr: Address) -> Option<ObjectReference> { crate::util::is_mmtk_object::check_object_reference(addr) } @@ -613,12 +616,13 @@ pub fn is_mmtk_object(addr: Address) -> Option<ObjectReference> { /// This should be used instead of [`crate::memory_manager::is_mmtk_object`] for conservative stack scanning if /// the binding may have internal pointers on the stack. /// -/// Note that, we only consider pointers that point to addresses that are equal or greater than the in-object addresss -/// (i.e. [`crate::util::ObjectReference::to_address()`] which is the same as `object_ref.to_raw_address() + ObjectModel::IN_OBJECT_ADDRESS_OFFSET`), -/// and within the allocation as 'internal pointers'. To be precise, for each object ref `obj_ref`, internal pointers are in the range -/// `[obj_ref + ObjectModel::IN_OBJECT_ADDRESS_OFFSET, ObjectModel::ref_to_object_start(obj_ref) + ObjectModel::get_current_size(obj_ref))`. -/// If a binding defines internal pointers differently, calling this method is undefined behavior. -/// If this is the case for you, please submit an issue or engage us on Zulip to discuss more. +/// Note that, we only consider pointers that point to addresses that are equal to or greater than +/// the raw address of the object's `ObjectReference`, and within the allocation as 'internal +/// pointers'. To be precise, for each object ref `obj_ref`, internal pointers are in the range +/// `[obj_ref.to_raw_address(), obj_ref.to_object_start() + +/// ObjectModel::get_current_size(obj_ref))`. If a binding defines internal pointers differently, +/// calling this method is undefined behavior. If this is the case for you, please submit an issue +/// or engage us on Zulip to discuss more.
/// /// Note that, in the similar situation as [`crate::memory_manager::is_mmtk_object`], the binding should filter /// out obvious non-pointers (e.g. alignment check, bound check, etc) before calling this function to avoid unnecessary /// /// Arguments: /// * `internal_ptr`: The address to start searching. We search backwards from this address (including this address) to find the base reference. /// * `max_search_bytes`: The maximum number of bytes we may search for an object with VO bit set. `internal_ptr - max_search_bytes` is not included. #[cfg(feature = "is_mmtk_object")] -pub fn find_object_from_internal_pointer<VM: VMBinding>( +pub fn find_object_from_internal_pointer( internal_ptr: Address, max_search_bytes: usize, ) -> Option<ObjectReference> { @@ -655,7 +659,7 @@ pub fn find_object_from_internal_pointer<VM: VMBinding>( /// object for the VM in response to `memory_manager::alloc`, this function will return true; but /// if the VM directly called `malloc` to allocate the object, this function will return false. /// -/// If `is_mmtk_object(object.to_address())` returns true, `is_in_mmtk_spaces(object)` must also +/// If `is_mmtk_object(object.to_raw_address())` returns true, `is_in_mmtk_spaces(object)` must also /// return true. /// /// This function is useful if an object reference in the VM can be either a pointer into the MMTk /// /// Arguments: /// * `object`: The object reference to query. -pub fn is_in_mmtk_spaces<VM: VMBinding>(object: ObjectReference) -> bool { +pub fn is_in_mmtk_spaces(object: ObjectReference) -> bool { use crate::mmtk::SFT_MAP; SFT_MAP - .get_checked(object.to_address::<VM>()) + .get_checked(object.to_raw_address()) .is_in_space(object) } @@ -766,10 +770,10 @@ pub fn add_finalizer<VM: VMBinding>( /// Arguments: /// * `object`: The object to be pinned #[cfg(feature = "object_pinning")] -pub fn pin_object<VM: VMBinding>(object: ObjectReference) -> bool { +pub fn pin_object(object: ObjectReference) -> bool { use crate::mmtk::SFT_MAP; SFT_MAP - .get_checked(object.to_address::<VM>()) + .get_checked(object.to_raw_address()) .pin_object(object) } @@ -780,10 +784,10 @@ pub fn pin_object<VM: VMBinding>(object: ObjectReference) -> bool { /// Arguments: /// * `object`: The object to be pinned #[cfg(feature = "object_pinning")] -pub fn unpin_object<VM: VMBinding>(object: ObjectReference) -> bool { +pub fn unpin_object(object: ObjectReference) -> bool { use crate::mmtk::SFT_MAP; SFT_MAP - .get_checked(object.to_address::<VM>()) + .get_checked(object.to_raw_address()) .unpin_object(object) } @@ -792,10 +796,10 @@ pub fn unpin_object<VM: VMBinding>(object: ObjectReference) -> bool { /// Arguments: /// * `object`: The object to be checked #[cfg(feature = "object_pinning")] -pub fn is_pinned<VM: VMBinding>(object: ObjectReference) -> bool { +pub fn is_pinned(object: ObjectReference) -> bool { use crate::mmtk::SFT_MAP; SFT_MAP - .get_checked(object.to_address::<VM>()) + .get_checked(object.to_raw_address()) .is_object_pinned(object) }

diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs
index 39932fda93..6ae193f919 100644
--- a/src/plan/barriers.rs
+++ b/src/plan/barriers.rs
@@ -182,7 +182,7 @@ impl<S: BarrierSemantics> ObjectBarrier<S> { fn log_object(&self, object: ObjectReference) -> bool { #[cfg(all(feature = "vo_bit", feature = "extreme_assertions"))] debug_assert!( - crate::util::metadata::vo_bit::is_vo_bit_set::<S::VM>(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "object bit is unset" ); loop {

diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index 84e875191e..c2c9b35e15 100644
--- a/src/policy/copyspace.rs
+++
b/src/policy/copyspace.rs
@@ -59,7 +59,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> { fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object); + crate::util::metadata::vo_bit::set_vo_bit(_object); } fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> { @@ -76,7 +76,7 @@ #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] @@ -231,7 +231,7 @@ impl<VM: VMBinding> CopySpace<VM> { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO bit not set", object ); @@ -255,7 +255,7 @@ impl<VM: VMBinding> CopySpace<VM> { ); #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(new_object); + crate::util::metadata::vo_bit::set_vo_bit(new_object); trace!("Forwarding pointer"); queue.enqueue(new_object);

diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index 0e3d303202..bc24e3c960 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -139,11 +139,11 @@ impl<VM: VMBinding> SFT for ImmixSpace<VM> { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object); + crate::util::metadata::vo_bit::set_vo_bit(_object); } #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] fn find_object_from_internal_pointer( @@ -207,7 +207,7 @@ impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for ImmixSpace<VM> if KIND == TRACE_KIND_TRANSITIVE_PIN { self.trace_object_without_moving(queue, object) } else if KIND == TRACE_KIND_DEFRAG { - if Block::containing::<VM>(object).is_defrag_source() { + if Block::containing(object).is_defrag_source() { debug_assert!(self.in_defrag()); debug_assert!( !crate::plan::is_nursery_gc(worker.mmtk.get_plan()), @@ -576,7 +576,7 @@ impl<VM: VMBinding> ImmixSpace<VM> { self.mark_lines(object); } } else { - Block::containing::<VM>(object).set_state(BlockState::Marked); + Block::containing(object).set_state(BlockState::Marked); } #[cfg(feature = "vo_bit")] @@ -625,9 +625,9 @@ impl<VM: VMBinding> ImmixSpace<VM> { } else { // new_object != object debug_assert!( - !Block::containing::<VM>(new_object).is_defrag_source(), + !Block::containing(new_object).is_defrag_source(), "Block {:?} containing forwarded object {} should not be a defragmentation source", - Block::containing::<VM>(new_object), + Block::containing(new_object), new_object, ); } @@ -646,7 +646,7 @@ impl<VM: VMBinding> ImmixSpace<VM> { { self.attempt_mark(object, self.mark_state); object_forwarding::clear_forwarding_bits::<VM>(object); - Block::containing::<VM>(object).set_state(BlockState::Marked); + Block::containing(object).set_state(BlockState::Marked); #[cfg(feature = "vo_bit")] vo_bit::helper::on_object_marked::<VM>(object); @@ -671,12 +671,12 @@ impl<VM: VMBinding> ImmixSpace<VM> { new_object }; debug_assert_eq!( - Block::containing::<VM>(new_object).get_state(), + Block::containing(new_object).get_state(), BlockState::Marked ); queue.enqueue(new_object); - debug_assert!(new_object.is_live::<VM>()); + debug_assert!(new_object.is_live()); self.unlog_object_if_needed(new_object); new_object } diff --git
a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs
index 2ebb14e4d4..e4f49f7c28 100644
--- a/src/policy/immortalspace.rs
+++ b/src/policy/immortalspace.rs
@@ -62,11 +62,11 @@ impl<VM: VMBinding> SFT for ImmortalSpace<VM> { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(object); + crate::util::metadata::vo_bit::set_vo_bit(object); } #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] fn find_object_from_internal_pointer( @@ -208,7 +208,7 @@ impl<VM: VMBinding> ImmortalSpace<VM> { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO bit not set", object );

diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index 9d948201bb..8906a50044 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -80,15 +80,15 @@ impl<VM: VMBinding> SFT for LargeObjectSpace<VM> { } #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(object); + crate::util::metadata::vo_bit::set_vo_bit(object); #[cfg(all(feature = "is_mmtk_object", debug_assertions))] { use crate::util::constants::LOG_BYTES_IN_PAGE; - let vo_addr = object.to_address::<VM>(); + let vo_addr = object.to_raw_address(); let offset_from_page_start = vo_addr & ((1 << LOG_BYTES_IN_PAGE) - 1) as usize; debug_assert!( offset_from_page_start < crate::util::metadata::vo_bit::VO_BIT_WORD_TO_REGION, - "The in-object address is not in the first 512 bytes of a page. The internal pointer searching for LOS won't work." + "The raw address of ObjectReference is not in the first 512 bytes of a page. The internal pointer searching for LOS won't work." ); } @@ -96,7 +96,7 @@ } #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] fn find_object_from_internal_pointer( @@ -117,19 +117,14 @@ } // For performance, we only check the first word which maps to the first 512 bytes in the page. // In almost all the cases, it should be sufficient. - // However, if the in-object address is not in the first 512 bytes, this won't work. + // However, if the raw address of ObjectReference is not in the first 512 bytes, this won't work. // We assert this when we set VO bit for LOS.
if vo_bit::get_raw_vo_bit_word(cur_page) != 0 { // Find the exact address that has vo bit set for offset in 0..vo_bit::VO_BIT_WORD_TO_REGION { let addr = cur_page + offset; if unsafe { vo_bit::is_vo_addr(addr) } { - let obj = vo_bit::is_internal_ptr_from_vo_bit::<VM>(addr, ptr); - if obj.is_some() { - return obj; - } else { - return None; - } + return vo_bit::is_internal_ptr_from_vo_bit::<VM>(addr, ptr); } } unreachable!( @@ -257,7 +252,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO bit not set", object ); @@ -292,7 +287,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> { fn sweep_large_pages(&mut self, sweep_nursery: bool) { let sweep = |object: ObjectReference| { #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::unset_vo_bit::<VM>(object); + crate::util::metadata::vo_bit::unset_vo_bit(object); self.pr .release_pages(get_super_page(object.to_object_start::<VM>())); };

diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs
index dee3205181..05cf448001 100644
--- a/src/policy/lockfreeimmortalspace.rs
+++ b/src/policy/lockfreeimmortalspace.rs
@@ -72,11 +72,11 @@ impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object); + crate::util::metadata::vo_bit::set_vo_bit(_object); } #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] fn find_object_from_internal_pointer(

diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs
index bc0f5659c2..7843edef9a 100644
--- a/src/policy/markcompactspace.rs
+++ b/src/policy/markcompactspace.rs
@@ -67,7 +67,7 @@ impl<VM: VMBinding> SFT for MarkCompactSpace<VM> { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - crate::util::metadata::vo_bit::set_vo_bit::<VM>(object); + crate::util::metadata::vo_bit::set_vo_bit(object); } #[cfg(feature = "sanity")] @@ -77,7 +77,7 @@ #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] @@ -241,7 +241,7 @@ impl<VM: VMBinding> MarkCompactSpace<VM> { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO bit not set", object ); @@ -257,7 +257,7 @@ impl<VM: VMBinding> MarkCompactSpace<VM> { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO bit not set", object ); @@ -403,7 +403,7 @@ impl<VM: VMBinding> MarkCompactSpace<VM> { for obj in self.linear_scan_objects(from_start..from_end) { let copied_size = VM::VMObjectModel::get_size_when_copied(obj); // clear the VO bit - vo_bit::unset_vo_bit::<VM>(obj); + vo_bit::unset_vo_bit(obj); let maybe_forwarding_pointer = Self::get_header_forwarding_pointer(obj); if let Some(forwarding_pointer) = maybe_forwarding_pointer { @@ -416,7 +416,7 @@ let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object,
Address::ZERO); // update VO bit, - vo_bit::set_vo_bit::<VM>(new_object); + vo_bit::set_vo_bit(new_object); to = new_object.to_object_start::<VM>() + copied_size; debug_assert_eq!(end_of_new_object, to); } else {

diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs
index dbe386db5c..d6a3124a1a 100644
--- a/src/policy/marksweepspace/malloc_ms/global.rs
+++ b/src/policy/marksweepspace/malloc_ms/global.rs
@@ -98,7 +98,7 @@ impl<VM: VMBinding> SFT for MallocSpace<VM> { // For malloc space, we need to further check the VO bit. fn is_in_space(&self, object: ObjectReference) -> bool { - is_alloced_by_malloc::<VM>(object) + is_alloced_by_malloc(object) } /// For malloc space, we just use the side metadata. @@ -107,7 +107,7 @@ debug_assert!(!addr.is_zero()); // `addr` cannot be mapped by us. It should be mapped by the malloc library. debug_assert!(!addr.is_mapped()); - has_object_alloced_by_malloc::<VM>(addr) + has_object_alloced_by_malloc(addr) } #[cfg(feature = "is_mmtk_object")] @@ -124,7 +124,7 @@ fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { trace!("initialize_object_metadata for object {}", object); - set_vo_bit::<VM>(object); + set_vo_bit(object); } fn sft_trace_object( @@ -173,7 +173,7 @@ impl<VM: VMBinding> Space<VM> for MallocSpace<VM> { // We have assertions in a debug build. We allow this pattern for the release build. #[allow(clippy::let_and_return)] fn in_space(&self, object: ObjectReference) -> bool { - let ret = is_alloced_by_malloc::<VM>(object); + let ret = is_alloced_by_malloc(object); #[cfg(debug_assertions)] if ASSERT_ALLOCATION { @@ -556,7 +556,7 @@ impl<VM: VMBinding> MallocSpace<VM> { // Free object self.free_internal(obj_start, bytes, offset_malloc); trace!("free object {}", object); - unsafe { unset_vo_bit_unsafe::<VM>(object) }; + unsafe { unset_vo_bit_unsafe(object) }; true } else {

diff --git a/src/policy/marksweepspace/malloc_ms/metadata.rs b/src/policy/marksweepspace/malloc_ms/metadata.rs
index 1216c50120..7fb5b5738a 100644
--- a/src/policy/marksweepspace/malloc_ms/metadata.rs
+++ b/src/policy/marksweepspace/malloc_ms/metadata.rs
@@ -153,9 +153,8 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size } /// Check if a given object was allocated by malloc -pub fn is_alloced_by_malloc<VM: VMBinding>(object: ObjectReference) -> bool { - is_meta_space_mapped_for_address(object.to_address::<VM>()) - && vo_bit::is_vo_bit_set::<VM>(object) +pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { + is_meta_space_mapped_for_address(object.to_raw_address()) && vo_bit::is_vo_bit_set(object) } /// Check if there is an object allocated by malloc at the address. @@ -163,11 +162,11 @@ pub fn is_alloced_by_malloc<VM: VMBinding>(object: ObjectReference) -> bool { /// This function doesn't check if `addr` is aligned. /// If not, it will try to load the VO bit for the address rounded down to the metadata's granularity.
#[cfg(feature = "is_mmtk_object")] -pub fn has_object_alloced_by_malloc<VM: VMBinding>(addr: Address) -> Option<ObjectReference> { +pub fn has_object_alloced_by_malloc(addr: Address) -> Option<ObjectReference> { if !is_meta_space_mapped_for_address(addr) { return None; } - vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + vo_bit::is_vo_bit_set_for_addr(addr) } pub fn is_marked<VM: VMBinding>(object: ObjectReference, ordering: Ordering) -> bool { @@ -218,8 +217,8 @@ pub unsafe fn is_chunk_marked_unsafe(chunk_start: Address) -> bool { ACTIVE_CHUNK_METADATA_SPEC.load::<u8>(chunk_start) == 1 } -pub fn set_vo_bit<VM: VMBinding>(object: ObjectReference) { - vo_bit::set_vo_bit::<VM>(object); +pub fn set_vo_bit(object: ObjectReference) { + vo_bit::set_vo_bit(object); } pub fn set_mark_bit<VM: VMBinding>(object: ObjectReference, ordering: Ordering) } #[allow(unused)] -pub fn unset_vo_bit<VM: VMBinding>(object: ObjectReference) { - vo_bit::unset_vo_bit::<VM>(object); +pub fn unset_vo_bit(object: ObjectReference) { + vo_bit::unset_vo_bit(object); } #[allow(unused)] @@ -255,8 +254,8 @@ pub(super) unsafe fn unset_offset_malloc_bit_unsafe(address: Address) { OFFSET_MALLOC_METADATA_SPEC.store::<u8>(address, 0); } -pub unsafe fn unset_vo_bit_unsafe<VM: VMBinding>(object: ObjectReference) { - vo_bit::unset_vo_bit_unsafe::<VM>(object); +pub unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { + vo_bit::unset_vo_bit_unsafe(object); } #[allow(unused)]

diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs
index a150d974b5..5bef9a3e52 100644
--- a/src/policy/marksweepspace/native_ms/block.rs
+++ b/src/policy/marksweepspace/native_ms/block.rs
@@ -300,7 +300,7 @@ impl Block { // clear VO bit if it is ever set. It is possible that the VO bit is never set for this cell (i.e. there was no object in this cell before this GC), // we unset the bit anyway. #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::unset_vo_bit_nocheck::<VM>(potential_object); + crate::util::metadata::vo_bit::unset_vo_bit_nocheck(potential_object); unsafe { cell.store::<Address>(last); }
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 4a86dda6fa..eaecbe3741 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -191,12 +191,12 @@ impl<VM: VMBinding> SFT for MarkSweepSpace<VM> { fn initialize_object_metadata(&self, _object: crate::util::ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object); + crate::util::metadata::vo_bit::set_vo_bit(_object); } #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] @@ -341,7 +341,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> { ); if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_marked::<VM>(object, Ordering::SeqCst) { VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.mark::<VM>(object, Ordering::SeqCst); - let block = Block::containing::<VM>(object); + let block = Block::containing(object); block.set_state(BlockState::Marked); queue.enqueue(object); }

diff --git a/src/policy/space.rs b/src/policy/space.rs
index 0329c995b7..053b636963 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -242,7 +242,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast { } fn in_space(&self, object: ObjectReference) -> bool { - self.address_in_space(object.to_address::<VM>()) + self.address_in_space(object.to_raw_address()) } /**

diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index 38fc6011da..60199c0fde 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -64,11 +64,11 @@ impl<VM: VMBinding> SFT for VMSpace<VM> { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::set_vo_bit::<VM>(object); + crate::util::metadata::vo_bit::set_vo_bit(object); } #[cfg(feature = "is_mmtk_object")] fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> { - crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr) + crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr) } #[cfg(feature = "is_mmtk_object")] fn find_object_from_internal_pointer( @@ -277,7 +277,7 @@ impl<VM: VMBinding> VMSpace<VM> { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO bit not set", object );

diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index ff2fb75511..82be1f3561 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -697,7 +697,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> { let worker = GCWorkerMutRef::new(self.worker()); // Invoke trace object on sft - let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_address::<VM>()) }; + let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) }; sft.sft_trace_object(&mut self.base.nodes, object, worker) }

diff --git a/src/util/address.rs b/src/util/address.rs
index b5da5cd847..46b65f4bd9 100644
--- a/src/util/address.rs
+++ b/src/util/address.rs
@@ -467,32 +467,106 @@ mod tests { use crate::vm::VMBinding; -/// ObjectReference represents address for an object. Compared with Address, -/// operations allowed on ObjectReference are very limited. No address arithmetics -/// are allowed for ObjectReference. The idea is from the paper -/// High-level Low-level Programming (VEE09) and JikesRVM. +/// `ObjectReference` represents the address of an object.
Compared with `Address`, operations allowed +/// on `ObjectReference` are very limited. No address arithmetics are allowed for `ObjectReference`. +/// The idea is from the paper [Demystifying Magic: High-level Low-level Programming (VEE09)][FBC09] +/// and [JikesRVM]. +/// +/// In MMTk, `ObjectReference` holds a non-zero address, i.e. its **raw address**. It must satisfy +/// the following requirements. +/// +/// - It uniquely references an MMTk object. +/// - The address must be within the address range of the object it refers to. +/// - The address must be word-aligned. +/// - It must be efficient to access object metadata from an `ObjectReference`. +/// +/// Each `ObjectReference` uniquely identifies exactly one MMTk object. There is no "null +/// reference" (see below for details). +/// +/// Conversely, each object has a unique (raw) address used for `ObjectReference`. That address is +/// nominated by the VM binding right after an object is allocated in the MMTk heap (i.e. the +/// argument of [`crate::memory_manager::post_alloc`]). The same address is used by all +/// `ObjectReference` instances that refer to that object until the object is moved, at which time +/// the VM binding shall choose another address to use as the `ObjectReference` of the new copy (in +/// [`crate::vm::ObjectModel::copy`] or [`crate::vm::ObjectModel::get_reference_when_copied_to`]) +/// until the object is moved again. +/// +/// In addition to the raw address, there are also two addresses related to each object allocated in +/// the MMTk heap, namely the **starting address** and the **header address**. See the +/// [`crate::vm::ObjectModel`] trait for their precise definitions. +/// +/// The VM binding may, in theory, pick any aligned address within the object, and it doesn't have +/// to be the starting address. However, during tracing, MMTk will need to access object metadata +/// from an `ObjectReference`. Particularly, it needs to identify reference fields, and query +/// information about the object, such as object size. Such information is usually accessed from +/// object headers. The choice of `ObjectReference` must make such accesses efficient. +/// +/// Because the raw address is within the object, MMTk will also use the raw address to identify the +/// space or region (chunk, block, line, etc.) that contains the object, and to access side metadata +/// and the SFTMap. If a VM binding needs to access side metadata directly (particularly, setting +/// the "valid-object (VO) bit" in allocation fast paths), it shall use the raw address to compute +/// the byte and bit address of the metadata bits. +/// +/// # Notes +/// +/// ## About VMs' own concepts of "object references" +/// +/// A runtime may define its own concept of "object references" differently from MMTk's +/// `ObjectReference` type. It may define its object reference as +/// +/// - the starting address of an object, +/// - an address inside an object, +/// - an address at a certain offset outside an object, +/// - a handle that points to an indirection table entry where a pointer to the object is held, or +/// - anything else that refers to an object. +/// +/// Regardless, when passing an `ObjectReference` value to MMTk through the API, MMTk expects its +/// value to satisfy MMTk's definition. This means MMTk's `ObjectReference` may not be the value +/// held in an object field. Some VM bindings may need to do conversions when passing object +/// references to MMTk.
For example, adding an offset to the VM-level object reference so that the +/// resulting address is within the object. When using handles, the VM binding may use the *pointer +/// stored in the entry* of the indirection table instead of the *pointer to the entry* itself as +/// the MMTk-level `ObjectReference`. +/// +/// ## About null references /// -/// A runtime may define its "object references" differently. It may define an object reference as -/// the address of an object, a handle that points to an indirection table entry where a pointer to -/// the object is held, or anything else. Regardless, MMTk expects each object reference to have a -/// pointer to the object (an address) in each object reference, and that address should be used -/// for this `ObjectReference` type. +/// An [`ObjectReference`] always refers to an object. Some VMs have special values (such as `null` +/// in Java) that do not refer to any object. Those values cannot be represented by +/// `ObjectReference`. When scanning roots and object fields, the VM binding should ignore slots +/// that do not hold a reference to an object. Specifically, [`crate::vm::slot::Slot::load`] +/// returns `Option<ObjectReference>`. It can return `None` so that MMTk skips that slot. /// -/// We currently do not allow an opaque `ObjectReference` type for which a binding can define -/// their layout. We now only allow a binding to define their semantics through a set of -/// methods in [`crate::vm::ObjectModel`]. Major refactoring is needed in MMTk to allow -/// the opaque `ObjectReference` type, and we haven't seen a use case for now. +/// `Option<ObjectReference>` should be used for the cases where a non-null object reference may or +/// may not exist. That includes several API functions, including [`crate::vm::slot::Slot::load`]. +/// [`ObjectReference`] is backed by `NonZeroUsize` which cannot be zero, and it has the +/// `#[repr(transparent)]` attribute. Thanks to [null pointer optimization (NPO)][NPO], +/// `Option<ObjectReference>` has the same size as `NonZeroUsize` and `usize`. /// -/// Note that [`ObjectReference`] cannot be null. For the cases where a non-null object reference -/// may or may not exist, (such as the result of [`crate::vm::slot::Slot::load`]) -/// `Option<ObjectReference>` should be used. [`ObjectReference`] is backed by `NonZeroUsize` -/// which cannot be zero, and it has the `#[repr(transparent)]` attribute. Thanks to [null pointer -/// optimization (NPO)][NPO], `Option<ObjectReference>` has the same size as `NonZeroUsize` and -/// `usize`. For the convenience of passing `Option<ObjectReference>` to and from native (C/C++) -/// programs, mmtk-core provides [`crate::util::api_util::NullableObjectReference`]. +/// For the convenience of passing `Option<ObjectReference>` to and from native (C/C++) programs, +/// mmtk-core provides [`crate::util::api_util::NullableObjectReference`]. /// -/// Note that [`ObjectReference`] has to be word aligned. +/// ## About the `VMSpace` /// +/// The `VMSpace` is managed by the VM binding. The VM binding declares ranges of memory as part of +/// the `VMSpace`, but MMTk never allocates into it. The VM binding allocates objects into the +/// `VMSpace` (usually by mapping boot-images), and refers to objects in the `VMSpace` using +/// `ObjectReference`s whose raw addresses point inside those objects (and must be word-aligned, +/// too). MMTk will access their metadata using the methods of [`ObjectModel`], just like for other +/// objects. MMTk also has side metadata available for objects in the `VMSpace`.
+/// +/// ## About `ObjectReference` pointing outside MMTk spaces +/// +/// If a VM binding implements [`crate::vm::ActivePlan::vm_trace_object`], `ObjectReference` is +/// allowed to point to locations outside any MMTk spaces. When tracing objects, such +/// `ObjectReference` values will be processed by `ActivePlan::vm_trace_object` so that the VM +/// binding can trace its own allocated objects during GC. However, **this is an experimental +/// feature**, and it may not interact well with other parts of MMTk. Notably, MMTk will not +/// allocate side metadata for such an `ObjectReference`, and attempts to access side metadata with +/// a non-MMTk `ObjectReference` will result in a crash. Use with caution. +/// +/// [FBC09]: https://dl.acm.org/doi/10.1145/1508293.1508305 +/// [JikesRVM]: https://www.jikesrvm.org/ +/// [`ObjectModel`]: crate::vm::ObjectModel /// [NPO]: https://doc.rust-lang.org/std/option/index.html#representation #[repr(transparent)] #[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)] @@ -503,22 +577,14 @@ impl ObjectReference { /// you will see an assertion failure in the debug build when constructing an object reference instance. pub const ALIGNMENT: usize = crate::util::constants::BYTES_IN_ADDRESS; - /// Cast the object reference to its raw address. This method is mostly for the convinience of a binding. - /// - /// MMTk should not make any assumption on the actual location of the address with the object reference. - /// MMTk should not assume the address returned by this method is in our allocation. For the purposes of - /// setting object metadata, MMTk should use [`crate::util::ObjectReference::to_address`] or [`crate::util::ObjectReference::to_header`]. + /// Cast the object reference to its raw address. pub fn to_raw_address(self) -> Address { Address(self.0.get()) } - /// Cast a raw address to an object reference. This method is mostly for the convinience of a binding. - /// This is how a binding creates `ObjectReference` instances. + /// Cast a raw address to an object reference. /// /// If `addr` is 0, the result is `None`. - /// - /// MMTk should not assume an arbitrary address can be turned into an object reference. MMTk can use [`crate::util::ObjectReference::from_address`] - /// to turn addresses that are from [`crate::util::ObjectReference::to_address`] back to object. pub fn from_raw_address(addr: Address) -> Option<ObjectReference> { debug_assert!( addr.is_aligned_to(Self::ALIGNMENT), @@ -544,15 +610,6 @@ impl ObjectReference { ObjectReference(NonZeroUsize::new_unchecked(addr.0)) } - /// Get the in-heap address from an object reference. This method is used by MMTk to get an in-heap address - /// for an object reference. - pub fn to_address<VM: VMBinding>(self) -> Address { - use crate::vm::ObjectModel; - let to_address = Address(self.0.get()).offset(VM::VMObjectModel::IN_OBJECT_ADDRESS_OFFSET); - debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || to_address == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, in-object addr is {}", self, to_address); - to_address - } - /// Get the header base address from an object reference. This method is used by MMTk to get a base address for the /// object header, and access the object header. This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_header`]. /// See the comments on [`crate::vm::ObjectModel::ref_to_header`].
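[Editor's illustration, not part of the patch] As a usage note for the contract documented above: under the new API, callers of `is_mmtk_object` must reject zero and misaligned words themselves before calling it. A minimal Rust sketch of such a filter for conservative root scanning follows; the helper name `filter_conservative_root` is made up for illustration.

```rust
use mmtk::memory_manager;
use mmtk::util::{Address, ObjectReference};

#[cfg(feature = "is_mmtk_object")]
fn filter_conservative_root(addr: Address, roots: &mut Vec<ObjectReference>) {
    // Zero or misaligned addresses can never be the raw address of an
    // ObjectReference; passing them to `is_mmtk_object` is undefined behavior
    // under the new contract, so they must be filtered out first.
    if addr.is_zero() || !addr.is_aligned_to(ObjectReference::ALIGNMENT) {
        return;
    }
    // `is_mmtk_object` now returns the ObjectReference directly on success.
    if let Some(object) = memory_manager::is_mmtk_object(addr) {
        roots.push(object);
    }
}
```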
@@ -580,52 +637,36 @@ impl ObjectReference { object_start } - /// Get the object reference from an address that is returned from [`crate::util::address::ObjectReference::to_address`]. - pub fn from_address<VM: VMBinding>(addr: Address) -> ObjectReference { - use crate::vm::ObjectModel; - let obj = unsafe { - ObjectReference::from_raw_address_unchecked( - addr.offset(-VM::VMObjectModel::IN_OBJECT_ADDRESS_OFFSET), - ) - }; - debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || addr == obj.to_raw_address(), "The binding claims unified object reference address, but for address {}, the object reference is {}", addr, obj); - debug_assert!( - obj.to_raw_address().is_aligned_to(Self::ALIGNMENT), - "ObjectReference is required to be word aligned. addr: {addr}, obj: {obj}" - ); - obj - } - /// Is the object reachable, determined by the policy? /// Note: Objects in ImmortalSpace may have `is_live = true` but are actually unreachable. - pub fn is_reachable<VM: VMBinding>(self) -> bool { - unsafe { SFT_MAP.get_unchecked(self.to_address::<VM>()) }.is_reachable(self) + pub fn is_reachable(self) -> bool { + unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_reachable(self) } /// Is the object live, determined by the policy? - pub fn is_live<VM: VMBinding>(self) -> bool { - unsafe { SFT_MAP.get_unchecked(self.to_address::<VM>()) }.is_live(self) + pub fn is_live(self) -> bool { + unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_live(self) } /// Can the object be moved? - pub fn is_movable<VM: VMBinding>(self) -> bool { - unsafe { SFT_MAP.get_unchecked(self.to_address::<VM>()) }.is_movable() + pub fn is_movable(self) -> bool { + unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_movable() } /// Get forwarding pointer if the object is forwarded. - pub fn get_forwarded_object<VM: VMBinding>(self) -> Option<ObjectReference> { - unsafe { SFT_MAP.get_unchecked(self.to_address::<VM>()) }.get_forwarded_object(self) + pub fn get_forwarded_object(self) -> Option<ObjectReference> { + unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.get_forwarded_object(self) } /// Is the object in any MMTk spaces? - pub fn is_in_any_space<VM: VMBinding>(self) -> bool { - unsafe { SFT_MAP.get_unchecked(self.to_address::<VM>()) }.is_in_space(self) + pub fn is_in_any_space(self) -> bool { + unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_in_space(self) } /// Is the object sane? #[cfg(feature = "sanity")] - pub fn is_sane<VM: VMBinding>(self) -> bool { - unsafe { SFT_MAP.get_unchecked(self.to_address::<VM>()) }.is_sane() + pub fn is_sane(self) -> bool { + unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_sane() } }

diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs
index 9f0e6119b9..28958a1759 100644
--- a/src/util/alloc/free_list_allocator.rs
+++ b/src/util/alloc/free_list_allocator.rs
@@ -406,7 +406,7 @@ impl<VM: VMBinding> FreeListAllocator<VM> { // unset allocation bit // Note: We cannot use `unset_vo_bit_unsafe` because two threads may attempt to free // objects at adjacent addresses, and they may share the same byte in the VO bit metadata.
- crate::util::metadata::vo_bit::unset_vo_bit::<VM>(unsafe { + crate::util::metadata::vo_bit::unset_vo_bit(unsafe { ObjectReference::from_raw_address_unchecked(addr) }) }

diff --git a/src/util/finalizable_processor.rs b/src/util/finalizable_processor.rs
index 698aad06fa..70f97bfd52 100644
--- a/src/util/finalizable_processor.rs
+++ b/src/util/finalizable_processor.rs
@@ -54,7 +54,7 @@ impl FinalizableProcessor { for mut f in self.candidates.drain(start..).collect::<Vec<F>>() { let reff = f.get_reference(); trace!("Pop {:?} for finalization", reff); - if reff.is_live::<E::VM>() { + if reff.is_live() { FinalizableProcessor::<F>::forward_finalizable_reference(e, &mut f); trace!("{:?} is live, push {:?} back to candidates", reff, f); self.candidates.push(f);

diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs
index 6e3e4bac2a..c9b5fd1bb4 100644
--- a/src/util/is_mmtk_object.rs
+++ b/src/util/is_mmtk_object.rs
@@ -7,6 +7,11 @@ use crate::util::{Address, ObjectReference}; pub(crate) fn check_object_reference(addr: Address) -> Option<ObjectReference> { use crate::mmtk::SFT_MAP; + debug_assert_ne!(addr, Address::ZERO, "Address is zero"); + debug_assert!( + addr.is_aligned_to(ObjectReference::ALIGNMENT), + "Address is not aligned to word size: {addr}" + ); SFT_MAP.get_checked(addr).is_mmtk_object(addr) }

diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs
index bf391c785d..ec39379afd 100644
--- a/src/util/linear_scan.rs
+++ b/src/util/linear_scan.rs
@@ -27,6 +27,14 @@ impl /// that the VO bit metadata is mapped for the address range. pub fn new(start: Address, end: Address) -> Self { debug_assert!(start < end); + debug_assert!( + start.is_aligned_to(ObjectReference::ALIGNMENT), + "start is not word-aligned: {start}" + ); + debug_assert!( + end.is_aligned_to(ObjectReference::ALIGNMENT), + "end is not word-aligned: {end}" + ); ObjectIterator { start, end, @@ -44,9 +52,9 @@ impl std fn next(&mut self) -> Option<Self::Item> { while self.cursor < self.end { let is_object = if ATOMIC_LOAD_VO_BIT { - vo_bit::is_vo_bit_set_for_addr::<VM>(self.cursor) + vo_bit::is_vo_bit_set_for_addr(self.cursor) } else { - unsafe { vo_bit::is_vo_bit_set_unsafe::<VM>(self.cursor) } + unsafe { vo_bit::is_vo_bit_set_unsafe(self.cursor) } }; if let Some(object) = is_object { @@ -117,9 +125,9 @@ pub trait Region: Copy + PartialEq + PartialOrd { debug_assert!(self.start().as_usize() < usize::MAX - (n << Self::LOG_BYTES)); Self::from_aligned_address(self.start() + (n << Self::LOG_BYTES)) } - /// Return the region that contains the object (by its cell address). - fn containing<VM: VMBinding>(object: ObjectReference) -> Self { - Self::from_unaligned_address(object.to_address::<VM>()) + /// Return the region that contains the object. + fn containing(object: ObjectReference) -> Self { + Self::from_unaligned_address(object.to_raw_address()) } /// Check if the given address is in the region.
diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs
index bf391c785d..ec39379afd 100644
--- a/src/util/linear_scan.rs
+++ b/src/util/linear_scan.rs
@@ -27,6 +27,14 @@ impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool>
     /// that the VO bit metadata is mapped for the address range.
     pub fn new(start: Address, end: Address) -> Self {
         debug_assert!(start < end);
+        debug_assert!(
+            start.is_aligned_to(ObjectReference::ALIGNMENT),
+            "start is not word-aligned: {start}"
+        );
+        debug_assert!(
+            end.is_aligned_to(ObjectReference::ALIGNMENT),
+            "end is not word-aligned: {end}"
+        );
         ObjectIterator {
             start,
             end,
@@ -44,9 +52,9 @@ impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool> std
     fn next(&mut self) -> Option<<Self as Iterator>::Item> {
         while self.cursor < self.end {
             let is_object = if ATOMIC_LOAD_VO_BIT {
-                vo_bit::is_vo_bit_set_for_addr::<VM>(self.cursor)
+                vo_bit::is_vo_bit_set_for_addr(self.cursor)
             } else {
-                unsafe { vo_bit::is_vo_bit_set_unsafe::<VM>(self.cursor) }
+                unsafe { vo_bit::is_vo_bit_set_unsafe(self.cursor) }
             };
 
             if let Some(object) = is_object {
@@ -117,9 +125,9 @@ pub trait Region: Copy + PartialEq + PartialOrd {
         debug_assert!(self.start().as_usize() < usize::MAX - (n << Self::LOG_BYTES));
         Self::from_aligned_address(self.start() + (n << Self::LOG_BYTES))
     }
-    /// Return the region that contains the object (by its cell address).
-    fn containing<VM: VMBinding>(object: ObjectReference) -> Self {
-        Self::from_unaligned_address(object.to_address::<VM>())
+    /// Return the region that contains the object.
+    fn containing(object: ObjectReference) -> Self {
+        Self::from_unaligned_address(object.to_raw_address())
     }
     /// Check if the given address is in the region.
     fn includes_address(&self, addr: Address) -> bool {
diff --git a/src/util/metadata/global.rs b/src/util/metadata/global.rs
index 7583009489..0396946c68 100644
--- a/src/util/metadata/global.rs
+++ b/src/util/metadata/global.rs
@@ -55,7 +55,7 @@ impl MetadataSpec {
         mask: Option<T>,
     ) -> T {
         match self {
-            MetadataSpec::OnSide(metadata_spec) => metadata_spec.load(object.to_address::<VM>()),
+            MetadataSpec::OnSide(metadata_spec) => metadata_spec.load(object.to_raw_address()),
             MetadataSpec::InHeader(metadata_spec) => {
                 VM::VMObjectModel::load_metadata::<T>(metadata_spec, object, mask)
             }
@@ -79,7 +79,7 @@ impl MetadataSpec {
     ) -> T {
         match self {
             MetadataSpec::OnSide(metadata_spec) => {
-                metadata_spec.load_atomic(object.to_address::<VM>(), ordering)
+                metadata_spec.load_atomic(object.to_raw_address(), ordering)
             }
             MetadataSpec::InHeader(metadata_spec) => {
                 VM::VMObjectModel::load_metadata_atomic::<T>(metadata_spec, object, mask, ordering)
@@ -105,7 +105,7 @@ impl MetadataSpec {
     ) {
         match self {
             MetadataSpec::OnSide(metadata_spec) => {
-                metadata_spec.store(object.to_address::<VM>(), val);
+                metadata_spec.store(object.to_raw_address(), val);
             }
             MetadataSpec::InHeader(metadata_spec) => {
                 VM::VMObjectModel::store_metadata::<T>(metadata_spec, object, val, mask)
@@ -130,7 +130,7 @@ impl MetadataSpec {
     ) {
         match self {
             MetadataSpec::OnSide(metadata_spec) => {
-                metadata_spec.store_atomic(object.to_address::<VM>(), val, ordering);
+                metadata_spec.store_atomic(object.to_raw_address(), val, ordering);
             }
             MetadataSpec::InHeader(metadata_spec) => VM::VMObjectModel::store_metadata_atomic::<T>(
                 metadata_spec,
@@ -165,7 +165,7 @@ impl MetadataSpec {
     ) -> std::result::Result<T, T> {
         match self {
             MetadataSpec::OnSide(metadata_spec) => metadata_spec.compare_exchange_atomic(
-                object.to_address::<VM>(),
+                object.to_raw_address(),
                 old_val,
                 new_val,
                 success_order,
@@ -202,7 +202,7 @@ impl MetadataSpec {
     ) -> T {
         match self {
             MetadataSpec::OnSide(metadata_spec) => {
-                metadata_spec.fetch_add_atomic(object.to_address::<VM>(), val, order)
+                metadata_spec.fetch_add_atomic(object.to_raw_address(), val, order)
             }
             MetadataSpec::InHeader(metadata_spec) => {
                 VM::VMObjectModel::fetch_add_metadata::<T>(metadata_spec, object, val, order)
@@ -227,7 +227,7 @@ impl MetadataSpec {
     ) -> T {
         match self {
             MetadataSpec::OnSide(metadata_spec) => {
-                metadata_spec.fetch_sub_atomic(object.to_address::<VM>(), val, order)
+                metadata_spec.fetch_sub_atomic(object.to_raw_address(), val, order)
             }
             MetadataSpec::InHeader(metadata_spec) => {
                 VM::VMObjectModel::fetch_sub_metadata::<T>(metadata_spec, object, val, order)
@@ -252,7 +252,7 @@ impl MetadataSpec {
     ) -> T {
         match self {
             MetadataSpec::OnSide(metadata_spec) => {
-                metadata_spec.fetch_and_atomic(object.to_address::<VM>(), val, order)
+                metadata_spec.fetch_and_atomic(object.to_raw_address(), val, order)
             }
             MetadataSpec::InHeader(metadata_spec) => {
                 VM::VMObjectModel::fetch_and_metadata::<T>(metadata_spec, object, val, order)
@@ -277,7 +277,7 @@ impl MetadataSpec {
     ) -> T {
         match self {
             MetadataSpec::OnSide(metadata_spec) => {
-                metadata_spec.fetch_or_atomic(object.to_address::<VM>(), val, order)
+                metadata_spec.fetch_or_atomic(object.to_raw_address(), val, order)
             }
             MetadataSpec::InHeader(metadata_spec) => {
                 VM::VMObjectModel::fetch_or_metadata::<T>(metadata_spec, object, val, order)
@@ -308,7 +308,7 @@ impl MetadataSpec {
     ) -> std::result::Result<T, T> {
         match self {
             MetadataSpec::OnSide(metadata_spec) => metadata_spec.fetch_update_atomic(
-                object.to_address::<VM>(),
+                object.to_raw_address(),
                 set_order,
                 fetch_order,
                 f,
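The substitution in the metadata code is mechanical: on-side metadata is now indexed by the object's raw address rather than a computed in-object address. A sketch of the pattern (assuming `SideMetadataSpec::load_atomic` as used above):

    use mmtk::util::metadata::side_metadata::SideMetadataSpec;
    use mmtk::util::ObjectReference;
    use std::sync::atomic::Ordering;

    // Before: spec.load_atomic::<u8>(object.to_address::<VM>(), Ordering::SeqCst)
    fn load_side_byte(spec: &SideMetadataSpec, object: ObjectReference) -> u8 {
        spec.load_atomic::<u8>(object.to_raw_address(), Ordering::SeqCst)
    }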
diff --git a/src/util/metadata/log_bit.rs b/src/util/metadata/log_bit.rs
index 6b387041fb..78e198ea35 100644
--- a/src/util/metadata/log_bit.rs
+++ b/src/util/metadata/log_bit.rs
@@ -23,7 +23,7 @@ impl VMGlobalLogBitSpec {
             // know we are setting log bit for mature space, and every object in the space should have log
             // bit as 1.
             MetadataSpec::OnSide(spec) => unsafe {
-                spec.set_raw_byte_atomic(object.to_address::<VM>(), order)
+                spec.set_raw_byte_atomic(object.to_raw_address(), order)
             },
         }
     }
diff --git a/src/util/metadata/vo_bit/helper.rs b/src/util/metadata/vo_bit/helper.rs
index c9c992693d..4dd06459de 100644
--- a/src/util/metadata/vo_bit/helper.rs
+++ b/src/util/metadata/vo_bit/helper.rs
@@ -142,7 +142,7 @@ pub(crate) fn on_trace_object<VM: VMBinding>(object: ObjectReference) {
         // If the VO bits are available during tracing,
         // we validate the objects we trace using the VO bits.
         debug_assert!(
-            vo_bit::is_vo_bit_set::<VM>(object),
+            vo_bit::is_vo_bit_set(object),
             "{:x}: VO bit not set",
             object
         );
@@ -153,7 +153,7 @@ pub(crate) fn on_object_marked<VM: VMBinding>(object: ObjectReference) {
     match strategy::<VM>() {
         VOBitUpdateStrategy::ClearAndReconstruct => {
             // In this strategy, we set the VO bit when an object is marked.
-            vo_bit::set_vo_bit::<VM>(object);
+            vo_bit::set_vo_bit(object);
         }
         VOBitUpdateStrategy::CopyFromMarkBits => {
             // VO bit was not cleared before tracing in this strategy. Do nothing.
@@ -165,7 +165,7 @@ pub(crate) fn on_object_forwarded<VM: VMBinding>(new_object: ObjectReference) {
     match strategy::<VM>() {
         VOBitUpdateStrategy::ClearAndReconstruct => {
             // In this strategy, we set the VO bit of the to-space object when forwarded.
-            vo_bit::set_vo_bit::<VM>(new_object);
+            vo_bit::set_vo_bit(new_object);
         }
         VOBitUpdateStrategy::CopyFromMarkBits => {
             // In this strategy, we will copy mark bits to VO bits.
@@ -178,7 +178,7 @@ pub(crate) fn on_object_forwarded<VM: VMBinding>(new_object: ObjectReference) {
             );
 
             // We set the VO bit for the to-space object eagerly.
-            vo_bit::set_vo_bit::<VM>(new_object);
+            vo_bit::set_vo_bit(new_object);
         }
     }
 }
diff --git a/src/util/metadata/vo_bit/mod.rs b/src/util/metadata/vo_bit/mod.rs
index 3ae0aee8d7..dc2b90b916 100644
--- a/src/util/metadata/vo_bit/mod.rs
+++ b/src/util/metadata/vo_bit/mod.rs
@@ -61,24 +61,20 @@ pub(crate) const VO_BIT_SIDE_METADATA_SPEC: SideMetadataSpec =
 pub const VO_BIT_SIDE_METADATA_ADDR: Address = VO_BIT_SIDE_METADATA_SPEC.get_absolute_offset();
 
 /// Atomically set the VO bit for an object.
-pub fn set_vo_bit<VM: VMBinding>(object: ObjectReference) {
-    debug_assert!(
-        !is_vo_bit_set::<VM>(object),
-        "{:x}: VO bit already set",
-        object
-    );
-    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 1, Ordering::SeqCst);
+pub fn set_vo_bit(object: ObjectReference) {
+    debug_assert!(!is_vo_bit_set(object), "{:x}: VO bit already set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_raw_address(), 1, Ordering::SeqCst);
 }
 
 /// Atomically unset the VO bit for an object.
-pub fn unset_vo_bit<VM: VMBinding>(object: ObjectReference) {
-    debug_assert!(is_vo_bit_set::<VM>(object), "{:x}: VO bit not set", object);
-    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 0, Ordering::SeqCst);
+pub fn unset_vo_bit(object: ObjectReference) {
+    debug_assert!(is_vo_bit_set(object), "{:x}: VO bit not set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_raw_address(), 0, Ordering::SeqCst);
 }
 
 /// Atomically unset the VO bit for an object, regardless whether the bit is set or not.
-pub fn unset_vo_bit_nocheck<VM: VMBinding>(object: ObjectReference) {
-    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 0, Ordering::SeqCst);
+pub fn unset_vo_bit_nocheck(object: ObjectReference) {
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_raw_address(), 0, Ordering::SeqCst);
 }
 
 /// Non-atomically unset the VO bit for an object. The caller needs to ensure the side
@@ -87,41 +83,42 @@ pub fn unset_vo_bit_nocheck<VM: VMBinding>(object: ObjectReference) {
 /// # Safety
 ///
 /// This is unsafe: check the comment on `side_metadata::store`
-pub unsafe fn unset_vo_bit_unsafe<VM: VMBinding>(object: ObjectReference) {
-    debug_assert!(is_vo_bit_set::<VM>(object), "{:x}: VO bit not set", object);
-    VO_BIT_SIDE_METADATA_SPEC.store::<u8>(object.to_address::<VM>(), 0);
+pub unsafe fn unset_vo_bit_unsafe(object: ObjectReference) {
+    debug_assert!(is_vo_bit_set(object), "{:x}: VO bit not set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store::<u8>(object.to_raw_address(), 0);
 }
 
 /// Check if the VO bit is set for an object.
-pub fn is_vo_bit_set<VM: VMBinding>(object: ObjectReference) -> bool {
-    VO_BIT_SIDE_METADATA_SPEC.load_atomic::<u8>(object.to_address::<VM>(), Ordering::SeqCst) == 1
+pub fn is_vo_bit_set(object: ObjectReference) -> bool {
+    VO_BIT_SIDE_METADATA_SPEC.load_atomic::<u8>(object.to_raw_address(), Ordering::SeqCst) == 1
 }
 
 /// Check if an address can be turned directly into an object reference using the VO bit.
 /// If so, return `Some(object)`. Otherwise return `None`.
-pub fn is_vo_bit_set_for_addr<VM: VMBinding>(address: Address) -> Option<ObjectReference> {
-    // if the address is not aligned, it cannot be an object reference.
-    if !address.is_aligned_to(ObjectReference::ALIGNMENT) {
-        return None;
-    }
-    is_vo_bit_set_inner::<true, VM>(address)
+///
+/// The `address` must be word-aligned.
+pub fn is_vo_bit_set_for_addr(address: Address) -> Option<ObjectReference> {
+    is_vo_bit_set_inner::<true>(address)
 }
 
 /// Check if an address can be turned directly into an object reference using the VO bit.
 /// If so, return `Some(object)`. Otherwise return `None`. The caller needs to ensure the side
 /// metadata for the VO bit for the object is accessed by only one thread.
 ///
+/// The `address` must be word-aligned.
+///
 /// # Safety
 ///
 /// This is unsafe: check the comment on `side_metadata::load`
-pub unsafe fn is_vo_bit_set_unsafe<VM: VMBinding>(address: Address) -> Option<ObjectReference> {
-    is_vo_bit_set_inner::<false, VM>(address)
+pub unsafe fn is_vo_bit_set_unsafe(address: Address) -> Option<ObjectReference> {
+    is_vo_bit_set_inner::<false>(address)
 }
 
-fn is_vo_bit_set_inner<const ATOMIC: bool, VM: VMBinding>(
-    address: Address,
-) -> Option<ObjectReference> {
-    let addr = get_in_object_address_for_potential_object::<VM>(address);
+fn is_vo_bit_set_inner<const ATOMIC: bool>(addr: Address) -> Option<ObjectReference> {
+    debug_assert!(
+        addr.is_aligned_to(ObjectReference::ALIGNMENT),
+        "Address is not word-aligned: {addr}"
+    );
 
     // If we haven't mapped VO bit for the address, it cannot be an object
     if !VO_BIT_SIDE_METADATA_SPEC.is_mapped(addr) {
@@ -134,12 +131,7 @@ fn is_vo_bit_set_inner<const ATOMIC: bool, VM: VMBinding>(
         unsafe { VO_BIT_SIDE_METADATA_SPEC.load::<u8>(addr) }
     };
 
-    if vo_bit == 1 {
-        let obj = get_object_ref_for_vo_addr::<VM>(addr);
-        Some(obj)
-    } else {
-        None
-    }
+    (vo_bit == 1).then(|| get_object_ref_for_vo_addr(addr))
 }
 
 /// Bulk zero the VO bit.
@@ -195,16 +187,12 @@ pub fn find_object_from_internal_pointer<VM: VMBinding>(
     }
 }
 
-/// Turning a potential object reference into its in-object address (the ref_to_address address) where the metadata is set for.
-fn get_in_object_address_for_potential_object<VM: VMBinding>(potential_obj: Address) -> Address {
-    potential_obj.offset(VM::VMObjectModel::IN_OBJECT_ADDRESS_OFFSET)
-}
-
 /// Get the object reference from an aligned address where VO bit is set.
-pub(crate) fn get_object_ref_for_vo_addr<VM: VMBinding>(vo_addr: Address) -> ObjectReference {
-    let addr = vo_addr.offset(-VM::VMObjectModel::IN_OBJECT_ADDRESS_OFFSET);
-    let aligned = addr.align_up(ObjectReference::ALIGNMENT);
-    unsafe { ObjectReference::from_raw_address_unchecked(aligned) }
+pub(crate) fn get_object_ref_for_vo_addr(vo_addr: Address) -> ObjectReference {
+    // VO bit should be set on the address.
+    debug_assert!(vo_addr.is_aligned_to(ObjectReference::ALIGNMENT));
+    debug_assert!(unsafe { is_vo_addr(vo_addr) });
+    unsafe { ObjectReference::from_raw_address_unchecked(vo_addr) }
 }
 
 /// Check if the address could be an internal pointer in the object.
@@ -219,10 +207,7 @@ pub fn is_internal_ptr_from_vo_bit<VM: VMBinding>(
     vo_addr: Address,
     internal_ptr: Address,
 ) -> Option<ObjectReference> {
-    // VO bit should be set on the address.
-    debug_assert!(unsafe { is_vo_addr(vo_addr) });
-
-    let obj = get_object_ref_for_vo_addr::<VM>(vo_addr);
+    let obj = get_object_ref_for_vo_addr(vo_addr);
     if is_internal_ptr::<VM>(obj, internal_ptr) {
         Some(obj)
     } else {
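Callers of `is_vo_bit_set_for_addr` are now responsible for alignment filtering. A crate-internal sketch of a conservative probe under the new contract (the function name is illustrative):

    use crate::util::metadata::vo_bit;
    use crate::util::{Address, ObjectReference};

    fn conservative_probe(candidate: Address) -> Option<ObjectReference> {
        // A misaligned word cannot be an object reference; filter it before querying VO bits.
        if !candidate.is_aligned_to(ObjectReference::ALIGNMENT) {
            return None;
        }
        vo_bit::is_vo_bit_set_for_addr(candidate)
    }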
diff --git a/src/util/object_enum.rs b/src/util/object_enum.rs
index 61378f7a57..18508f088b 100644
--- a/src/util/object_enum.rs
+++ b/src/util/object_enum.rs
@@ -58,7 +58,7 @@ where
     fn visit_address_range(&mut self, start: Address, end: Address) {
         VO_BIT.scan_non_zero_values::<u8>(start, end, &mut |address| {
-            let object = vo_bit::get_object_ref_for_vo_addr::<VM>(address);
+            let object = vo_bit::get_object_ref_for_vo_addr(address);
             (self.object_callback)(object);
         })
     }
diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs
index 4f4331849c..f9339d7267 100644
--- a/src/util/reference_processor.rs
+++ b/src/util/reference_processor.rs
@@ -208,17 +208,17 @@ impl ReferenceProcessor {
 
     /// Return the new `ObjectReference` of a referent if it is already moved, or its current
     /// `ObjectReference` otherwise. The referent must be live when calling this function.
-    fn get_forwarded_referent<VM: VMBinding>(referent: ObjectReference) -> ObjectReference {
-        debug_assert!(referent.is_live::<VM>());
-        referent.get_forwarded_object::<VM>().unwrap_or(referent)
+    fn get_forwarded_referent(referent: ObjectReference) -> ObjectReference {
+        debug_assert!(referent.is_live());
+        referent.get_forwarded_object().unwrap_or(referent)
     }
 
     /// Return the new `ObjectReference` of a reference object if it is already moved, or its
     /// current `ObjectReference` otherwise. The reference object must be live when calling this
     /// function.
-    fn get_forwarded_reference<VM: VMBinding>(object: ObjectReference) -> ObjectReference {
-        debug_assert!(object.is_live::<VM>());
-        object.get_forwarded_object::<VM>().unwrap_or(object)
+    fn get_forwarded_reference(object: ObjectReference) -> ObjectReference {
+        debug_assert!(object.is_live());
+        object.get_forwarded_object().unwrap_or(object)
     }
 
     // These funcions call `trace_object()`, which will ensure the object and its descendents will
@@ -259,10 +259,10 @@ impl ReferenceProcessor {
     {
         // For references in the table, the reference needs to be valid, and if the referent is not cleared, it should be valid as well
         sync.references.iter().for_each(|reff| {
-            debug_assert!(reff.is_in_any_space::<VM>());
+            debug_assert!(reff.is_in_any_space());
             if let Some(referent) = VM::VMReferenceGlue::get_referent(*reff) {
                 debug_assert!(
-                    referent.is_in_any_space::<VM>(),
+                    referent.is_in_any_space(),
                     "Referent {:?} (of reference {:?}) is not in any space",
                     referent,
                     reff
@@ -271,7 +271,7 @@ impl ReferenceProcessor {
         });
 
         // For references that will be enqueue'd, the reference needs to be valid, and the referent needs to be cleared.
         sync.enqueued_references.iter().for_each(|reff| {
-            debug_assert!(reff.is_in_any_space::<VM>());
+            debug_assert!(reff.is_in_any_space());
             let maybe_referent = VM::VMReferenceGlue::get_referent(*reff);
             debug_assert!(maybe_referent.is_none());
         });
@@ -403,7 +403,7 @@ impl ReferenceProcessor {
         for reference in sync.references.iter() {
             trace!("Processing reference: {:?}", reference);
 
-            if !reference.is_live::<VM>() {
+            if !reference.is_live() {
                 // Reference is currently unreachable but may get reachable by the
                 // following trace. We postpone the decision.
                 continue;
@@ -435,14 +435,14 @@ impl ReferenceProcessor {
 
         // If the reference is dead, we're done with it. Let it (and
         // possibly its referent) be garbage-collected.
-        if !reference.is_live::<VM>() {
+        if !reference.is_live() {
             VM::VMReferenceGlue::clear_referent(reference);
             trace!(" UNREACHABLE reference: {}", reference);
             return None;
         }
 
         // The reference object is live.
-        let new_reference = Self::get_forwarded_reference::<VM>(reference);
+        let new_reference = Self::get_forwarded_reference(reference);
         trace!(" forwarded to: {}", new_reference);
 
         // Get the old referent.
@@ -458,11 +458,11 @@ impl ReferenceProcessor {
             return None;
         };
 
-        if old_referent.is_live::<VM>() {
+        if old_referent.is_live() {
             // Referent is still reachable in a way that is as strong as
             // or stronger than the current reference level.
-            let new_referent = Self::get_forwarded_referent::<VM>(old_referent);
-            debug_assert!(new_referent.is_live::<VM>());
+            let new_referent = Self::get_forwarded_referent(old_referent);
+            debug_assert!(new_referent.is_live());
             trace!(" forwarded referent to: {}", new_referent);
 
             // The reference object stays on the waiting list, and the
diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs
index 150b4d67f7..d3d3cdb007 100644
--- a/src/util/sanity/sanity_checker.rs
+++ b/src/util/sanity/sanity_checker.rs
@@ -192,7 +192,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {
             let mut sanity_checker = self.mmtk().sanity_checker.lock().unwrap();
             if !sanity_checker.refs.contains(&object) {
                 // FIXME steveb consider VM-specific integrity check on reference.
-                assert!(object.is_sane::<VM>(), "Invalid reference {:?}", object);
+                assert!(object.is_sane(), "Invalid reference {:?}", object);
 
                 // Let plan check object
                 assert!(
@@ -217,7 +217,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {
         // If the valid object (VO) bit metadata is enabled, all live objects should have the VO
         // bit set when sanity GC starts.
         #[cfg(feature = "vo_bit")]
-        if !crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object) {
+        if !crate::util::metadata::vo_bit::is_vo_bit_set(object) {
             panic!("VO bit is not set: {}", object);
         }
diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs
index 4f8d49b1be..a0d9b5f868 100644
--- a/src/util/test_util/mock_vm.rs
+++ b/src/util/test_util/mock_vm.rs
@@ -523,9 +523,6 @@ impl crate::vm::ObjectModel<MockVM> for MockVM {
         mock!(ref_to_header(object))
     }
 
-    // TODO: This is not mocked. We need a way to deal with it.
-    const IN_OBJECT_ADDRESS_OFFSET: isize = -(DEFAULT_OBJECT_REF_OFFSET as isize);
-
     fn dump_object(object: ObjectReference) {
         mock!(dump_object(object))
     }
diff --git a/src/vm/active_plan.rs b/src/vm/active_plan.rs
index 1828a1352a..aa73831e8e 100644
--- a/src/vm/active_plan.rs
+++ b/src/vm/active_plan.rs
@@ -44,6 +44,8 @@ pub trait ActivePlan<VM: VMBinding> {
     ///
     /// The method should return the new object reference if the method moves the object, otherwise return the original object reference.
     ///
+    /// Note: **This is an experimental feature**, and may not interact well with other parts of MMTk. Use with caution.
+    ///
     /// Arguments:
     /// * `queue`: The object queue. If an object is encountered for the first time in this GC, we expect the implementation to call `queue.enqueue()`
     ///   for the object. If the object is moved during the tracing, the new object reference (after copying) should be enqueued instead.
diff --git a/src/vm/object_model.rs b/src/vm/object_model.rs
index f377e60795..4b9732a0d7 100644
--- a/src/vm/object_model.rs
+++ b/src/vm/object_model.rs
@@ -64,25 +64,18 @@ use crate::vm::VMBinding;
 /// Instead, MMTk only uses the following addresses for an object. If you find the MMTk's approach does not work for your language in practice, you are welcome to submit an issue
 /// or engage with MMTk team on Zulip to disucss further.
 ///
-/// ## Object Reference
+/// ## (Raw) Object Reference
 ///
-/// See [`crate::util::address::ObjectReference`]. This is a special address that represents the object.
-/// MMTk refers to an object by its object reference. An object reference cannot be NULL, and has to be
-/// word aligned ([`crate::util::address::ObjectReference::ALIGNMENT`]). It is allowed that an object
-/// reference is not in the allocated memory for the object.
+/// See [`crate::util::address::ObjectReference`]. This is a special address that represents the
+/// object. MMTk refers to an object by its object reference. An object reference cannot be NULL,
+/// must be inside the address range of the object, and must be word aligned
+/// ([`crate::util::address::ObjectReference::ALIGNMENT`]).
 ///
 /// ## Object Start Address
 ///
 /// This address is returned by an allocation call [`crate::memory_manager::alloc`]. This is the start of the address range of the allocation.
 /// [`ObjectModel::ref_to_object_start`] should return this address for a given object.
 ///
-/// ## In-object Address
-///
-/// As the object reference address may be outside the allocated memory, and calculating the object start address may
-/// be complex, MMTk requires a fixed and efficient in-object address for each object. The in-object address must be a constant
-/// offset from the object reference address, and must be inside the allocated memory. MMTk requires the binding to
-/// specify the offset from the object reference to the in-object address by [`ObjectModel::IN_OBJECT_ADDRESS_OFFSET`].
-/// 
 /// ## Object header address
 ///
 /// If a binding allows MMTk to use its header bits for object metadata, it needs to supply an object header
@@ -432,10 +425,9 @@ pub trait ObjectModel<VM: VMBinding> {
     /// mature space for generational plans.
     const VM_WORST_CASE_COPY_EXPANSION: f64 = 1.5;
 
-    /// If this is true, the binding guarantees that the object reference's raw address,
-    /// the in-object address, and the object start are always the same address. To be precise,
-    /// 1. an object reference's raw address is always equal to the return value of the `ref_to_object_start` method,
-    /// 2. `IN_OBJECT_ADDRESS_OFFSET` is 0.
+    /// If this is true, the binding guarantees that the object reference's raw address and the
+    /// object start are always the same address. In other words, an object reference's raw
+    /// address is always equal to the return value of the `ref_to_object_start` method.
     ///
     /// This is a very strong guarantee, but it is also helpful for MMTk to
     /// make some assumptions and optimize for this case.
@@ -473,11 +465,6 @@ pub trait ObjectModel<VM: VMBinding> {
     /// * `object`: The object to be queried.
     fn ref_to_header(object: ObjectReference) -> Address;
 
-    /// The offset from the object reference to an in-object address.
-    /// The binding needs to guarantee that obj_ref.to_raw_address() + IN_OBJECT_ADDRESS_OFFSET
-    /// is inside the storage associated with the object.
-    const IN_OBJECT_ADDRESS_OFFSET: isize;
-
     /// Dump debugging information for an object.
     ///
     /// Arguments:
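Under the new requirement, a binding that keeps the object reference at a fixed offset inside the allocated cell can convert in both directions with plain offset arithmetic. A sketch (the one-word header layout and both function names are hypothetical, for illustration only):

    use mmtk::util::{Address, ObjectReference};

    // Hypothetical layout: one word of header at the start of the allocation;
    // the object reference points right after it, i.e. inside the object and word-aligned.
    const OBJECT_REF_OFFSET: usize = std::mem::size_of::<usize>();

    fn object_start_to_ref(start: Address) -> ObjectReference {
        // Safety: `start` is a valid, word-aligned allocation result.
        unsafe { ObjectReference::from_raw_address_unchecked(start + OBJECT_REF_OFFSET) }
    }

    fn ref_to_object_start(object: ObjectReference) -> Address {
        object.to_raw_address() - OBJECT_REF_OFFSET
    }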
diff --git a/src/vm/tests/mock_tests/mock_test_conservatism.rs b/src/vm/tests/mock_tests/mock_test_conservatism.rs
index 2b06467625..0e817aab26 100644
--- a/src/vm/tests/mock_tests/mock_test_conservatism.rs
+++ b/src/vm/tests/mock_tests/mock_test_conservatism.rs
@@ -12,8 +12,13 @@ lazy_static! {
 }
 
 fn basic_filter(addr: Address) -> bool {
-    !addr.is_zero()
-        && addr.as_usize() % VO_BIT_REGION_SIZE == (DEFAULT_OBJECT_REF_OFFSET % VO_BIT_REGION_SIZE)
+    // `is_mmtk_object` only accepts addresses that are aligned to `ObjectReference::ALIGNMENT`.
+    // It currently has the same value as `VO_BIT_REGION_SIZE`.
+    !addr.is_zero() && addr.is_aligned_to(VO_BIT_REGION_SIZE)
+}
+
+fn iter_aligned_offsets(limit: usize) -> impl Iterator<Item = usize> {
+    (VO_BIT_REGION_SIZE..limit).step_by(VO_BIT_REGION_SIZE)
 }
 
 fn assert_filter_pass(addr: Address) {
@@ -60,17 +65,9 @@ fn assert_invalid_objref(addr: Address, real: Address) {
 
 #[test]
 pub fn null() {
-    with_mockvm(
-        default_setup,
-        || {
-            SINGLE_OBJECT.with_fixture(|fixture| {
-                let addr = Address::ZERO;
-                assert_filter_fail(addr);
-                assert_invalid_objref(addr, fixture.objref.to_raw_address());
-            });
-        },
-        no_cleanup,
-    )
+    let addr = Address::ZERO;
+    // Zero address cannot be passed to `is_mmtk_object`. We just test if our filter is good.
+    assert_filter_fail(addr);
 }
 
 // This should be small enough w.r.t `HEAP_START` and `HEAP_END`.
@@ -82,7 +79,7 @@ pub fn too_small() {
         default_setup,
         || {
             SINGLE_OBJECT.with_fixture(|fixture| {
-                for offset in 1usize..SMALL_OFFSET {
+                for offset in iter_aligned_offsets(SMALL_OFFSET) {
                     let addr = Address::ZERO + offset;
                     assert_invalid_objref(addr, fixture.objref.to_raw_address());
                 }
@@ -98,7 +95,7 @@ pub fn max() {
         default_setup,
         || {
             SINGLE_OBJECT.with_fixture(|fixture| {
-                let addr = Address::MAX;
+                let addr = Address::MAX.align_down(VO_BIT_REGION_SIZE);
                 assert_invalid_objref(addr, fixture.objref.to_raw_address());
             });
         },
@@ -112,8 +109,8 @@ pub fn too_big() {
         default_setup,
         || {
             SINGLE_OBJECT.with_fixture(|fixture| {
-                for offset in 1usize..SMALL_OFFSET {
-                    let addr = Address::MAX - offset;
+                for offset in iter_aligned_offsets(SMALL_OFFSET) {
+                    let addr = unsafe { Address::from_usize(0usize.wrapping_sub(offset)) };
                     assert_invalid_objref(addr, fixture.objref.to_raw_address());
                 }
             });
@@ -145,7 +142,7 @@ pub fn small_offsets() {
         default_setup,
         || {
             SINGLE_OBJECT.with_fixture(|fixture| {
-                for offset in 1usize..SEVERAL_PAGES {
+                for offset in iter_aligned_offsets(SEVERAL_PAGES) {
                     let addr = fixture.objref.to_raw_address() + offset;
                     if basic_filter(addr) {
                         assert_invalid_objref(addr, fixture.objref.to_raw_address());
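A note on the `too_big` change above: for a word-aligned `offset`, `0usize.wrapping_sub(offset)` yields a word-aligned address near the top of the address space, whereas `Address::MAX - offset` subtracts from an all-ones value and is always misaligned, which the alignment filter now rejects. On a 64-bit machine, for example:

    assert_eq!(0usize.wrapping_sub(8), 0xffff_ffff_ffff_fff8); // word-aligned
    assert_eq!(usize::MAX - 8, 0xffff_ffff_ffff_fff7); // misaligned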
diff --git a/src/vm/tests/mock_tests/mock_test_internal_ptr_before_object_ref.rs b/src/vm/tests/mock_tests/mock_test_internal_ptr_before_object_ref.rs
index 007d3ab99d..329ea776d3 100644
--- a/src/vm/tests/mock_tests/mock_test_internal_ptr_before_object_ref.rs
+++ b/src/vm/tests/mock_tests/mock_test_internal_ptr_before_object_ref.rs
@@ -30,11 +30,10 @@ pub fn interior_pointer_before_object_ref() {
                 let obj = MockVM::object_start_to_ref(addr);
                 println!(
-                    "start = {}, end = {}, obj = {}, in-obj addr = {}",
+                    "start = {}, end = {}, obj = {}",
                     addr,
                     addr + OBJECT_SIZE,
                     obj,
-                    obj.to_address::<MockVM>()
                 );
                 memory_manager::post_alloc(
                     &mut fixture.mutator,
                     obj,
@@ -42,21 +41,6 @@ pub fn interior_pointer_before_object_ref() {
                     OBJECT_SIZE,
                     AllocationSemantics::Default,
                 );
-
-                // Forge a pointer that points before the object reference, but after in-object address. MMTk should still find the base reference properly.
-
-                let before_obj_ref = addr;
-                assert!(before_obj_ref < obj.to_raw_address());
-                assert!(before_obj_ref >= obj.to_address::<MockVM>());
-
-                println!("Check {:?}", before_obj_ref);
-                let base_ref = crate::memory_manager::find_object_from_internal_pointer::<MockVM>(
-                    before_obj_ref,
-                    usize::MAX,
-                );
-                println!("base_ref {:?}", base_ref);
-                assert!(base_ref.is_some());
-                assert_eq!(base_ref.unwrap(), obj);
             },
             no_cleanup,
         )
diff --git a/src/vm/tests/mock_tests/mock_test_internal_ptr_invalid.rs b/src/vm/tests/mock_tests/mock_test_internal_ptr_invalid.rs
index 14de366798..b9186839e1 100644
--- a/src/vm/tests/mock_tests/mock_test_internal_ptr_invalid.rs
+++ b/src/vm/tests/mock_tests/mock_test_internal_ptr_invalid.rs
@@ -15,10 +15,8 @@ pub fn interior_pointer_invalid() {
         let _ = MutatorFixture::create_with_heapsize(10 * MB);
 
         let assert_no_object = |addr: Address| {
-            let base_ref = crate::memory_manager::find_object_from_internal_pointer::<MockVM>(
-                addr,
-                usize::MAX,
-            );
+            let base_ref =
+                crate::memory_manager::find_object_from_internal_pointer(addr, usize::MAX);
             assert!(base_ref.is_none());
         };
diff --git a/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_multi_page.rs b/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_multi_page.rs
index e330e3bb7c..675b9a7095 100644
--- a/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_multi_page.rs
+++ b/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_multi_page.rs
@@ -33,11 +33,10 @@ pub fn interior_pointer_in_large_object() {
                 let obj = MockVM::object_start_to_ref(addr);
 
                 println!(
-                    "start = {}, end = {}, obj = {}, in-obj addr = {}",
+                    "start = {}, end = {}, obj = {}",
                     addr,
                     addr + OBJECT_SIZE,
                     obj,
-                    obj.to_address::<MockVM>()
                 );
 
                 memory_manager::post_alloc(
                     &mut fixture.mutator,
                     obj,
                     OBJECT_SIZE,
@@ -47,25 +46,22 @@ pub fn interior_pointer_in_large_object() {
                     AllocationSemantics::Los,
                 );
 
-                let test_internal_ptr =
-                    |ptr: Address| {
-                        println!("ptr = {}", ptr);
-                        if ptr > addr + OBJECT_SIZE {
-                            // not internal pointer
-                            let base_ref = crate::memory_manager::find_object_from_internal_pointer::<
-                                MockVM,
-                            >(ptr, usize::MAX);
-                            println!("{:?}", base_ref);
-                            assert!(base_ref.is_none());
-                        } else {
-                            // is internal pointer
-                            let base_ref = crate::memory_manager::find_object_from_internal_pointer::<
-                                MockVM,
-                            >(ptr, usize::MAX);
-                            assert!(base_ref.is_some());
-                            assert_eq!(base_ref.unwrap(), obj);
-                        }
-                    };
+                let test_internal_ptr = |ptr: Address| {
+                    println!("ptr = {}", ptr);
+                    if ptr > addr + OBJECT_SIZE {
+                        // not internal pointer
+                        let base_ref =
+                            crate::memory_manager::find_object_from_internal_pointer(ptr, usize::MAX);
+                        println!("{:?}", base_ref);
+                        assert!(base_ref.is_none());
+                    } else {
+                        // is internal pointer
+                        let base_ref =
+                            crate::memory_manager::find_object_from_internal_pointer(ptr, usize::MAX);
+                        assert!(base_ref.is_some());
+                        assert_eq!(base_ref.unwrap(), obj);
+                    }
+                };
 
                 // Test with the first 1024 bytes as offset in the object
                 for offset in 0..1024usize {
diff --git a/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_same_page.rs b/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_same_page.rs
index f5f01511a4..36fc9de6ed 100644
--- a/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_same_page.rs
+++ b/src/vm/tests/mock_tests/mock_test_internal_ptr_large_object_same_page.rs
@@ -34,11 +34,10 @@ pub fn interior_pointer_in_large_object_same_page() {
                 let obj = MockVM::object_start_to_ref(addr);
 
                 println!(
-                    "start = {}, end = {}, obj = {}, in-obj addr = {}",
+                    "start = {}, end = {}, obj = {}",
                     addr,
                     addr + OBJECT_SIZE,
                     obj,
-                    obj.to_address::<MockVM>()
                 );
 
                 memory_manager::post_alloc(
@@ -49,27 +48,21 @@ pub fn interior_pointer_in_large_object_same_page() {
                 );
 
                 let ptr = obj.to_raw_address();
-                let base_ref = crate::memory_manager::find_object_from_internal_pointer::<MockVM>(
-                    ptr,
-                    OBJECT_SIZE,
-                );
+                let base_ref =
+                    crate::memory_manager::find_object_from_internal_pointer(ptr, OBJECT_SIZE);
                 println!("{:?}", base_ref);
                 assert!(base_ref.is_some());
                 assert_eq!(base_ref.unwrap(), obj);
 
                 let ptr = obj.to_raw_address() + OBJECT_SIZE / 2;
-                let base_ref = crate::memory_manager::find_object_from_internal_pointer::<MockVM>(
-                    ptr,
-                    OBJECT_SIZE,
-                );
+                let base_ref =
+                    crate::memory_manager::find_object_from_internal_pointer(ptr, OBJECT_SIZE);
                 assert!(base_ref.is_some());
                 assert_eq!(base_ref.unwrap(), obj);
 
                 let ptr = obj.to_raw_address() + OBJECT_SIZE;
-                let base_ref = crate::memory_manager::find_object_from_internal_pointer::<MockVM>(
-                    ptr,
-                    OBJECT_SIZE,
-                );
+                let base_ref =
+                    crate::memory_manager::find_object_from_internal_pointer(ptr, OBJECT_SIZE);
                 assert!(base_ref.is_none());
             },
             no_cleanup,
diff --git a/src/vm/tests/mock_tests/mock_test_internal_ptr_normal_object.rs b/src/vm/tests/mock_tests/mock_test_internal_ptr_normal_object.rs
index fdfddd6c6c..ad94f20fdb 100644
--- a/src/vm/tests/mock_tests/mock_test_internal_ptr_normal_object.rs
+++ b/src/vm/tests/mock_tests/mock_test_internal_ptr_normal_object.rs
@@ -32,11 +32,10 @@ pub fn interior_pointer_in_normal_object() {
                 let obj = MockVM::object_start_to_ref(addr);
 
                 println!(
-                    "start = {}, end = {}, obj = {}, in-obj addr = {}",
+                    "start = {}, end = {}, obj = {}",
                     addr,
                     addr + OBJECT_SIZE,
                     obj,
-                    obj.to_address::<MockVM>()
                 );
                 memory_manager::post_alloc(
                     &mut fixture.mutator,
@@ -49,23 +48,25 @@ pub fn interior_pointer_in_normal_object() {
                 if ptr >= addr + OBJECT_SIZE {
                     println!("ptr = {}, not internal pointer", ptr);
                     // not internal pointer
-                    let base_ref = crate::memory_manager::find_object_from_internal_pointer::<
-                        MockVM,
-                    >(ptr, usize::MAX);
+                    let base_ref = crate::memory_manager::find_object_from_internal_pointer(
+                        ptr,
+                        usize::MAX,
+                    );
                     println!("{:?}", base_ref);
                     assert!(base_ref.is_none());
                 } else {
                     println!("ptr = {}, internal pointer", ptr);
                     // is internal pointer
-                    let base_ref = crate::memory_manager::find_object_from_internal_pointer::<
-                        MockVM,
-                    >(ptr, usize::MAX);
+                    let base_ref = crate::memory_manager::find_object_from_internal_pointer(
+                        ptr,
+                        usize::MAX,
+                    );
                     assert!(base_ref.is_some());
                     assert_eq!(base_ref.unwrap(), obj);
                 }
             };
 
-            let base_ref = crate::memory_manager::find_object_from_internal_pointer::<MockVM>(
+            let base_ref = crate::memory_manager::find_object_from_internal_pointer(
                 obj.to_raw_address(),
                 OBJECT_SIZE,
             );
diff --git a/src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs b/src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs
index ad0032d165..189729ab3c 100644
--- a/src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs
+++ b/src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs
@@ -19,7 +19,7 @@ pub fn near_zero() {
         // and decide if we need to test calling `is_in_mmtk_space` with 0 as an argument.
         let addr = unsafe { Address::from_usize(DEFAULT_OBJECT_REF_OFFSET) };
         assert!(
-            !memory_manager::is_in_mmtk_spaces::<MockVM>(
+            !memory_manager::is_in_mmtk_spaces(
                 ObjectReference::from_raw_address(addr).unwrap()
             ),
             "A very low address {addr} should not be in any MMTk spaces."
@@ -37,7 +37,7 @@ pub fn max() {
         || {
             SINGLE_OBJECT.with_fixture(|_fixture| {
                 assert!(
-                    !memory_manager::is_in_mmtk_spaces::<MockVM>(
+                    !memory_manager::is_in_mmtk_spaces(
                         ObjectReference::from_raw_address(
                             Address::MAX.align_down(crate::util::constants::BYTES_IN_ADDRESS)
                         )
@@ -58,7 +58,7 @@ pub fn direct_hit() {
         || {
             SINGLE_OBJECT.with_fixture(|fixture| {
                 assert!(
-                    memory_manager::is_in_mmtk_spaces::<MockVM>(fixture.objref),
+                    memory_manager::is_in_mmtk_spaces(fixture.objref),
                     "The address of the allocated object should be in the space"
                 );
             });
@@ -86,7 +86,7 @@ pub fn large_offsets_aligned() {
                 };
                 // It's just a smoke test. It is hard to predict if the addr is still in any space,
                 // but it must not crash.
-                let _ = memory_manager::is_in_mmtk_spaces::<MockVM>(
+                let _ = memory_manager::is_in_mmtk_spaces(
                     ObjectReference::from_raw_address(addr).unwrap(),
                 );
             }
@@ -115,7 +115,7 @@ pub fn negative_offsets() {
                 };
                 // It's just a smoke test. It is hard to predict if the addr is still in any space,
                 // but it must not crash.
-                let _ = memory_manager::is_in_mmtk_spaces::<MockVM>(
+                let _ = memory_manager::is_in_mmtk_spaces(
                     ObjectReference::from_raw_address(
                         addr.align_down(crate::util::constants::BYTES_IN_ADDRESS),
                     )
diff --git a/src/vm/tests/mock_tests/mock_test_vm_layout_default.rs b/src/vm/tests/mock_tests/mock_test_vm_layout_default.rs
index 0a80ab5fda..8bd527d9d6 100644
--- a/src/vm/tests/mock_tests/mock_test_vm_layout_default.rs
+++ b/src/vm/tests/mock_tests/mock_test_vm_layout_default.rs
@@ -24,7 +24,7 @@ pub fn test_with_vm_layout(layout: Option<VMLayout>) {
     let addr = memory_manager::alloc(&mut fixture.mutator, 8, 8, 0, AllocationSemantics::Default);
     let obj = MockVM::object_start_to_ref(addr);
     // Test SFT
-    assert!(memory_manager::is_in_mmtk_spaces::<MockVM>(obj));
+    assert!(memory_manager::is_in_mmtk_spaces(obj));
     // Test mmapper
     assert!(memory_manager::is_mapped_address(addr));
 }
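Taken together, a binding resolving conservative interior pointers uses the simplified API roughly as follows (a sketch, assuming the `is_mmtk_object` feature; `max_object_size` is a binding-supplied bound, not an MMTk constant):

    use mmtk::memory_manager;
    use mmtk::util::{Address, ObjectReference};

    fn resolve_interior_pointer(ptr: Address, max_object_size: usize) -> Option<ObjectReference> {
        if ptr.is_zero() {
            return None; // cheap early-out; zero can never point into an object
        }
        // Searches the VO bits backwards from `ptr` for the base object containing it.
        memory_manager::find_object_from_internal_pointer(ptr, max_object_size)
    }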