Commit a87636c

Add missing docs for the rest of the code base (merge after #1026) (#1028)
This is the last PR for adding missing docs, and it closes
#309. All the public items
should be documented properly now, and `#![deny(missing_docs)]` is used
so any new public item that does not have rustdoc will cause the build
to fail.
qinsoon authored Nov 23, 2023
1 parent b066199 commit a87636c
Showing 30 changed files with 149 additions and 52 deletions.
2 changes: 1 addition & 1 deletion .github/scripts/ci-doc.sh
@@ -4,7 +4,7 @@
# If the output path is changed in this script, we need to update rustdoc.yml as well.

# deny warnings for rustdoc
export RUSTDOCFLAGS="-D warnings"
export RUSTDOCFLAGS="-D warnings -D missing_docs"

# Check cargo doc
# We document public and private items for MMTk developers (GC implementers).
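With `-D missing_docs` added to `RUSTDOCFLAGS` (and the `#![deny(missing_docs)]` approach mentioned in the commit message), any undocumented public item now fails the doc build. A minimal sketch of what the lint accepts and rejects; the item names are hypothetical, not part of mmtk-core:

```rust
// Illustrative only: `Example` and `undocumented` are made-up names.
// With the `missing_docs` lint denied, the commented-out item below
// would abort the doc build with "missing documentation for a function".

/// A documented public type: this satisfies the lint.
pub struct Example;

// pub fn undocumented() {}
```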
3 changes: 0 additions & 3 deletions docs/userguide/src/tutorial/code/mygc_semispace/global.rs
@@ -44,9 +44,6 @@ pub struct MyGC<VM: VMBinding> {
// ANCHOR: constraints
pub const MYGC_CONSTRAINTS: PlanConstraints = PlanConstraints {
moves_objects: true,
gc_header_bits: 2,
gc_header_words: 0,
num_specialized_scans: 1,
..PlanConstraints::default()
};
// ANCHOR_END: constraints
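For context, the constraints block relies on Rust's struct-update syntax together with the `const fn` `PlanConstraints::default()`, so a plan only spells out the fields it overrides. A minimal sketch of that pattern, using only the field shown in this diff:

```rust
use mmtk::plan::PlanConstraints;

// Sketch of the struct-update pattern used above: only `moves_objects` is set
// explicitly; every other constraint comes from the const default.
pub const MYGC_CONSTRAINTS: PlanConstraints = PlanConstraints {
    moves_objects: true,
    ..PlanConstraints::default()
};
```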
2 changes: 2 additions & 0 deletions docs/userguide/src/tutorial/code/mygc_semispace/mod.rs
@@ -1,5 +1,7 @@
// This module's code is unused When we compile this module with MMTk core. Allow it.
#![allow(dead_code)]
// Allow missing docs for public items in this module.
#![allow(missing_docs)]

mod gc_work; // Add
mod global;
1 change: 0 additions & 1 deletion docs/userguide/src/tutorial/mygc/ss/alloc.md
@@ -18,7 +18,6 @@ We will make the following changes:

1. Initialize `gc_header_bits` to 2. We reserve 2 bits in the header for GC use.
1. Initialize `moves_objects` to `true`.
1. Initialize `num_specialized_scans` to 1.

Finished code (step 1-3):
```
2 changes: 2 additions & 0 deletions src/build_info.rs
@@ -1,3 +1,5 @@
//! Some information for the current MMTk build.

mod raw {
// This includes a full list of the constants in built.rs generated by the 'built' crate.
// https://docs.rs/built/latest/built/index.html
4 changes: 0 additions & 4 deletions src/lib.rs
@@ -1,6 +1,3 @@
// TODO: We should fix missing docs for public items and turn this on (Issue #309).
// #![deny(missing_docs)]

// Allow this for now. Clippy suggests we should use Sft, Mmtk, rather than SFT and MMTK.
// According to its documentation (https://rust-lang.github.io/rust-clippy/master/index.html#upper_case_acronyms),
// with upper-case-acronyms-aggressive turned on, it should also warn us about SFTMap, VMBinding, GCWorker.
@@ -69,4 +66,3 @@ pub mod vm;
pub use crate::plan::{
AllocationSemantics, BarrierSelector, Mutator, MutatorContext, ObjectQueue, Plan,
};
pub use crate::policy::copy_context::PolicyCopyContext;
1 change: 1 addition & 0 deletions src/memory_manager.rs
@@ -434,6 +434,7 @@ pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: u
crate::util::malloc::free_with_size(mmtk, addr, old_size)
}

/// Get the current active malloc'd bytes. Here MMTk only accounts for bytes that are allocated through those 'counted malloc' functions.
#[cfg(feature = "malloc_counted_size")]
pub fn get_malloc_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
mmtk.state.malloc_bytes.load(Ordering::SeqCst)
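A small usage sketch for the newly documented accessor; the wrapper function is an assumption, while `get_malloc_bytes` and the `malloc_counted_size` feature gate come from the diff:

```rust
// Hypothetical binding-side helper, only compiled when mmtk-core is built
// with the `malloc_counted_size` feature, matching the #[cfg] above.
#[cfg(feature = "malloc_counted_size")]
fn report_counted_malloc<VM: mmtk::vm::VMBinding>(mmtk: &mmtk::MMTK<VM>) {
    let bytes = mmtk::memory_manager::get_malloc_bytes(mmtk);
    println!("bytes live from counted malloc: {bytes}");
}
```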
15 changes: 13 additions & 2 deletions src/mmtk.rs
@@ -50,7 +50,7 @@ use crate::util::rust_util::InitializeOnce;
// A global space function table that allows efficient dispatch space specific code for addresses in our heap.
pub static SFT_MAP: InitializeOnce<Box<dyn SFTMap>> = InitializeOnce::new();

// MMTk builder. This is used to set options before actually creating an MMTk instance.
/// MMTk builder. This is used to set options and other settings before actually creating an MMTk instance.
pub struct MMTKBuilder {
/// The options for this instance.
pub options: Options,
@@ -131,7 +131,8 @@ unsafe impl<VM: VMBinding> Sync for MMTK<VM> {}
unsafe impl<VM: VMBinding> Send for MMTK<VM> {}

impl<VM: VMBinding> MMTK<VM> {
pub fn new(options: Arc<Options>) -> Self {
/// Create an MMTK instance. This is not public. Bindings should use [`MMTKBuilder::build`].
pub(crate) fn new(options: Arc<Options>) -> Self {
// Initialize SFT first in case we need to use this in the constructor.
// The first call will initialize SFT map. Other calls will be blocked until SFT map is initialized.
crate::policy::sft_map::SFTRefStorage::pre_use_check();
@@ -219,6 +220,9 @@ impl<VM: VMBinding> MMTK<VM> {
}
}

/// Generic hook to allow benchmarks to be harnessed. MMTk will trigger a GC
/// to clear any residual garbage and start collecting statistics for the benchmark.
/// This is usually called by the benchmark harness as its last step before the actual benchmark.
pub fn harness_begin(&self, tls: VMMutatorThread) {
probe!(mmtk, harness_begin);
self.handle_user_collection_request(tls, true, true);
@@ -227,6 +231,9 @@
self.scheduler.enable_stat();
}

/// Generic hook to allow benchmarks to be harnessed. MMTk will stop collecting
/// statistics, and print out the collected statistics in a defined format.
/// This is usually called by the benchmark harness right after the actual benchmark.
pub fn harness_end(&'static self) {
self.stats.stop_all(self);
self.inside_harness.store(false, Ordering::SeqCst);
@@ -264,10 +271,12 @@
}
}

/// Return true if a collection is in progress.
pub fn gc_in_progress(&self) -> bool {
*self.state.gc_status.lock().unwrap() != GcStatus::NotInGC
}

/// Return true if a collection is in progress and past the preparatory stage.
pub fn gc_in_progress_proper(&self) -> bool {
*self.state.gc_status.lock().unwrap() == GcStatus::GcProper
}
@@ -338,6 +347,7 @@ impl<VM: VMBinding> MMTK<VM> {
self.gc_requester.request();
}

/// Get a reference to the plan.
pub fn get_plan(&self) -> &dyn Plan<VM = VM> {
unsafe { &**(self.plan.get()) }
}
@@ -352,6 +362,7 @@ impl<VM: VMBinding> MMTK<VM> {
&mut **(self.plan.get())
}

/// Get the run time options.
pub fn get_options(&self) -> &Options {
&self.options
}
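The harness hooks are meant to bracket the timed region of a benchmark. A hedged sketch of how a binding might call them; the wrapper function and the `bench` closure are assumptions, while `harness_begin`/`harness_end` and their behaviour are taken from the doc comments above:

```rust
use mmtk::util::opaque_pointer::VMMutatorThread;
use mmtk::vm::VMBinding;
use mmtk::MMTK;

// Hypothetical binding-side wrapper around the documented harness hooks.
fn run_harnessed<VM: VMBinding>(mmtk: &'static MMTK<VM>, tls: VMMutatorThread, bench: impl FnOnce()) {
    // Triggers a GC to clear residual garbage and starts collecting statistics.
    mmtk.harness_begin(tls);
    bench();
    // Stops collecting statistics and prints them.
    mmtk.harness_end();
}
```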
3 changes: 3 additions & 0 deletions src/plan/barriers.rs
@@ -17,11 +17,14 @@ use downcast_rs::Downcast;
/// VM bindings may also use this to enable the correct fast-path, if the fast-path is implemented in the binding.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum BarrierSelector {
/// No barrier is used.
NoBarrier,
/// Object remembering barrier is used.
ObjectBarrier,
}

impl BarrierSelector {
/// A const function to check if two barrier selectors are the same.
pub const fn equals(&self, other: BarrierSelector) -> bool {
// cast enum to u8 then compare. Otherwise, we cannot do it in a const fn.
*self as u8 == other as u8
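`equals` being a `const fn` is what lets plan constraints be computed at compile time, as the `GEN_CONSTRAINTS` hunk further down does with `ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier)`. A minimal sketch, assuming the crate-root re-export of `BarrierSelector` shown in the `lib.rs` hunk above:

```rust
use mmtk::BarrierSelector;

// Hypothetical constants: `equals` works in const items because it compares
// the enum discriminants as u8, as the comment above explains.
const ACTIVE_BARRIER: BarrierSelector = BarrierSelector::ObjectBarrier;
const NEEDS_LOG_BIT: bool = ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier);
```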
1 change: 1 addition & 0 deletions src/plan/generational/copying/global.rs
@@ -40,6 +40,7 @@ pub struct GenCopy<VM: VMBinding> {
pub copyspace1: CopySpace<VM>,
}

/// The plan constraints for the generational copying plan.
pub const GENCOPY_CONSTRAINTS: PlanConstraints = crate::plan::generational::GEN_CONSTRAINTS;

impl<VM: VMBinding> Plan for GenCopy<VM> {
1 change: 1 addition & 0 deletions src/plan/generational/immix/global.rs
@@ -50,6 +50,7 @@ pub struct GenImmix<VM: VMBinding> {
pub last_gc_was_full_heap: AtomicBool,
}

/// The plan constraints for the generational immix plan.
pub const GENIMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
// The maximum object size that can be allocated without LOS is restricted by the max immix object size.
// This might be too restrictive, as our default allocator is bump pointer (nursery allocator) which
3 changes: 0 additions & 3 deletions src/plan/generational/mod.rs
@@ -44,9 +44,6 @@ pub const FULL_NURSERY_GC: bool = false;
/// Constraints for generational plans. Each generational plan should overwrite based on this constant.
pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints {
moves_objects: true,
gc_header_bits: 2,
gc_header_words: 0,
num_specialized_scans: 1,
needs_log_bit: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier),
barrier: ACTIVE_BARRIER,
// We may trace duplicate edges in sticky immix (or any plan that uses object remembering barrier). See https://github.com/mmtk/mmtk-core/issues/743.
18 changes: 18 additions & 0 deletions src/plan/global.rs
@@ -139,6 +139,8 @@ pub fn create_gc_worker_context<VM: VMBinding>(
/// they may call a wrong method by mistake.
// TODO: Some methods that are not overriden can be moved from the trait to BasePlan.
pub trait Plan: 'static + HasSpaces + Sync + Downcast {
/// Get the plan constraints for the plan.
/// This returns a non-constant value. A constant value can be found in each plan's module if needed.
fn constraints(&self) -> &'static PlanConstraints;

/// Create a copy config for this plan. A copying GC plan MUST override this method,
@@ -148,21 +150,35 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
CopyConfig::default()
}

/// Get an immutable reference to the base plan. `BasePlan` is included by all the MMTk GC plans.
fn base(&self) -> &BasePlan<Self::VM>;

/// Get a mutable reference to the base plan. `BasePlan` is included by all the MMTk GC plans.
fn base_mut(&mut self) -> &mut BasePlan<Self::VM>;

/// Schedule work for the upcoming GC.
fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler<Self::VM>);

/// Get the common plan. CommonPlan is included by most MMTk GC plans.
fn common(&self) -> &CommonPlan<Self::VM> {
panic!("Common Plan not handled!")
}

/// Return a reference to `GenerationalPlan` to allow
/// access to methods specific to generational plans if the plan is a generational plan.
fn generational(
&self,
) -> Option<&dyn crate::plan::generational::global::GenerationalPlan<VM = Self::VM>> {
None
}

/// Get the current run time options.
fn options(&self) -> &Options {
&self.base().options
}

/// Get the allocator mapping between [`crate::AllocationSemantics`] and [`crate::util::alloc::AllocatorSelector`].
/// This defines what space this plan will allocate objects into for different semantics.
fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector>;

/// Prepare the plan before a GC. This is invoked in an initial step in the GC.
@@ -182,6 +198,8 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
/// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
fn end_of_gc(&mut self, _tls: VMWorkerThread) {}

/// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible.
/// The default implementation will force a full heap collection for generational plans.
fn notify_emergency_collection(&self) {
if let Some(gen) = self.generational() {
gen.force_full_heap_collection();
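A hedged sketch of how the newly documented accessors compose from a binding's point of view; `describe_plan` is a hypothetical helper, while `get_plan`, `constraints`, and `generational` are the methods documented in this diff:

```rust
use mmtk::vm::VMBinding;
use mmtk::MMTK;

// Hypothetical helper that inspects a few of the documented Plan accessors.
fn describe_plan<VM: VMBinding>(mmtk: &MMTK<VM>) {
    let plan = mmtk.get_plan();
    // Plan-wide constraints, e.g. whether this plan may move objects.
    let moves_objects = plan.constraints().moves_objects;
    // Generational-only methods are reachable only when the plan is generational.
    let is_generational = plan.generational().is_some();
    println!("moves_objects = {moves_objects}, generational = {is_generational}");
}
```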
4 changes: 1 addition & 3 deletions src/plan/immix/global.rs
@@ -35,11 +35,9 @@ pub struct Immix<VM: VMBinding> {
last_gc_was_defrag: AtomicBool,
}

/// The plan constraints for the immix plan.
pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
moves_objects: crate::policy::immix::DEFRAG,
gc_header_bits: 2,
gc_header_words: 0,
num_specialized_scans: 1,
// Max immix object size is half of a block.
max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
needs_prepare_mutator: false,
4 changes: 1 addition & 3 deletions src/plan/markcompact/global.rs
@@ -35,11 +35,9 @@ pub struct MarkCompact<VM: VMBinding> {
pub common: CommonPlan<VM>,
}

/// The plan constraints for the mark compact plan.
pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints {
moves_objects: true,
gc_header_bits: 2,
gc_header_words: 1,
num_specialized_scans: 2,
needs_forward_after_liveness: true,
max_non_los_default_alloc_bytes:
crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
4 changes: 1 addition & 3 deletions src/plan/marksweep/global.rs
@@ -35,11 +35,9 @@ pub struct MarkSweep<VM: VMBinding> {
ms: MarkSweepSpace<VM>,
}

/// The plan constraints for the mark sweep plan.
pub const MS_CONSTRAINTS: PlanConstraints = PlanConstraints {
moves_objects: false,
gc_header_bits: 2,
gc_header_words: 0,
num_specialized_scans: 1,
max_non_los_default_alloc_bytes: MAX_OBJECT_SIZE,
may_trace_duplicate_edges: true,
needs_prepare_mutator: !cfg!(feature = "malloc_mark_sweep")
2 changes: 1 addition & 1 deletion src/plan/mod.rs
@@ -34,7 +34,7 @@ pub use mutator_context::MutatorContext;

mod plan_constraints;
pub use plan_constraints::PlanConstraints;
pub use plan_constraints::DEFAULT_PLAN_CONSTRAINTS;
pub(crate) use plan_constraints::DEFAULT_PLAN_CONSTRAINTS;

mod tracing;
pub use tracing::{ObjectQueue, ObjectsClosure, VectorObjectQueue, VectorQueue};
30 changes: 27 additions & 3 deletions src/plan/mutator_context.rs
@@ -88,12 +88,13 @@ impl<VM: VMBinding> std::fmt::Debug for MutatorConfig<VM> {
// - MutatorConfig only has pointers/refs (including fat pointers), and is fixed sized.
#[repr(C)]
pub struct Mutator<VM: VMBinding> {
pub allocators: Allocators<VM>,
pub(crate) allocators: Allocators<VM>,
/// Holds some thread-local states for the barrier.
pub barrier: Box<dyn Barrier<VM>>,
/// The mutator thread that is bound with this Mutator struct.
pub mutator_tls: VMMutatorThread,
pub plan: &'static dyn Plan<VM = VM>,
pub config: MutatorConfig<VM>,
pub(crate) plan: &'static dyn Plan<VM = VM>,
pub(crate) config: MutatorConfig<VM>,
}

impl<VM: VMBinding> MutatorContext<VM> for Mutator<VM> {
@@ -266,29 +267,52 @@ impl<VM: VMBinding> Mutator<VM> {
// TODO: We should be able to remove this trait, as we removed per-plan mutator implementation, and there is no other type that implements this trait.
// The Mutator struct above is the only type that implements this trait. We should be able to merge them.
pub trait MutatorContext<VM: VMBinding>: Send + 'static {
/// Do the prepare work for this mutator.
fn prepare(&mut self, tls: VMWorkerThread);
/// Do the release work for this mutator.
fn release(&mut self, tls: VMWorkerThread);
/// Allocate memory for an object.
///
/// Arguments:
/// * `size`: the number of bytes required for the object.
/// * `align`: required alignment for the object.
/// * `offset`: offset associated with the alignment. The result plus the offset will be aligned to the given alignment.
/// * `allocator`: the allocation semantic used for this object.
fn alloc(
&mut self,
size: usize,
align: usize,
offset: usize,
allocator: AllocationSemantics,
) -> Address;
/// The slow path allocation. This is only useful when the binding
/// implements the fast path allocation, and would like to explicitly
/// call the slow path after the fast path allocation fails.
fn alloc_slow(
&mut self,
size: usize,
align: usize,
offset: usize,
allocator: AllocationSemantics,
) -> Address;
/// Perform post-allocation actions. For many allocators none are
/// required.
///
/// Arguments:
/// * `refer`: the newly allocated object.
/// * `bytes`: the size of the space allocated (in bytes).
/// * `allocator`: the allocation semantic used.
fn post_alloc(&mut self, refer: ObjectReference, bytes: usize, allocator: AllocationSemantics);
/// Flush per-mutator remembered sets and create GC work for the remembered sets.
fn flush_remembered_sets(&mut self) {
self.barrier().flush();
}
/// Flush the mutator context.
fn flush(&mut self) {
self.flush_remembered_sets();
}
/// Get the mutator thread for this mutator context. This is the same value as the argument supplied in
/// [`crate::memory_manager::bind_mutator`] when this mutator is created.
fn get_tls(&self) -> VMMutatorThread;
/// Get active barrier trait object
fn barrier(&mut self) -> &mut dyn Barrier<VM>;
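The `alloc`/`post_alloc` pair above is the core allocation sequence. A hedged sketch of how a binding might use it; the helper function and the `to_object_reference` closure are assumptions (turning the raw address into an `ObjectReference` is VM-specific), while the method signatures come from this diff:

```rust
use mmtk::util::{Address, ObjectReference};
use mmtk::vm::VMBinding;
use mmtk::{AllocationSemantics, Mutator, MutatorContext};

// Hypothetical allocation helper built on the documented MutatorContext methods.
fn allocate_object<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    to_object_reference: impl Fn(Address) -> ObjectReference,
) -> ObjectReference {
    // `offset` is 0, so the returned address itself is aligned to `align`.
    let addr = mutator.alloc(size, align, 0, AllocationSemantics::Default);
    let obj = to_object_reference(addr);
    // Post-allocation actions; for many allocators none are required.
    mutator.post_alloc(obj, size, AllocationSemantics::Default);
    obj
}
```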
1 change: 1 addition & 0 deletions src/plan/nogc/global.rs
@@ -34,6 +34,7 @@ pub struct NoGC<VM: VMBinding> {
pub los: ImmortalSpace<VM>,
}

/// The plan constraints for the no gc plan.
pub const NOGC_CONSTRAINTS: PlanConstraints = PlanConstraints {
collects_garbage: false,
..PlanConstraints::default()
1 change: 1 addition & 0 deletions src/plan/pageprotect/global.rs
@@ -27,6 +27,7 @@ pub struct PageProtect<VM: VMBinding> {
pub common: CommonPlan<VM>,
}

/// The plan constraints for the page protect plan.
pub const CONSTRAINTS: PlanConstraints = PlanConstraints {
moves_objects: false,
needs_prepare_mutator: false,