Skip to content

Commit

Permalink
Conservative stack scanning (#203)
Browse files Browse the repository at this point in the history
This PR ports #157 and
#184 to dev.

It also adds an optimization that if a task is not started, the
conservative stack scanning can be skipped for the task.

Merge with mmtk/julia#80.

---------

Co-authored-by: Yi Lin <qinsoon@gmail.com>
  • Loading branch information
udesou and qinsoon authored Dec 18, 2024
1 parent 0a97e82 commit a6f8cac
Show file tree
Hide file tree
Showing 9 changed files with 248 additions and 67 deletions.
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ MMTK_BUILD=release MMTK_JULIA_DIR=`pwd`/mmtk-julia make -C julia # or "MMTK_BUI
```

If you would like debugging information in your release build of MMTk, add `debug = true` under `[profile.release]` in `mmtk/Cargo.toml`.
If you would like to disable object movement, add the `non_moving` feature when building the Rust binding.

### Checking out and Building Julia with MMTk

Expand All @@ -38,6 +39,7 @@ Before building Julia, build the binding in `mmtk-julia/mmtk`. You must have alr

In `mmtk-core` we currently support either Immix or StickyImmix implementations.
Build it with `cargo build --features immix` or `cargo build --features stickyimmix`.
Optionally, add the `non_moving` feature when building the Rust binding to disable object movement; movement is enabled by default (although we currently pin the vast majority of objects).
Add `--release` at the end if you would like to have a release build, otherwise it is a debug build.
For a release build with debugging information, first add `debug = true` under `[profile.release]` in `mmtk/Cargo.toml`.

Expand Down
13 changes: 7 additions & 6 deletions mmtk/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ edition = "2018"
[package.metadata.julia]
# Our CI matches the following line and extract mmtk/julia. If this line is updated, please check ci yaml files and make sure it works.
julia_repo = "https://github.com/mmtk/julia.git"
julia_version = "32d38fb974b955bc2c610a72617cfc59265fa260"
julia_version = "dfd0a0edcb9f2d101bf498d5bfce2bd74153772d"

[lib]
crate-type = ["cdylib"]
Expand Down Expand Up @@ -46,16 +46,17 @@ memoffset = "*"
# ykstackmaps = { git = "https://github.com/udesou/ykstackmaps.git", branch = "udesou-master", version = "*" }

[features]
default = ["mmtk/vm_space", "julia_copy_stack", "object_pinning", "is_mmtk_object", "mmtk/vo_bit_access"]
# We must build with default features
default = ["mmtk/vm_space", "julia_copy_stack", "mmtk/object_pinning", "mmtk/is_mmtk_object", "mmtk/vo_bit_access"]

# Plans
# Default features
julia_copy_stack = []

# Plans: choose one
nogc = []
immix = []
stickyimmix = ["mmtk/sticky_immix_non_moving_nursery", "mmtk/immix_smaller_block"]
marksweep = []
object_pinning = ["mmtk/object_pinning"]
is_mmtk_object = ["mmtk/is_mmtk_object"]

# This feature disables moving
non_moving = ["mmtk/immix_non_moving", "mmtk/immix_smaller_block"]
julia_copy_stack = []
2 changes: 2 additions & 0 deletions mmtk/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ fn main() {
let bindings = bindgen::Builder::default()
.header(format!("{}/src/julia.h", julia_dir))
.header(format!("{}/src/julia_internal.h", julia_dir))
.header(format!("{}/src/gc-common.h", julia_dir))
// Including the paths to depending .h files
.clang_arg("-I")
.clang_arg(format!("{}/mmtk/api", mmtk_dir))
Expand All @@ -51,6 +52,7 @@ fn main() {
.allowlist_item("jl_bt_element_t")
.allowlist_item("jl_taggedvalue_t")
.allowlist_item("_jl_module_using")
.allowlist_item("_bigval_t")
.allowlist_item("MMTkMutatorContext")
// --opaque-type MMTkMutatorContext
.opaque_type("MMTkMutatorContext")
Expand Down
34 changes: 9 additions & 25 deletions mmtk/src/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -338,7 +338,6 @@ pub extern "C" fn mmtk_set_vm_space(start: Address, size: usize) {

#[cfg(feature = "stickyimmix")]
set_side_log_bit_for_region(start, size);
#[cfg(feature = "is_mmtk_object")]
set_side_vo_bit_for_region(start, size);
}

Expand Down Expand Up @@ -371,7 +370,7 @@ pub extern "C" fn mmtk_memory_region_copy(
pub extern "C" fn mmtk_immortal_region_post_alloc(start: Address, size: usize) {
#[cfg(feature = "stickyimmix")]
set_side_log_bit_for_region(start, size);
#[cfg(feature = "is_mmtk_object")]

set_side_vo_bit_for_region(start, size);
}

Expand All @@ -385,7 +384,8 @@ fn set_side_log_bit_for_region(start: Address, size: usize) {
}
}

#[cfg(feature = "is_mmtk_object")]
// We have to set VO bit even if this is a non_moving build. Otherwise, assertions in mmtk-core
// will complain about seeing objects without VO bit.
fn set_side_vo_bit_for_region(start: Address, size: usize) {
debug!(
"Bulk set VO bit {} to {} ({} bytes)",
Expand Down Expand Up @@ -473,9 +473,10 @@ pub extern "C" fn mmtk_get_obj_size(obj: ObjectReference) -> usize {
}
}

#[cfg(all(feature = "object_pinning", not(feature = "non_moving")))]
#[no_mangle]
pub extern "C" fn mmtk_pin_object(object: ObjectReference) -> bool {
crate::early_return_for_non_moving_build!(false);

// We may in the future replace this with a check for the immix space (bound check), which should be much cheaper.
if mmtk_object_is_managed_by_mmtk(object.to_raw_address().as_usize()) {
memory_manager::pin_object(object)
Expand All @@ -485,9 +486,10 @@ pub extern "C" fn mmtk_pin_object(object: ObjectReference) -> bool {
}
}

#[cfg(all(feature = "object_pinning", not(feature = "non_moving")))]
#[no_mangle]
pub extern "C" fn mmtk_unpin_object(object: ObjectReference) -> bool {
crate::early_return_for_non_moving_build!(false);

if mmtk_object_is_managed_by_mmtk(object.to_raw_address().as_usize()) {
memory_manager::unpin_object(object)
} else {
Expand All @@ -496,9 +498,10 @@ pub extern "C" fn mmtk_unpin_object(object: ObjectReference) -> bool {
}
}

#[cfg(all(feature = "object_pinning", not(feature = "non_moving")))]
#[no_mangle]
pub extern "C" fn mmtk_is_pinned(object: ObjectReference) -> bool {
crate::early_return_for_non_moving_build!(false);

if mmtk_object_is_managed_by_mmtk(object.to_raw_address().as_usize()) {
memory_manager::is_pinned(object)
} else {
Expand All @@ -507,25 +510,6 @@ pub extern "C" fn mmtk_is_pinned(object: ObjectReference) -> bool {
}
}

// If the `non-moving` feature is selected, pinning/unpinning is a noop and simply returns false
#[cfg(all(feature = "object_pinning", feature = "non_moving"))]
#[no_mangle]
pub extern "C" fn mmtk_pin_object(_object: ObjectReference) -> bool {
false
}

#[cfg(all(feature = "object_pinning", feature = "non_moving"))]
#[no_mangle]
pub extern "C" fn mmtk_unpin_object(_object: ObjectReference) -> bool {
false
}

#[cfg(all(feature = "object_pinning", feature = "non_moving"))]
#[no_mangle]
pub extern "C" fn mmtk_is_pinned(_object: ObjectReference) -> bool {
false
}

#[no_mangle]
pub extern "C" fn get_mmtk_version() -> *const c_char {
crate::build_info::MMTK_JULIA_FULL_VERSION_STRING
Expand Down
51 changes: 34 additions & 17 deletions mmtk/src/collection.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use crate::{
jl_throw_out_of_memory_error,
};
use crate::{JuliaVM, USER_TRIGGERED_GC};
use log::{info, trace};
use log::{debug, trace};
use mmtk::util::alloc::AllocationError;
use mmtk::util::opaque_pointer::*;
use mmtk::vm::{Collection, GCThreadContext};
Expand All @@ -14,6 +14,7 @@ use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicU64, Ordering};
use crate::{BLOCK_FOR_GC, STW_COND, WORLD_HAS_STOPPED};

static GC_START: AtomicU64 = AtomicU64::new(0);
static CURRENT_GC_MAY_MOVE: AtomicBool = AtomicBool::new(true);

pub struct VMCollection {}

Expand All @@ -30,11 +31,18 @@ impl Collection<JuliaVM> for VMCollection {

trace!("Stopped the world!");

// Store if the current GC may move objects -- we will use it when the current GC finishes.
// We cache the value here just in case MMTk may clear it before we use the value.
CURRENT_GC_MAY_MOVE.store(
crate::SINGLETON.get_plan().current_gc_may_move_object(),
Ordering::SeqCst,
);

// Tell MMTk the stacks are ready.
{
use mmtk::vm::ActivePlan;
for mutator in crate::active_plan::VMActivePlan::mutators() {
info!("stop_all_mutators: visiting {:?}", mutator.mutator_tls);
debug!("stop_all_mutators: visiting {:?}", mutator.mutator_tls);
mutator_visitor(mutator);
}
}
Expand All @@ -46,6 +54,9 @@ impl Collection<JuliaVM> for VMCollection {
}

fn resume_mutators(_tls: VMWorkerThread) {
// unpin conservative roots
crate::conservative::unpin_conservative_roots();

// Get the end time of the GC
let end = unsafe { jl_hrtime() };
trace!("gc_end = {}", end);
Expand All @@ -64,7 +75,7 @@ impl Collection<JuliaVM> for VMCollection {
let &(_, ref cvar) = &*STW_COND.clone();
cvar.notify_all();

info!(
debug!(
"Live bytes = {}, total bytes = {}",
crate::api::mmtk_used_bytes(),
crate::api::mmtk_total_bytes()
Expand All @@ -74,27 +85,29 @@ impl Collection<JuliaVM> for VMCollection {
}

fn block_for_gc(_tls: VMMutatorThread) {
info!("Triggered GC!");
debug!("Triggered GC!");

unsafe { jl_mmtk_prepare_to_collect() };

info!("Finished blocking mutator for GC!");
debug!("Finished blocking mutator for GC!");
}

fn spawn_gc_thread(_tls: VMThread, ctx: GCThreadContext<JuliaVM>) {
// Just drop the join handle. The thread will run until the process quits.
let _ = std::thread::spawn(move || {
use mmtk::util::opaque_pointer::*;
use mmtk::util::Address;
let worker_tls = VMWorkerThread(VMThread(OpaquePointer::from_address(unsafe {
Address::from_usize(thread_id::get())
})));
match ctx {
GCThreadContext::Worker(w) => {
mmtk::memory_manager::start_worker(&SINGLETON, worker_tls, w)
let _ = std::thread::Builder::new()
.name("MMTk Worker".to_string())
.spawn(move || {
use mmtk::util::opaque_pointer::*;
use mmtk::util::Address;
let worker_tls = VMWorkerThread(VMThread(OpaquePointer::from_address(unsafe {
Address::from_usize(thread_id::get())
})));
match ctx {
GCThreadContext::Worker(w) => {
mmtk::memory_manager::start_worker(&SINGLETON, worker_tls, w)
}
}
}
});
});
}

fn schedule_finalization(_tls: VMWorkerThread) {}
Expand All @@ -121,14 +134,18 @@ pub fn is_current_gc_nursery() -> bool {
}
}

pub fn is_current_gc_moving() -> bool {
CURRENT_GC_MAY_MOVE.load(Ordering::SeqCst)
}

#[no_mangle]
pub extern "C" fn mmtk_block_thread_for_gc() {
AtomicBool::store(&BLOCK_FOR_GC, true, Ordering::SeqCst);

let &(ref lock, ref cvar) = &*STW_COND.clone();
let mut count = lock.lock().unwrap();

info!("Blocking for GC!");
debug!("Blocking for GC!");

AtomicBool::store(&WORLD_HAS_STOPPED, true, Ordering::SeqCst);

Expand Down
112 changes: 112 additions & 0 deletions mmtk/src/conservative.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
use crate::jl_task_stack_buffer;
use crate::julia_types::*;
use mmtk::memory_manager;
use mmtk::util::constants::BYTES_IN_ADDRESS;
use mmtk::util::{Address, ObjectReference};
use std::collections::HashSet;
use std::sync::Mutex;
lazy_static! {
    /// Object references discovered by conservative scanning during the current GC.
    /// Filled by `conservative_scan_range`, pinned by `pin_conservative_roots`,
    /// and drained/unpinned by `unpin_conservative_roots`.
    pub static ref CONSERVATIVE_ROOTS: Mutex<HashSet<ObjectReference>> = Mutex::new(HashSet::new());
}
/// Pin every object in `CONSERVATIVE_ROOTS` so the current GC cannot move it.
/// Roots for which `pin_object` returns `false` are dropped from the set, so
/// they will not be unpinned later. No-op for non-moving builds, and when the
/// `early_return_for_current_gc` macro skips the current GC (presumably when
/// the GC does not move objects — see the macro definition).
pub fn pin_conservative_roots() {
    crate::early_return_for_non_moving_build!(());
    crate::early_return_for_current_gc!();

    let mut roots = CONSERVATIVE_ROOTS.lock().unwrap();
    let n_roots = roots.len();
    // Keep only the roots pin_object() returns true for.
    // NOTE(review): assumes a `false` return means the object need not (or
    // cannot) be unpinned afterwards — confirm pin_object's return semantics.
    roots.retain(|obj| mmtk::memory_manager::pin_object(*obj));
    let n_pinned = roots.len();
    log::debug!("Conservative roots: {}, pinned: {}", n_roots, n_pinned);
}
/// Unpin the conservative roots pinned by `pin_conservative_roots` and clear
/// the set. Only objects that are still live after the GC are unpinned; dead
/// objects are simply dropped from the set untouched. No-op for non-moving
/// builds, and when the `early_return_for_current_gc` macro skips the
/// current GC.
pub fn unpin_conservative_roots() {
    crate::early_return_for_non_moving_build!(());
    crate::early_return_for_current_gc!();

    let mut roots = CONSERVATIVE_ROOTS.lock().unwrap();
    let n_pinned = roots.len();
    let mut n_live = 0;
    // drain() empties the set so the next GC starts with no stale roots.
    roots.drain().for_each(|obj| {
        if mmtk::memory_manager::is_live_object(obj) {
            n_live += 1;
            mmtk::memory_manager::unpin_object(obj);
        }
    });
    log::debug!(
        "Conservative roots: pinned: {}, unpinned/live {}",
        n_pinned,
        n_live
    );
}
/// Conservatively scan the stack buffer of the Julia task `ta`, recording any
/// word that looks like a pointer into MMTk-managed memory as a conservative
/// root. No-op for non-moving builds, and when the
/// `early_return_for_current_gc` macro skips the current GC.
pub fn mmtk_conservative_scan_task_stack(ta: *const jl_task_t) {
    crate::early_return_for_non_moving_build!(());
    crate::early_return_for_current_gc!();

    let mut size: u64 = 0;
    let mut ptid: i32 = 0;
    log::debug!("mmtk_conservative_scan_native_stack begin ta = {:?}", ta);
    // Ask the Julia runtime for the task's stack buffer; `size` and `ptid`
    // (presumably the owning thread id — confirm against the Julia runtime)
    // are out-parameters.
    let stk = unsafe { jl_task_stack_buffer(ta, &mut size as *mut _, &mut ptid as *mut _) };
    log::debug!(
        "mmtk_conservative_scan_native_stack continue stk = {}, size = {}, ptid = {:x}",
        stk,
        size,
        ptid
    );
    if !stk.is_zero() {
        log::debug!("Conservatively scan the stack");
        // See jl_guard_size
        // TODO: Are we sure there are always guard pages we need to skip?
        const JL_GUARD_PAGE: usize = 4096 * 8;
        let guard_page_start = stk + JL_GUARD_PAGE;
        log::debug!("Skip guard page: {}, {}", stk, guard_page_start);
        conservative_scan_range(guard_page_start, stk + size as usize);
    } else {
        // A zero address means the runtime returned no stack buffer for this task.
        log::warn!("Skip stack for {:?}", ta);
    }
}
/// Conservatively scan the saved context (`ctx`) of task `ta` for potential
/// MMTk object pointers. Scans the whole context struct, not just the register
/// area — see the TODO on `get_range`. No-op for non-moving builds, and when
/// the `early_return_for_current_gc` macro skips the current GC.
pub fn mmtk_conservative_scan_task_registers(ta: *const jl_task_t) {
    crate::early_return_for_non_moving_build!(());
    crate::early_return_for_current_gc!();

    let (lo, hi) = get_range(&unsafe { &*ta }.ctx);
    conservative_scan_range(lo, hi);
}
/// Conservatively scan the register context that was saved in the thread-local
/// GC state (`ctx_at_the_time_gc_started`) for potential MMTk object pointers.
/// No-op for non-moving builds, and when the `early_return_for_current_gc`
/// macro skips the current GC.
pub fn mmtk_conservative_scan_ptls_registers(ptls: &mut _jl_tls_states_t) {
    crate::early_return_for_non_moving_build!(());
    crate::early_return_for_current_gc!();

    let (lo, hi) = get_range(&((*ptls).gc_tls.ctx_at_the_time_gc_started));
    conservative_scan_range(lo, hi);
}
// TODO: This scans the entire context type, which is slower.
// We actually only need to scan registers.
/// Return the address range `[lo, hi)` occupied by the value `ctx` in memory.
fn get_range<T>(ctx: &T) -> (Address, Address) {
    let lo = Address::from_ptr(ctx);
    let hi = lo + std::mem::size_of::<T>();
    (lo, hi)
}
/// Walk the word-aligned addresses in `[lo, hi)` from high to low and insert
/// every word that looks like a pointer into MMTk's immix space into
/// `CONSERVATIVE_ROOTS`. `lo` is rounded up and `hi` rounded down to word
/// alignment before scanning.
fn conservative_scan_range(lo: Address, hi: Address) {
    // The high address is exclusive: step back one word if already aligned,
    // otherwise round down to the last fully-contained word.
    let hi = if hi.is_aligned_to(BYTES_IN_ADDRESS) {
        hi - BYTES_IN_ADDRESS
    } else {
        hi.align_down(BYTES_IN_ADDRESS)
    };
    let lo = lo.align_up(BYTES_IN_ADDRESS);
    log::trace!("Scan {} (lo) {} (hi)", lo, hi);

    // Take the lock once for the whole range instead of once per discovered
    // pointer; nothing else called inside the loop touches CONSERVATIVE_ROOTS,
    // so holding it across the scan cannot deadlock.
    let mut roots = CONSERVATIVE_ROOTS.lock().unwrap();
    let mut cursor = hi;
    while cursor >= lo {
        // SAFETY: cursor is word-aligned and stays within [lo, hi], which the
        // caller supplies as readable memory (a task stack or saved context).
        let addr = unsafe { cursor.load::<Address>() };
        if let Some(obj) = is_potential_mmtk_object(addr) {
            roots.insert(obj);
        }
        cursor -= BYTES_IN_ADDRESS;
    }
}
/// If `addr` might be an internal pointer into an object in MMTk's immix
/// space, return that object; otherwise return `None`.
fn is_potential_mmtk_object(addr: Address) -> Option<ObjectReference> {
    if crate::object_model::is_addr_in_immixspace(addr) {
        // We only care about immix space. If the object is in other spaces, we won't move them, and we don't need to pin them.
        memory_manager::find_object_from_internal_pointer(addr, usize::MAX)
    } else {
        None
    }
}
Loading

0 comments on commit a6f8cac

Please sign in to comment.