Discontiguous mark compact space support (#939)
This PR adds discontiguous heap support for mark compact GC, and rewrites the
forwarding pointer calculation and compaction code to be applicable to both
contiguous and discontiguous spaces.

Fixes #930
wenyuzhao authored Sep 5, 2023
1 parent 253e882 commit 40777ed
Showing 5 changed files with 153 additions and 87 deletions.
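In outline, the change replaces the old single linear scan over `common.start .. pr.cursor()` with a walk over the page resource's allocated regions, so the same two GC passes work whether the space is one contiguous range or a chain of discontiguous chunk runs. A minimal sketch of that shape, using a stand-in region list rather than MMTk's `MonotonePageResource` (the function and the addresses below are illustrative only, not MMTk API):

// Stand-in for `pr.iterate_allocated_regions()`: each item is (start, size) in bytes.
fn iterate_allocated_regions() -> impl Iterator<Item = (usize, usize)> {
    // Two hypothetical discontiguous chunk runs.
    let regions: Vec<(usize, usize)> = vec![(0x1000_0000, 4 << 20), (0x3000_0000, 4 << 20)];
    regions.into_iter()
}

fn main() {
    // Both GC passes (forwarding-pointer calculation and compaction) now take this
    // shape: an outer loop over regions, and an inner linear object scan per region.
    for (start, size) in iterate_allocated_regions() {
        let end = start + size;
        println!("linear scan objects in {:#x}..{:#x}", start, end);
        // ... per-object work: compute forwarding pointers, or copy objects ...
    }
}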
2 changes: 2 additions & 0 deletions src/plan/markcompact/global.rs
@@ -42,6 +42,8 @@ pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints {
gc_header_words: 1,
num_specialized_scans: 2,
needs_forward_after_liveness: true,
max_non_los_default_alloc_bytes:
crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
..PlanConstraints::default()
};

4 changes: 2 additions & 2 deletions src/policy/immortalspace.rs
@@ -166,15 +166,15 @@ impl<VM: VMBinding> ImmortalSpace<VM> {
.on_block_reset::<VM>(self.common.start, self.common.extent)
} else {
// Otherwise, we reset the mark bit for the allocated regions.
self.pr.for_allocated_regions(|addr, size| {
for (addr, size) in self.pr.iterate_allocated_regions() {
debug!(
"{:?}: reset mark bit from {} to {}",
self.name(),
addr,
addr + size
);
self.mark_state.on_block_reset::<VM>(addr, size);
})
}
}
}

136 changes: 77 additions & 59 deletions src/policy/markcompactspace.rs
@@ -1,3 +1,5 @@
use std::ops::Range;

use super::sft::SFT;
use super::space::{CommonSpace, Space};
use crate::plan::VectorObjectQueue;
@@ -314,73 +316,89 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
mark_bit != 0
}

pub fn to_be_compacted(object: ObjectReference) -> bool {
Self::is_marked(object)
fn to_be_compacted(object: &ObjectReference) -> bool {
Self::is_marked(*object)
}

pub fn calculate_forwarding_pointer(&self) {
let start = self.common.start;
let end = self.pr.cursor();
let mut to = start;

let linear_scan =
crate::util::linear_scan::ObjectIterator::<VM, MarkCompactObjectSize<VM>, true>::new(
start, end,
);
for obj in linear_scan.filter(|obj| Self::to_be_compacted(*obj)) {
let copied_size =
VM::VMObjectModel::get_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES;
let align = VM::VMObjectModel::get_align_when_copied(obj);
let offset = VM::VMObjectModel::get_align_offset_when_copied(obj);
to = align_allocation_no_fill::<VM>(to, align, offset);
let new_obj = VM::VMObjectModel::get_reference_when_copied_to(
obj,
to + Self::HEADER_RESERVED_IN_BYTES,
);

Self::store_header_forwarding_pointer(obj, new_obj);

trace!(
"Calculate forward: {} (size when copied = {}) ~> {} (size = {})",
obj,
VM::VMObjectModel::get_size_when_copied(obj),
to,
copied_size
);
/// Linear scan all the live objects in the given memory region
fn linear_scan_objects(&self, range: Range<Address>) -> impl Iterator<Item = ObjectReference> {
crate::util::linear_scan::ObjectIterator::<VM, MarkCompactObjectSize<VM>, true>::new(
range.start,
range.end,
)
}

to += copied_size;
pub fn calculate_forwarding_pointer(&self) {
let mut to_iter = self.pr.iterate_allocated_regions();
let Some((mut to_cursor, mut to_size)) = to_iter.next() else {
return;
};
let mut to_end = to_cursor + to_size;
for (from_start, size) in self.pr.iterate_allocated_regions() {
let from_end = from_start + size;
// linear scan the contiguous region
for obj in self
.linear_scan_objects(from_start..from_end)
.filter(Self::to_be_compacted)
{
let copied_size =
VM::VMObjectModel::get_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES;
let align = VM::VMObjectModel::get_align_when_copied(obj);
let offset = VM::VMObjectModel::get_align_offset_when_copied(obj);
// move to_cursor to the aligned start address
to_cursor = align_allocation_no_fill::<VM>(to_cursor, align, offset);
// move to the next to-block if there is not enough memory in the current region
if to_cursor + copied_size > to_end {
(to_cursor, to_size) = to_iter.next().unwrap();
to_end = to_cursor + to_size;
to_cursor = align_allocation_no_fill::<VM>(to_cursor, align, offset);
debug_assert!(to_cursor + copied_size <= to_end);
}
// Get copied object
let new_obj = VM::VMObjectModel::get_reference_when_copied_to(
obj,
to_cursor + Self::HEADER_RESERVED_IN_BYTES,
);
// update forwarding pointer
Self::store_header_forwarding_pointer(obj, new_obj);
trace!(
"Calculate forward: {} (size when copied = {}) ~> {} (size = {})",
obj,
VM::VMObjectModel::get_size_when_copied(obj),
to_cursor,
copied_size
);
// bump to_cursor
to_cursor += copied_size;
}
}
debug!("Calculate forward end: to = {}", to);
}

pub fn compact(&self) {
let start = self.common.start;
let end = self.pr.cursor();
let mut to = end;

let linear_scan =
crate::util::linear_scan::ObjectIterator::<VM, MarkCompactObjectSize<VM>, true>::new(
start, end,
);
for obj in linear_scan {
// clear the VO bit
vo_bit::unset_vo_bit::<VM>(obj);

let forwarding_pointer = Self::get_header_forwarding_pointer(obj);

trace!("Compact {} to {}", obj, forwarding_pointer);
if !forwarding_pointer.is_null() {
let mut to = Address::ZERO;
for (from_start, size) in self.pr.iterate_allocated_regions() {
let from_end = from_start + size;
for obj in self.linear_scan_objects(from_start..from_end) {
let copied_size = VM::VMObjectModel::get_size_when_copied(obj);
let new_object = forwarding_pointer;
Self::clear_header_forwarding_pointer(new_object);

// copy object
trace!(" copy from {} to {}", obj, new_object);
let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO);
// update VO bit,
vo_bit::set_vo_bit::<VM>(new_object);
to = new_object.to_object_start::<VM>() + copied_size;
debug_assert_eq!(end_of_new_object, to);
// clear the VO bit
vo_bit::unset_vo_bit::<VM>(obj);

let forwarding_pointer = Self::get_header_forwarding_pointer(obj);

trace!("Compact {} to {}", obj, forwarding_pointer);
if !forwarding_pointer.is_null() {
let new_object = forwarding_pointer;
Self::clear_header_forwarding_pointer(new_object);

// copy object
trace!(" copy from {} to {}", obj, new_object);
let end_of_new_object =
VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO);
// update VO bit,
vo_bit::set_vo_bit::<VM>(new_object);
to = new_object.to_object_start::<VM>() + copied_size;
debug_assert_eq!(end_of_new_object, to);
}
}
}

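The subtle part of the rewritten `calculate_forwarding_pointer` above is that the bump cursor for forwarding addresses also advances region by region: when the next object does not fit in the remainder of the current to-region, the cursor jumps to the start of the next one. A self-contained sketch of just that bookkeeping, with made-up region bounds and object sizes, ignoring alignment and the `HEADER_RESERVED_IN_BYTES` reservation:

// Assign forwarding addresses for `object_sizes` (bytes) by bump-allocating
// through `regions` (start, size) pairs. Mirrors the shape of the new
// `calculate_forwarding_pointer`, minus alignment and header handling.
fn forwarding_addresses(regions: &[(usize, usize)], object_sizes: &[usize]) -> Vec<usize> {
    let mut to_iter = regions.iter().copied();
    let (mut to_cursor, mut to_size) = to_iter.next().expect("at least one region");
    let mut to_end = to_cursor + to_size;
    let mut result = Vec::new();
    for &size in object_sizes {
        // Move to the next to-region if the object does not fit in this one.
        if to_cursor + size > to_end {
            (to_cursor, to_size) = to_iter.next().expect("enough to-space");
            to_end = to_cursor + to_size;
        }
        result.push(to_cursor);
        to_cursor += size; // bump the cursor past the forwarded object
    }
    result
}

fn main() {
    // Two 4 KiB regions and three 1.5 KiB objects: the third object does not
    // fit in the 1 KiB left in the first region, so it forwards into the second.
    let regions: [(usize, usize); 2] = [(0x1000, 0x1000), (0x9000, 0x1000)];
    let sizes: [usize; 3] = [0x600, 0x600, 0x600];
    assert_eq!(forwarding_addresses(&regions, &sizes), vec![0x1000, 0x1600, 0x9000]);
    println!("ok");
}

The real code additionally re-aligns the cursor after switching regions and asserts that the object then fits.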
2 changes: 0 additions & 2 deletions src/policy/space.rs
@@ -423,7 +423,6 @@ pub struct CommonSpace<VM: VMBinding> {

pub start: Address,
pub extent: usize,
pub head_discontiguous_region: Address,

pub vm_map: &'static dyn VMMap,
pub mmapper: &'static dyn Mmapper,
@@ -495,7 +494,6 @@ impl<VM: VMBinding> CommonSpace<VM> {
zeroed: args.plan_args.zeroed,
start: unsafe { Address::zero() },
extent: 0,
head_discontiguous_region: unsafe { Address::zero() },
vm_map: args.plan_args.vm_map,
mmapper: args.plan_args.mmapper,
needs_log_bit: args.plan_args.constraints.needs_log_bit,
96 changes: 72 additions & 24 deletions src/util/heap/monotonepageresource.rs
@@ -1,7 +1,9 @@
use super::layout::vm_layout::{BYTES_IN_CHUNK, PAGES_IN_CHUNK};
use crate::policy::space::required_chunks;
use crate::util::address::Address;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions::*;
use std::ops::Range;
use std::sync::{Mutex, MutexGuard};

use crate::util::alloc::embedded_meta_data::*;
@@ -253,22 +255,51 @@ impl<VM: VMBinding> MonotonePageResource<VM> {
}
}*/

pub fn reset_cursor(&self, _start: Address) {
pub fn reset_cursor(&self, top: Address) {
if self.common.contiguous {
let mut guard = self.sync.lock().unwrap();
let cursor = _start.align_up(crate::util::constants::BYTES_IN_PAGE);
let chunk = chunk_align_down(_start);
let start = match guard.conditional {
MonotonePageResourceConditional::Contiguous { start: _start, .. } => _start,
let cursor = top.align_up(crate::util::constants::BYTES_IN_PAGE);
let chunk = chunk_align_down(top);
let space_start = match guard.conditional {
MonotonePageResourceConditional::Contiguous { start, .. } => start,
_ => unreachable!(),
};
let pages = bytes_to_pages(_start - start);
let pages = bytes_to_pages(top - space_start);
self.common.accounting.reset();
self.common.accounting.reserve_and_commit(pages);
guard.current_chunk = chunk;
guard.cursor = cursor;
} else {
unimplemented!();
let mut chunk_start = self.common.get_head_discontiguous_region();
let mut release_regions = false;
let mut live_size = 0;
while !chunk_start.is_zero() {
let chunk_end = chunk_start
+ (self.common.vm_map.get_contiguous_region_chunks(chunk_start)
<< LOG_BYTES_IN_CHUNK);
let next_chunk_start = self.common.vm_map.get_next_contiguous_region(chunk_start);
if top >= chunk_start && top < chunk_end {
// This is the last live chunk
debug_assert!(!release_regions);
let mut guard = self.sync.lock().unwrap();
guard.current_chunk = chunk_start;
guard.sentinel = chunk_end;
guard.cursor = top.align_up(BYTES_IN_PAGE);
live_size += top - chunk_start;
// Release all the remaining regions
release_regions = true;
} else if release_regions {
// release this region
self.common.release_discontiguous_chunks(chunk_start);
} else {
// keep this live region
live_size += chunk_end - chunk_start;
}
chunk_start = next_chunk_start;
}
let pages = bytes_to_pages(live_size);
self.common.accounting.reset();
self.common.accounting.reserve_and_commit(pages);
}
}

@@ -294,28 +325,45 @@ impl<VM: VMBinding> MonotonePageResource<VM> {
}
}

/// Iterate through allocated regions, and invoke the given function for each region.
pub fn for_allocated_regions<F>(&self, mut f: F)
where
F: FnMut(Address, usize),
{
/// Iterate over all contiguous memory regions in this space.
/// For a contiguous space, this iterator should yield only once, returning a contiguous memory region covering the whole space.
pub fn iterate_allocated_regions(&self) -> impl Iterator<Item = (Address, usize)> + '_ {
struct Iter<'a, VM: VMBinding> {
pr: &'a MonotonePageResource<VM>,
contiguous_space: Option<Range<Address>>,
discontiguous_start: Address,
}
impl<VM: VMBinding> Iterator for Iter<'_, VM> {
type Item = (Address, usize);
fn next(&mut self) -> Option<Self::Item> {
if let Some(range) = self.contiguous_space.take() {
Some((range.start, range.end - range.start))
} else if self.discontiguous_start.is_zero() {
None
} else {
let start = self.discontiguous_start;
self.discontiguous_start = self.pr.vm_map().get_next_contiguous_region(start);
let size = self.pr.vm_map().get_contiguous_region_size(start);
Some((start, size))
}
}
}
let sync = self.sync.lock().unwrap();
match sync.conditional {
MonotonePageResourceConditional::Contiguous { start, .. } => {
let end = sync.cursor.align_up(BYTES_IN_CHUNK);
f(start, end - start);
let cursor = sync.cursor.align_up(BYTES_IN_CHUNK);
Iter {
pr: self,
contiguous_space: Some(start..cursor),
discontiguous_start: Address::ZERO,
}
}
MonotonePageResourceConditional::Discontiguous => {
if !sync.cursor.is_zero() {
f(sync.current_chunk, sync.cursor - sync.current_chunk);

let mut chunk = self.vm_map().get_next_contiguous_region(sync.current_chunk);
while !chunk.is_zero() {
let size = self.vm_map().get_contiguous_region_size(chunk);
f(chunk, size);

chunk = self.vm_map().get_next_contiguous_region(chunk);
}
let discontiguous_start = self.common.get_head_discontiguous_region();
Iter {
pr: self,
contiguous_space: None,
discontiguous_start,
}
}
}
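For the discontiguous case, the new `reset_cursor` keeps every region up to and including the one containing the compaction `top`, trims that region to `top`, and releases the rest, then recomputes the page accounting from the live bytes. A simplified model of that walk over plain (start, size) tuples, visited in the order the page resource iterates them (no `vm_map`, no locking; `reset_cursor_model` is an illustrative helper, not MMTk API):

// Given regions in iteration order and the new allocation `top`, return the live
// byte count and the start addresses of the regions to release. Simplified
// model of the discontiguous branch of `reset_cursor`.
fn reset_cursor_model(regions: &[(usize, usize)], top: usize) -> (usize, Vec<usize>) {
    let mut live_size = 0;
    let mut release_regions = false;
    let mut to_release = Vec::new();
    for &(start, size) in regions {
        let end = start + size;
        if top >= start && top < end {
            // This is the last live region: it stays, but only up to `top`.
            live_size += top - start;
            release_regions = true;
        } else if release_regions {
            // Every region visited after the one containing `top` is released.
            to_release.push(start);
        } else {
            // Regions visited before the one containing `top` stay fully live.
            live_size += size;
        }
    }
    (live_size, to_release)
}

fn main() {
    // Three 4 MiB regions; `top` sits 1 MiB into the second one.
    let regions: [(usize, usize); 3] =
        [(0x1000_0000, 4 << 20), (0x3000_0000, 4 << 20), (0x5000_0000, 4 << 20)];
    let top = 0x3000_0000 + (1 << 20);
    let (live, released) = reset_cursor_model(&regions, top);
    assert_eq!(live, (4 << 20) + (1 << 20)); // first region + 1 MiB of the second
    assert_eq!(released, vec![0x5000_0000]); // only the trailing region is released
    println!("live = {} bytes, released {} region(s)", live, released.len());
}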
