diff --git a/julia/mmtk_julia.c b/julia/mmtk_julia.c
index 16f914d7..88399ef7 100644
--- a/julia/mmtk_julia.c
+++ b/julia/mmtk_julia.c
@@ -1,5 +1,6 @@
 #include "mmtk_julia.h"
 #include "mmtk.h"
+#include "mmtkMutator.h"
 #include
 #include
 #include "gc.h"
@@ -36,23 +37,6 @@ JL_DLLEXPORT void (jl_mmtk_harness_end)(void)
     mmtk_harness_end();
 }
 
-STATIC_INLINE void* alloc_default_object(jl_ptls_t ptls, size_t size, int offset) {
-    int64_t delta = (-offset -(int64_t)(ptls->cursor)) & 15; // aligned to 16
-    uint64_t aligned_addr = (uint64_t)ptls->cursor + delta;
-
-    if(__unlikely(aligned_addr+size > (uint64_t)ptls->limit)) {
-        jl_ptls_t ptls2 = jl_current_task->ptls;
-        ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls2->cursor;
-        void* res = mmtk_alloc(ptls2->mmtk_mutator_ptr, size, 16, offset, 0);
-        ptls2->cursor = ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor;
-        ptls2->limit = ptls2->mmtk_mutator_ptr->allocators.immix[0].limit;
-        return res;
-    } else {
-        ptls->cursor = (void*) (aligned_addr+size);
-        return (void*) aligned_addr;
-    }
-}
-
 JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int pool_offset,
                                                   int osize, void *ty)
 {
@@ -62,16 +46,16 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int pool_offse
     jl_value_t *v;
     if ((uintptr_t)ty != jl_buff_tag) {
         // v needs to be 16 byte aligned, therefore v_tagged needs to be offset accordingly to consider the size of header
-        jl_taggedvalue_t *v_tagged = (jl_taggedvalue_t *)mmtk_alloc(ptls->mmtk_mutator_ptr, osize, 16, sizeof(jl_taggedvalue_t), 0); // (jl_taggedvalue_t *) alloc_default_object(ptls, osize, sizeof(jl_taggedvalue_t));
+        jl_taggedvalue_t *v_tagged = (jl_taggedvalue_t *)mmtk_alloc(&ptls->mmtk_mutator, osize, 16, sizeof(jl_taggedvalue_t), 0);
         v = jl_valueof(v_tagged);
-        mmtk_post_alloc(ptls->mmtk_mutator_ptr, v, osize, 0);
+        mmtk_post_alloc(&ptls->mmtk_mutator, v, osize, 0);
     } else {
         // allocating an extra word to store the size of buffer objects
-        jl_taggedvalue_t *v_tagged = (jl_taggedvalue_t *)mmtk_alloc(ptls->mmtk_mutator_ptr, osize + sizeof(jl_taggedvalue_t), 16, 0, 0); // (jl_taggedvalue_t *) alloc_default_object(ptls, osize + sizeof(jl_taggedvalue_t), 0);
+        jl_taggedvalue_t *v_tagged = (jl_taggedvalue_t *)mmtk_alloc(&ptls->mmtk_mutator, osize + sizeof(jl_taggedvalue_t), 16, 0, 0);
         jl_value_t* v_tagged_aligned = ((jl_value_t*)((char*)(v_tagged) + sizeof(jl_taggedvalue_t)));
         v = jl_valueof(v_tagged_aligned);
         mmtk_store_obj_size_c(v, osize + sizeof(jl_taggedvalue_t));
-        mmtk_post_alloc(ptls->mmtk_mutator_ptr, v, osize + sizeof(jl_taggedvalue_t), 0);
+        mmtk_post_alloc(&ptls->mmtk_mutator, v, osize + sizeof(jl_taggedvalue_t), 0);
     }
 
     ptls->gc_num.allocd += osize;
@@ -95,7 +79,7 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_big(jl_ptls_t ptls, size_t sz)
         jl_throw(jl_memory_exception);
     }
 
-    bigval_t *v = (bigval_t*)mmtk_alloc_large(ptls->mmtk_mutator_ptr, allocsz, JL_CACHE_BYTE_ALIGNMENT, 0, 2);
+    bigval_t *v = (bigval_t*)mmtk_alloc_large(&ptls->mmtk_mutator, allocsz, JL_CACHE_BYTE_ALIGNMENT, 0, 2);
 
     if (v == NULL) {
         assert(0 && "Allocation failed");
@@ -107,7 +91,7 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_big(jl_ptls_t ptls, size_t sz)
     ptls->gc_num.bigalloc++;
 
     jl_value_t *result = jl_valueof(&v->header);
-    mmtk_post_alloc(ptls->mmtk_mutator_ptr, result, allocsz, 2);
+    mmtk_post_alloc(&ptls->mmtk_mutator, result, allocsz, 2);
 
     return result;
 }
@@ -147,7 +131,7 @@ static void mmtk_sweep_malloced_arrays(void) JL_NOTSAFEPOINT
 extern void mark_metadata_scanned(jl_value_t* obj);
 extern int8_t check_metadata_scanned(jl_value_t* obj);
 
-int8_t object_has_been_scanned(jl_value_t* obj)
+int8_t object_has_been_scanned(void* obj)
 {
     uintptr_t tag = (uintptr_t)jl_typeof(obj);
     jl_datatype_t *vt = (jl_datatype_t*)tag;
@@ -164,7 +148,7 @@ int8_t object_has_been_scanned(jl_value_t* obj)
     return 0;
 }
 
-void mark_object_as_scanned(jl_value_t* obj) {
+void mark_object_as_scanned(void* obj) {
     if (jl_object_in_image((jl_value_t *)obj)) {
         jl_taggedvalue_t *o = jl_astaggedvalue(obj);
         o->bits.gc = GC_MARKED;
@@ -185,8 +169,9 @@ void mmtk_exit_from_safepoint(int8_t old_state) {
 // it will block until GC is done
 // that thread simply exits from block_for_gc without executing finalizers
 // when executing finalizers do not let another thread do GC (set a variable such that while that variable is true, no GC can be done)
-int8_t set_gc_initial_state(jl_ptls_t ptls)
+int8_t set_gc_initial_state(void* ptls_raw)
 {
+    jl_ptls_t ptls = (jl_ptls_t) ptls_raw;
     int8_t old_state = jl_atomic_load_relaxed(&((jl_ptls_t)ptls)->gc_state);
     jl_atomic_store_release(&((jl_ptls_t)ptls)->gc_state, JL_GC_STATE_WAITING);
     if (!jl_safepoint_start_gc()) {
@@ -231,8 +216,9 @@ void wait_for_the_world(void)
     }
 }
 
-size_t get_lo_size(jl_value_t* obj)
+size_t get_lo_size(void* obj_raw)
 {
+    jl_value_t* obj = (jl_value_t*) obj_raw;
     jl_taggedvalue_t *v = jl_astaggedvalue(obj);
     // bigval_header: but we cannot access the function here. So use container_of instead.
     bigval_t* hdr = container_of(v, bigval_t, header);
@@ -246,18 +232,12 @@ void set_jl_last_err(int e)
 
 int get_jl_last_err(void)
 {
-    gc_n_threads = jl_atomic_load_acquire(&jl_n_threads);
-    gc_all_tls_states = jl_atomic_load_relaxed(&jl_all_tls_states);
-    for (int t_i = 0; t_i < gc_n_threads; t_i++) {
-        jl_ptls_t ptls = gc_all_tls_states[t_i];
-        ptls->cursor = 0;
-        ptls->limit = 0;
-    }
     return errno;
 }
 
-void* get_obj_start_ref(jl_value_t* obj)
+void* get_obj_start_ref(void* obj_raw)
 {
+    jl_value_t* obj = (jl_value_t*) obj_raw;
     uintptr_t tag = (uintptr_t)jl_typeof(obj);
     jl_datatype_t *vt = (jl_datatype_t*)tag;
     void* obj_start_ref;
@@ -271,8 +251,9 @@ void* get_obj_start_ref(jl_value_t* obj)
     return obj_start_ref;
 }
 
-size_t get_so_size(jl_value_t* obj)
+size_t get_so_size(void* obj_raw)
 {
+    jl_value_t* obj = (jl_value_t*) obj_raw;
     uintptr_t tag = (uintptr_t)jl_typeof(obj);
     jl_datatype_t *vt = (jl_datatype_t*)tag;
 
@@ -392,8 +373,10 @@ size_t get_so_size(jl_value_t* obj)
     return 0;
 }
 
-void run_finalizer_function(jl_value_t *o, jl_value_t *ff, bool is_ptr)
+void run_finalizer_function(void *o_raw, void *ff_raw, bool is_ptr)
 {
+    jl_value_t *o = (jl_value_t*) o_raw;
+    jl_value_t *ff = (jl_value_t*) ff_raw;
     if (is_ptr) {
         run_finalizer(jl_current_task, (jl_value_t *)(((uintptr_t)o) | 1), (jl_value_t *)ff);
     } else {
@@ -423,7 +406,7 @@ void mmtk_jl_run_pending_finalizers(void* ptls) {
     }
 }
 
-void mmtk_jl_run_finalizers(jl_ptls_t ptls) {
+void mmtk_jl_run_finalizers(void* ptls) {
     // Only disable finalizers on current thread
     // Doing this on all threads is racy (it's impossible to check
     // or wait for finalizers on other threads without dead lock).
@@ -663,7 +646,7 @@ static void jl_gc_queue_remset_mmtk(jl_ptls_t ptls2)
     }
 }
 
-void calculate_roots(jl_ptls_t ptls)
+void calculate_roots(void* ptls_raw)
 {
     for (int t_i = 0; t_i < gc_n_threads; t_i++)
         gc_premark(gc_all_tls_states[t_i]);
@@ -682,8 +665,8 @@ void calculate_roots(jl_ptls_t ptls)
     queue_roots();
 }
 
-JL_DLLEXPORT void scan_julia_exc_obj(jl_task_t* obj, closure_pointer closure, ProcessEdgeFn process_edge) {
-    jl_task_t *ta = (jl_task_t*)obj;
+JL_DLLEXPORT void scan_julia_exc_obj(void* obj_raw, closure_pointer closure, ProcessEdgeFn process_edge) {
+    jl_task_t *ta = (jl_task_t*)obj_raw;
 
     if (ta->excstack) { // inlining label `excstack` from mark_loop
         // if it is not managed by MMTk, nothing needs to be done because the object does not need to be scanned
@@ -745,8 +728,9 @@ const bool PRINT_OBJ_TYPE = false;
  * directly (not an edge), specifying whether to scan the object or not; and only scan the object
  * (necessary for boot image / non-MMTk objects)
 **/
-JL_DLLEXPORT void scan_julia_obj(jl_value_t* obj, closure_pointer closure, ProcessEdgeFn process_edge, ProcessOffsetEdgeFn process_offset_edge)
+JL_DLLEXPORT void scan_julia_obj(void* obj_raw, closure_pointer closure, ProcessEdgeFn process_edge, ProcessOffsetEdgeFn process_offset_edge)
 {
+    jl_value_t* obj = (jl_value_t*) obj_raw;
     uintptr_t tag = (uintptr_t)jl_typeof(obj);
     jl_datatype_t *vt = (jl_datatype_t*)tag; // type of obj
 
@@ -962,6 +946,10 @@ void update_gc_time(uint64_t inc) {
     gc_num.total_time += inc;
 }
 
+uintptr_t get_abi_structs_checksum_c(void) {
+    return sizeof(MMTkMutatorContext);
+}
+
 Julia_Upcalls mmtk_upcalls = (Julia_Upcalls) {
     .scan_julia_obj = scan_julia_obj,
     .scan_julia_exc_obj = scan_julia_exc_obj,
@@ -986,4 +974,5 @@ Julia_Upcalls mmtk_upcalls = (Julia_Upcalls) {
     .exit_from_safepoint = mmtk_exit_from_safepoint,
     .jl_hrtime = jl_hrtime,
     .update_gc_time = update_gc_time,
+    .get_abi_structs_checksum_c = get_abi_structs_checksum_c,
 };
diff --git a/julia/mmtk_julia.h b/julia/mmtk_julia.h
index dc4dcd97..f946a8e1 100644
--- a/julia/mmtk_julia.h
+++ b/julia/mmtk_julia.h
@@ -3,32 +3,16 @@
 
 extern Julia_Upcalls mmtk_upcalls;
 
-void calculate_roots(jl_ptls_t ptls);
-
-void run_finalizer_function(jl_value_t *o, jl_value_t *ff, bool is_ptr);
-
 int get_jl_last_err(void);
 
 void set_jl_last_err(int e);
 
-size_t get_lo_size(jl_value_t* obj);
-
-int8_t set_gc_initial_state(jl_ptls_t ptls);
-
 void set_gc_final_state(int8_t old_state);
 
 int set_gc_running_state(jl_ptls_t ptls);
 
 void set_gc_old_state(int8_t old_state);
 
-void mark_object_as_scanned(jl_value_t* obj);
-
-int8_t object_has_been_scanned(jl_value_t* obj);
-
 void mmtk_jl_gc_run_all_finalizers(void);
 
-void mmtk_jl_run_finalizers(jl_ptls_t tls);
-
 void mmtk_jl_run_pending_finalizers(void* tls);
-
-JL_DLLEXPORT void scan_julia_obj(jl_value_t* obj, closure_pointer closure, ProcessEdgeFn process_edge, ProcessOffsetEdgeFn process_offset_edge);
diff --git a/mmtk/Cargo.toml b/mmtk/Cargo.toml
index 8dee5b7e..af1ec561 100644
--- a/mmtk/Cargo.toml
+++ b/mmtk/Cargo.toml
@@ -10,7 +10,7 @@ edition = "2018"
 
 [package.metadata.julia]
 # Our CI matches the following line and extract mmtk/julia. If this line is updated, please check ci yaml files and make sure it works.
 julia_repo = "https://github.com/mmtk/julia.git"
-julia_version = "0eeb64b0191b08b0ce7b59a66dea6139db60dbd2"
+julia_version = "98a66ba3c0925ea21bfe051a191210eeae7df0f2"
 
 [lib]
 crate-type = ["staticlib", "rlib", "dylib"]
diff --git a/mmtk/api/mmtk.h b/mmtk/api/mmtk.h
index 8105f49d..263d5f70 100644
--- a/mmtk/api/mmtk.h
+++ b/mmtk/api/mmtk.h
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include "gc.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -24,7 +23,7 @@ typedef void (*ProcessOffsetEdgeFn)(closure_pointer closure, void* slot, int off
  * Allocation
  */
 extern MMTk_Mutator mmtk_bind_mutator(void *tls, int tid);
-extern void mmtk_add_mutator_ref(void* mutator_ref);
+extern void mmtk_post_bind_mutator(MMTk_Mutator mutator, MMTk_Mutator original_mutator);
 extern void mmtk_destroy_mutator(MMTk_Mutator mutator);
 extern void* mmtk_alloc(MMTk_Mutator mutator, size_t size,
@@ -78,29 +77,30 @@ extern const void* MMTK_SIDE_LOG_BIT_BASE_ADDRESS;
 // * int is 4 bytes
 // * size_t is 8 bytes
 typedef struct {
-    void (* scan_julia_obj) (jl_value_t* obj, closure_pointer closure, ProcessEdgeFn process_edge, ProcessOffsetEdgeFn process_offset_edge);
-    void (* scan_julia_exc_obj) (jl_task_t* obj, closure_pointer closure, ProcessEdgeFn process_edge);
+    void (* scan_julia_obj) (void* obj, closure_pointer closure, ProcessEdgeFn process_edge, ProcessOffsetEdgeFn process_offset_edge);
+    void (* scan_julia_exc_obj) (void* obj, closure_pointer closure, ProcessEdgeFn process_edge);
     void* (* get_stackbase) (int16_t tid);
-    void (* calculate_roots) (jl_ptls_t tls);
-    void (* run_finalizer_function) (jl_value_t* obj, jl_value_t* function, bool is_ptr);
+    void (* calculate_roots) (void* tls);
+    void (* run_finalizer_function) (void* obj, void* function, bool is_ptr);
     int (* get_jl_last_err) (void);
     void (* set_jl_last_err) (int e);
-    size_t (* get_lo_size) (jl_value_t* obj);
-    size_t (* get_so_size) (jl_value_t* obj);
-    void* (* get_obj_start_ref) (jl_value_t* obj);
+    size_t (* get_lo_size) (void* obj);
+    size_t (* get_so_size) (void* obj);
+    void* (* get_obj_start_ref) (void* obj);
     void (* wait_for_the_world) (void);
-    int8_t (* set_gc_initial_state) (jl_ptls_t tls);
+    int8_t (* set_gc_initial_state) (void* tls);
     void (* set_gc_final_state) (int8_t old_state);
     void (* set_gc_old_state) (int8_t old_state);
-    void (* mmtk_jl_run_finalizers) (jl_ptls_t tls);
+    void (* mmtk_jl_run_finalizers) (void* tls);
     void (* jl_throw_out_of_memory_error) (void);
-    void (* mark_object_as_scanned) (jl_value_t* obj);
-    int8_t (* object_has_been_scanned) (jl_value_t* obj);
+    void (* mark_object_as_scanned) (void* obj);
+    int8_t (* object_has_been_scanned) (void* obj);
     void (* sweep_malloced_array) (void);
     void (* wait_in_a_safepoint) (void);
     void (* exit_from_safepoint) (int8_t old_state);
     uint64_t (* jl_hrtime) (void);
     void (* update_gc_time) (uint64_t);
+    uintptr_t (* get_abi_structs_checksum_c) (void);
 } Julia_Upcalls;
 
 /**
diff --git a/mmtk/api/mmtkMutator.h b/mmtk/api/mmtkMutator.h
index 421cb07f..ba1d5ab0 100644
--- a/mmtk/api/mmtkMutator.h
+++ b/mmtk/api/mmtkMutator.h
@@ -15,11 +15,12 @@ typedef struct {
 } RustDynPtr;
 
 // These constants should match the constants defind in mmtk::util::alloc::allocators
-// const int MAX_BUMP_ALLOCATORS = 6;
-// const int MAX_LARGE_OBJECT_ALLOCATORS = 2;
-// const int MAX_MALLOC_ALLOCATORS = 1;
-// const int MAX_IMMIX_ALLOCATORS = 1;
-// const int MAX_MARK_COMPACT_ALLOCATORS = 1;
+#define MAX_BUMP_ALLOCATORS 6
+#define MAX_LARGE_OBJECT_ALLOCATORS 2
+#define MAX_MALLOC_ALLOCATORS 1
+#define MAX_IMMIX_ALLOCATORS 1
+#define MAX_FREE_LIST_ALLOCATORS 2
+#define MAX_MARK_COMPACT_ALLOCATORS 1
 
 // The following types should have the same layout as the types with the same name in MMTk core (Rust)
 
@@ -51,25 +52,46 @@ typedef struct {
     uint8_t _align[7];
     uint8_t line_opt_tag;
     uintptr_t line_opt;
-    uint8_t alloc_slow_for_stress;
 } ImmixAllocator;
 
+typedef struct {
+    void* Address;
+} FLBlock;
+
+typedef struct {
+    FLBlock first;
+    FLBlock last;
+    size_t size;
+    char lock;
+} FLBlockList;
+
+typedef struct {
+    void* tls;
+    void* space;
+    RustDynPtr plan;
+    FLBlockList* available_blocks;
+    FLBlockList* available_blocks_stress;
+    FLBlockList* unswept_blocks;
+    FLBlockList* consumed_blocks;
+} FreeListAllocator;
+
 typedef struct {
     void* tls;
     void* space;
     RustDynPtr plan;
-} MMTkMallocAllocator;
+} MMTkMallocAllocator; // Prefix with MMTk to avoid name clash
 
 typedef struct {
     BumpAllocator bump_allocator;
 } MarkCompactAllocator;
 
 typedef struct {
-    BumpAllocator bump_pointer[6];
-    LargeObjectAllocator large_object[2];
-    MMTkMallocAllocator malloc[1];
-    ImmixAllocator immix[1];
-    MarkCompactAllocator markcompact[1];
+    BumpAllocator bump_pointer[MAX_BUMP_ALLOCATORS];
+    LargeObjectAllocator large_object[MAX_LARGE_OBJECT_ALLOCATORS];
+    MMTkMallocAllocator malloc[MAX_MALLOC_ALLOCATORS];
+    ImmixAllocator immix[MAX_IMMIX_ALLOCATORS];
+    FreeListAllocator free_list[MAX_FREE_LIST_ALLOCATORS];
+    MarkCompactAllocator markcompact[MAX_MARK_COMPACT_ALLOCATORS];
 } Allocators;
 
 typedef struct {
@@ -81,9 +103,10 @@ typedef struct {
 
 typedef struct {
     Allocators allocators;
-    void* barrier;
+    RustDynPtr barrier;
     void* mutator_tls;
     RustDynPtr plan;
     MutatorConfig config;
 } MMTkMutatorContext;
+
 #endif // MMTK_MUTATOR_HPP
\ No newline at end of file
diff --git a/mmtk/runtime/runtime_gc_x64.c b/mmtk/runtime/runtime_gc_x64.c
index 42402795..34245dd4 100644
--- a/mmtk/runtime/runtime_gc_x64.c
+++ b/mmtk/runtime/runtime_gc_x64.c
@@ -5,14 +5,6 @@
 
 long JULIA_HEADER_SIZE = 0;
 
-void* get_mutator_ref(void* mutator) {
-    return mutator;
-}
-
-void* get_mutator_from_ref(void* mutator) {
-    return mutator;
-}
-
 extern void mmtk_start_spawned_worker_thread(void*, void*);
 extern void mmtk_start_spawned_controller_thread(void*, void*);
diff --git a/mmtk/src/active_plan.rs b/mmtk/src/active_plan.rs
index ddba9730..d1caa76c 100644
--- a/mmtk/src/active_plan.rs
+++ b/mmtk/src/active_plan.rs
@@ -1,5 +1,5 @@
 use crate::JuliaVM;
-use crate::{get_mutator_from_ref, MUTATORS, MUTATOR_TLS, SINGLETON};
+use crate::{MUTATORS, SINGLETON};
 use mmtk::util::opaque_pointer::*;
 use mmtk::util::Address;
 use mmtk::vm::ActivePlan;
@@ -7,17 +7,22 @@ use mmtk::Mutator;
 use mmtk::Plan;
 use mmtk::{plan::ObjectQueue, scheduler::GCWorker, util::ObjectReference};
 
+use std::collections::HashMap;
 use std::sync::RwLockReadGuard;
 
 pub struct JuliaMutatorIterator<'a> {
-    guard: RwLockReadGuard<'a, Vec<ObjectReference>>,
+    // We do not use this field, but this lock guard makes sure that no concurrent access to MUTATORS.
+    _guard: RwLockReadGuard<'a, HashMap<Address, Address>>,
+    vec: Vec<Address>,
     cursor: usize,
 }
 
 impl<'a> JuliaMutatorIterator<'a> {
-    fn new(guard: RwLockReadGuard<'a, Vec<ObjectReference>>) -> Self {
+    fn new(guard: RwLockReadGuard<'a, HashMap<Address, Address>>) -> Self {
+        let vec = guard.keys().map(|addr| *addr).collect();
         Self {
-            guard: guard,
+            _guard: guard,
+            vec,
             cursor: 0,
         }
     }
@@ -27,20 +32,12 @@ impl<'a> Iterator for JuliaMutatorIterator<'a> {
     type Item = &'a mut Mutator<JuliaVM>;
 
     fn next(&mut self) -> Option<Self::Item> {
-        let ref mutators = self.guard;
-
         let mutator_idx = self.cursor;
         self.cursor += 1;
 
-        let mutator = mutators.get(mutator_idx);
-
-        match mutator {
-            Some(m) => {
-                let mutator = unsafe { get_mutator_from_ref(*m) };
-                Some(unsafe { &mut *mutator })
-            }
-            None => None,
-        }
+        self.vec
+            .get(mutator_idx)
+            .map(|addr| unsafe { &mut *(addr.to_mut_ptr::<Mutator<JuliaVM>>()) })
     }
 }
 
@@ -57,12 +54,7 @@ impl ActivePlan<JuliaVM> for VMActivePlan {
 
     fn is_mutator(tls: VMThread) -> bool {
         // FIXME have a tls field to check whether it is a mutator tls
-        let tls_str = format!("{:?}", tls);
-        let is_mutator = MUTATOR_TLS.read().unwrap().contains(&tls_str);
-        if !is_mutator {
-            println!("Is the tls {:?} a mutator? {}", tls_str, is_mutator);
-        }
-        is_mutator
+        MUTATORS.read().unwrap().keys().find(|mutator_addr| unsafe { &*mutator_addr.to_mut_ptr::<Mutator<JuliaVM>>() }.mutator_tls.0 == tls).is_some()
     }
 
     fn mutator(_tls: VMMutatorThread) -> &'static mut Mutator<JuliaVM> {
diff --git a/mmtk/src/api.rs b/mmtk/src/api.rs
index 61d6b226..b006f284 100644
--- a/mmtk/src/api.rs
+++ b/mmtk/src/api.rs
@@ -10,8 +10,8 @@ use crate::JULIA_HEADER_SIZE;
 use crate::SINGLETON;
 use crate::UPCALLS;
 use crate::{
-    get_mutator_ref, set_julia_obj_header_size, ARE_MUTATORS_BLOCKED, BUILDER, DISABLED_GC,
-    FINALIZERS_RUNNING, MUTATORS, MUTATOR_TLS, USER_TRIGGERED_GC,
+    set_julia_obj_header_size, BUILDER, DISABLED_GC, FINALIZERS_RUNNING, MUTATORS,
+    USER_TRIGGERED_GC,
 };
 use crate::{ROOT_EDGES, ROOT_NODES};
 
@@ -24,10 +24,8 @@ use mmtk::util::opaque_pointer::*;
 use mmtk::util::{Address, ObjectReference, OpaquePointer};
 use mmtk::AllocationSemantics;
 use mmtk::Mutator;
-use std::collections::HashMap;
 use std::ffi::CStr;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::RwLockWriteGuard;
 
 #[no_mangle]
 pub extern "C" fn mmtk_gc_init(
@@ -42,6 +40,12 @@ pub extern "C" fn mmtk_gc_init(
         set_julia_obj_header_size(header_size);
     };
 
+    // Assert to make sure our ABI is correct
+    assert_eq!(
+        unsafe { ((*UPCALLS).get_abi_structs_checksum_c)() },
+        crate::util::get_abi_structs_checksum_rust()
+    );
+
     {
         let mut builder = BUILDER.lock().unwrap();
 
@@ -118,32 +122,40 @@ pub extern "C" fn mmtk_start_control_collector(
 
 #[no_mangle]
 pub extern "C" fn mmtk_bind_mutator(tls: VMMutatorThread, tid: usize) -> *mut Mutator<JuliaVM> {
-    let mut are_mutators_blocked: RwLockWriteGuard<HashMap<usize, AtomicBool>> =
-        ARE_MUTATORS_BLOCKED.write().unwrap();
-    are_mutators_blocked.insert(tid, AtomicBool::new(false));
     let mutator_box = memory_manager::bind_mutator(&SINGLETON, tls);
     let res = Box::into_raw(mutator_box);
-    let mutator_ref = unsafe { get_mutator_ref(res) };
 
     info!("Binding mutator {:?} to thread id = {}", res, tid);
 
-    MUTATORS.write().unwrap().push(mutator_ref);
-
-    let tls_str = format!("{:?}", tls.0);
-    MUTATOR_TLS.write().unwrap().insert(tls_str);
 
     res
 }
 
 #[no_mangle]
-pub extern "C" fn mmtk_add_mutator_ref(mutator_ref: ObjectReference) {
-    MUTATORS.write().unwrap().push(mutator_ref);
+pub extern "C" fn mmtk_post_bind_mutator(
+    mutator: *mut Mutator<JuliaVM>,
+    original_box_mutator: *mut Mutator<JuliaVM>,
+) {
+    // We have to store the original boxed mutator. Otherwise, we may have dangling pointers in mutator.
+    MUTATORS.write().unwrap().insert(
+        Address::from_mut_ptr(mutator),
+        Address::from_mut_ptr(original_box_mutator),
+    );
 }
 
 #[no_mangle]
 pub extern "C" fn mmtk_destroy_mutator(mutator: *mut Mutator<JuliaVM>) {
-    memory_manager::destroy_mutator(unsafe { &mut *mutator })
+    // destroy the mutator with MMTk.
+    memory_manager::destroy_mutator(unsafe { &mut *mutator });
+
+    let mut mutators = MUTATORS.write().unwrap();
+    let key = Address::from_mut_ptr(mutator);
+
+    // Clear the original boxed mutator
+    let orig_mutator = mutators.get(&key).unwrap();
+    let _ = unsafe { Box::from_raw(orig_mutator.to_mut_ptr::<Mutator<JuliaVM>>()) };
+
+    // Remove from our hashmap
+    mutators.remove(&key);
 }
 
 #[no_mangle]
@@ -394,6 +406,7 @@ pub extern "C" fn mmtk_malloc_aligned(size: usize, align: usize) -> Address {
     let extra = (align - 1) + ptr_size + size_size;
 
     let mem = memory_manager::counted_malloc(&SINGLETON, size + extra);
+
     let result = (mem + extra) & !(align - 1);
     let result = unsafe { Address::from_usize(result) };
diff --git a/mmtk/src/lib.rs b/mmtk/src/lib.rs
index b013988d..3aef983f 100644
--- a/mmtk/src/lib.rs
+++ b/mmtk/src/lib.rs
@@ -15,7 +15,8 @@ use mmtk::Mutator;
 use mmtk::MMTK;
 use reference_glue::JuliaFinalizableObject;
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
+use std::collections::HashSet;
 use std::ptr::null_mut;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Condvar, Mutex, RwLock};
@@ -91,8 +92,6 @@ pub static DISABLED_GC: AtomicBool = AtomicBool::new(false);
 pub static USER_TRIGGERED_GC: AtomicBool = AtomicBool::new(false);
 
 lazy_static! {
-    pub static ref ARE_MUTATORS_BLOCKED: RwLock<HashMap<usize, AtomicBool>> =
-        RwLock::new(HashMap::new());
     pub static ref STW_COND: Arc<(Mutex<usize>, Condvar)> =
         Arc::new((Mutex::new(0), Condvar::new()));
     pub static ref STOP_MUTATORS: Arc<(Mutex<usize>, Condvar)> =
@@ -101,8 +100,12 @@
     pub static ref ROOT_EDGES: Mutex<HashSet<Address>> = Mutex::new(HashSet::new());
     pub static ref FINALIZER_ROOTS: RwLock<HashSet<JuliaFinalizableObject>> =
         RwLock::new(HashSet::new());
-    pub static ref MUTATOR_TLS: RwLock<HashSet<String>> = RwLock::new(HashSet::new());
-    pub static ref MUTATORS: RwLock<Vec<ObjectReference>> = RwLock::new(vec![]);
+
+    // We create a boxed mutator with MMTk core, and we mem copy its content to jl_tls_state_t (shallow copy).
+    // This map stores the pair of the mutator address in jl_tls_state_t and the original boxed mutator.
+    // As we only do a shallow copy, we should not free the original boxed mutator, until the thread is getting destroyed.
+    // Otherwise, we will have dangling pointers.
+    pub static ref MUTATORS: RwLock<HashMap<Address, Address>> = RwLock::new(HashMap::new());
 }
 
 #[link(name = "runtime_gc_c")]
@@ -157,6 +160,7 @@ pub struct Julia_Upcalls {
     pub exit_from_safepoint: extern "C" fn(old_state: i8),
     pub jl_hrtime: extern "C" fn() -> u64,
     pub update_gc_time: extern "C" fn(u64),
+    pub get_abi_structs_checksum_c: extern "C" fn() -> usize,
 }
 
 pub static mut UPCALLS: *const Julia_Upcalls = null_mut();
diff --git a/mmtk/src/util.rs b/mmtk/src/util.rs
index 4ea2ebdf..a7cb5da0 100644
--- a/mmtk/src/util.rs
+++ b/mmtk/src/util.rs
@@ -34,3 +34,8 @@ impl RootLabel {
         }
     }
 }
+
+pub(crate) fn get_abi_structs_checksum_rust() -> usize {
+    use std::mem;
+    return mem::size_of::<mmtk::Mutator<crate::JuliaVM>>();
+}