Avoid calling mmtk_gc_poll frequently #20

Merged · 1 commit · Jul 2, 2023
src/julia_threads.h (1 addition, 0 deletions)
@@ -283,6 +283,7 @@ typedef struct _jl_tls_states_t {
 
 #ifdef MMTK_GC
     MMTkMutatorContext mmtk_mutator;
+    size_t malloc_sz_since_last_poll;
 #endif
 
     // some hidden state (usually just because we don't have the type's size declaration)
src/mmtk-gc.c (24 additions, 4 deletions)
@@ -33,7 +33,24 @@ JL_DLLEXPORT void jl_gc_set_cb_notify_external_free(jl_gc_cb_notify_external_fre
 
 inline void maybe_collect(jl_ptls_t ptls)
 {
-    mmtk_gc_poll(ptls);
+    // Just do a safepoint for the general maybe_collect.
+    jl_gc_safepoint_(ptls);
 }
 
+// This is only used for malloc. We need to know whether we need to do GC, but
+// repeatedly checking with MMTk (mmtk_gc_poll) is expensive, so we only check
+// once every few allocations.
+static inline void malloc_maybe_collect(jl_ptls_t ptls, size_t sz)
+{
+    // We do not need to maintain malloc_sz_since_last_poll precisely. We just
+    // need to avoid calling mmtk_gc_poll too frequently while keeping our
+    // heap-usage accounting as accurate as we reasonably can.
+    if (ptls->malloc_sz_since_last_poll > 4096) {
+        jl_atomic_store_relaxed(&ptls->malloc_sz_since_last_poll, 0);
+        mmtk_gc_poll(ptls);
+    } else {
+        jl_atomic_fetch_add_relaxed(&ptls->malloc_sz_since_last_poll, sz);
+        jl_gc_safepoint_(ptls);
+    }
+}
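
The pattern above is the heart of the change: a cheap per-thread byte counter gates the expensive poll, so most allocations pay only for a safepoint check. Below is a minimal self-contained sketch of the same idea using C11 atomics; the names throttled_poll, expensive_poll, cheap_safepoint, POLL_INTERVAL, and tls_t are illustrative stand-ins rather than identifiers from this PR, and the real code uses Julia's jl_atomic_* wrappers instead of stdatomic.h directly.

    #include <stdatomic.h>
    #include <stddef.h>

    // Stand-ins for mmtk_gc_poll() (may trigger a collection) and
    // jl_gc_safepoint_() (only services an already-requested GC).
    static void expensive_poll(void) { /* mmtk_gc_poll() in the PR */ }
    static void cheap_safepoint(void) { /* jl_gc_safepoint_() in the PR */ }

    // Bytes of malloc traffic between polls; the PR uses 4096.
    #define POLL_INTERVAL 4096

    typedef struct {
        // Per-thread counter. Relaxed ordering suffices because the count
        // only throttles polling and does not need to be exact.
        _Atomic size_t bytes_since_poll;
    } tls_t;

    static inline void throttled_poll(tls_t *tls, size_t sz)
    {
        if (atomic_load_explicit(&tls->bytes_since_poll,
                                 memory_order_relaxed) > POLL_INTERVAL) {
            // Reset, then do the expensive check. Note that sz itself is
            // not counted on this path, so the counter undercounts by up
            // to one allocation per interval; the PR accepts that slack.
            atomic_store_explicit(&tls->bytes_since_poll, 0,
                                  memory_order_relaxed);
            expensive_poll();
        }
        else {
            atomic_fetch_add_explicit(&tls->bytes_since_poll, sz,
                                      memory_order_relaxed);
            cheap_safepoint();
        }
    }

One subtlety visible in the diff: the threshold test reads malloc_sz_since_last_poll with a plain load while the updates go through relaxed atomics. Since ptls is thread-local and the counter is only advisory, a stale read at worst shifts a single poll, which is consistent with the comment that the counter need not be maintained precisely.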


@@ -266,6 +283,9 @@ void jl_init_thread_heap(jl_ptls_t ptls)
     memset(&ptls->gc_num, 0, sizeof(ptls->gc_num));
     jl_atomic_store_relaxed(&ptls->gc_num.allocd, -(int64_t)gc_num.interval);
 
+    // Clear the malloc size count.
+    jl_atomic_store_relaxed(&ptls->malloc_sz_since_last_poll, 0);
+
     // Create mutator
     MMTk_Mutator mmtk_mutator = mmtk_bind_mutator((void *)ptls, ptls->tid);
     // Copy the mutator to the thread local storage
@@ -363,7 +383,7 @@ JL_DLLEXPORT void *jl_gc_counted_malloc(size_t sz)
     jl_task_t *ct = jl_current_task;
     if (pgcstack && ct->world_age) {
         jl_ptls_t ptls = ct->ptls;
-        maybe_collect(ptls);
+        malloc_maybe_collect(ptls, sz);
         jl_atomic_store_relaxed(&ptls->gc_num.allocd,
             jl_atomic_load_relaxed(&ptls->gc_num.allocd) + sz);
         jl_atomic_store_relaxed(&ptls->gc_num.malloc,
@@ -379,7 +399,7 @@ JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz)
     jl_task_t *ct = jl_current_task;
     if (pgcstack && ct->world_age) {
         jl_ptls_t ptls = ct->ptls;
-        maybe_collect(ptls);
+        malloc_maybe_collect(ptls, sz);
         jl_atomic_store_relaxed(&ptls->gc_num.allocd,
             jl_atomic_load_relaxed(&ptls->gc_num.allocd) + nm*sz);
         jl_atomic_store_relaxed(&ptls->gc_num.malloc,
@@ -411,7 +431,7 @@ JL_DLLEXPORT void *jl_gc_counted_realloc_with_old_size(void *p, size_t old, size
     jl_task_t *ct = jl_current_task;
     if (pgcstack && ct->world_age) {
         jl_ptls_t ptls = ct->ptls;
-        maybe_collect(ptls);
+        malloc_maybe_collect(ptls, sz);
         if (sz < old)
             jl_atomic_store_relaxed(&ptls->gc_num.freed,
                 jl_atomic_load_relaxed(&ptls->gc_num.freed) + (old - sz));
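
For completeness, a hedged sketch of how a counted-allocation wrapper drives the throttle, in the spirit of jl_gc_counted_malloc above (counted_malloc and the explicit tls parameter are hypothetical simplifications; the real entry points also update the gc_num accounting shown in the diff):

    #include <stdlib.h>

    // Hypothetical wrapper mirroring jl_gc_counted_malloc: most calls hit
    // only the cheap safepoint, and roughly one call per POLL_INTERVAL
    // bytes of traffic pays for the full poll.
    void *counted_malloc(tls_t *tls, size_t sz)
    {
        throttled_poll(tls, sz);  // tls_t and throttled_poll from the sketch above
        return malloc(sz);
    }

The net effect of the PR is that mmtk_gc_poll runs roughly once per 4 KB of counted malloc traffic per thread, while every allocation still crosses a safepoint, so a collection requested in the meantime is still honored promptly.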