Make perm alloc calls specific to GC implementation #9

Merged · 1 commit · May 3, 2023

87 changes: 0 additions & 87 deletions src/gc-common.c
@@ -521,94 +521,7 @@ JL_DLLEXPORT void *jl_gc_managed_realloc(void *d, size_t sz, size_t oldsz,
return gc_managed_realloc_(ptls, d, sz, oldsz, isaligned, owner, 1);
}

// Perm gen allocator
// 2M pool
#define GC_PERM_POOL_SIZE (2 * 1024 * 1024)
// 20k limit for pool allocation. At most 1% fragmentation
#define GC_PERM_POOL_LIMIT (20 * 1024)
uv_mutex_t gc_perm_lock;
static uintptr_t gc_perm_pool = 0;
static uintptr_t gc_perm_end = 0;

static void *gc_perm_alloc_large(size_t sz, int zero, unsigned align, unsigned offset) JL_NOTSAFEPOINT
{
// `align` must be power of two
assert(offset == 0 || offset < align);
const size_t malloc_align = sizeof(void*) == 8 ? 16 : 4;
if (align > 1 && (offset != 0 || align > malloc_align))
sz += align - 1;
int last_errno = errno;
#ifdef _OS_WINDOWS_
DWORD last_error = GetLastError();
#endif
void *base = zero ? calloc(1, sz) : malloc(sz);
if (base == NULL)
jl_throw(jl_memory_exception);
#ifdef _OS_WINDOWS_
SetLastError(last_error);
#endif
errno = last_errno;
jl_may_leak(base);
assert(align > 0);
unsigned diff = (offset - (uintptr_t)base) % align;
return (void*)((char*)base + diff);
}

STATIC_INLINE void *gc_try_perm_alloc_pool(size_t sz, unsigned align, unsigned offset) JL_NOTSAFEPOINT
{
uintptr_t pool = LLT_ALIGN(gc_perm_pool + offset, (uintptr_t)align) - offset;
uintptr_t end = pool + sz;
if (end > gc_perm_end)
return NULL;
gc_perm_pool = end;
return (void*)jl_assume(pool);
}

// **NOT** a safepoint
void *jl_gc_perm_alloc_nolock(size_t sz, int zero, unsigned align, unsigned offset)
{
// The caller should have acquired `gc_perm_lock`
assert(align < GC_PERM_POOL_LIMIT);
#ifndef MEMDEBUG
if (__unlikely(sz > GC_PERM_POOL_LIMIT))
#endif
return gc_perm_alloc_large(sz, zero, align, offset);
void *ptr = gc_try_perm_alloc_pool(sz, align, offset);
if (__likely(ptr))
return ptr;
int last_errno = errno;
#ifdef _OS_WINDOWS_
DWORD last_error = GetLastError();
void *pool = VirtualAlloc(NULL, GC_PERM_POOL_SIZE, MEM_COMMIT, PAGE_READWRITE);
SetLastError(last_error);
errno = last_errno;
if (__unlikely(pool == NULL))
return NULL;
#else
void *pool = mmap(0, GC_PERM_POOL_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
errno = last_errno;
if (__unlikely(pool == MAP_FAILED))
return NULL;
#endif
gc_perm_pool = (uintptr_t)pool;
gc_perm_end = gc_perm_pool + GC_PERM_POOL_SIZE;
return gc_try_perm_alloc_pool(sz, align, offset);
}

// **NOT** a safepoint
void *jl_gc_perm_alloc(size_t sz, int zero, unsigned align, unsigned offset)
{
assert(align < GC_PERM_POOL_LIMIT);
#ifndef MEMDEBUG
if (__unlikely(sz > GC_PERM_POOL_LIMIT))
#endif
return gc_perm_alloc_large(sz, zero, align, offset);
uv_mutex_lock(&gc_perm_lock);
void *p = jl_gc_perm_alloc_nolock(sz, zero, align, offset);
uv_mutex_unlock(&gc_perm_lock);
return p;
}

JL_DLLEXPORT void jl_gc_add_finalizer(jl_value_t *v, jl_function_t *f)
{
99 changes: 99 additions & 0 deletions src/gc.c
@@ -114,6 +114,105 @@ JL_DLLEXPORT void jl_gc_set_cb_notify_external_free(jl_gc_cb_notify_external_fre
jl_gc_deregister_callback(&gc_cblist_notify_external_free, (jl_gc_cb_func_t)cb);
}

// Perm gen allocator
// 2M pool
#define GC_PERM_POOL_SIZE (2 * 1024 * 1024)
// 20k limit for pool allocation. At most 1% fragmentation
#define GC_PERM_POOL_LIMIT (20 * 1024)

static uintptr_t gc_perm_pool = 0;
static uintptr_t gc_perm_end = 0;

static void *gc_perm_alloc_large(size_t sz, int zero, unsigned align, unsigned offset) JL_NOTSAFEPOINT
{
// `align` must be power of two
assert(offset == 0 || offset < align);
const size_t malloc_align = sizeof(void*) == 8 ? 16 : 4;
if (align > 1 && (offset != 0 || align > malloc_align))
sz += align - 1;
int last_errno = errno;
#ifdef _OS_WINDOWS_
DWORD last_error = GetLastError();
#endif
void *base = zero ? calloc(1, sz) : malloc(sz);
if (base == NULL)
jl_throw(jl_memory_exception);
#ifdef _OS_WINDOWS_
SetLastError(last_error);
#endif
errno = last_errno;
jl_may_leak(base);
assert(align > 0);
unsigned diff = (offset - (uintptr_t)base) % align;
return (void*)((char*)base + diff);
}

STATIC_INLINE void *gc_try_perm_alloc_pool(size_t sz, unsigned align, unsigned offset) JL_NOTSAFEPOINT
{
uintptr_t pool = LLT_ALIGN(gc_perm_pool + offset, (uintptr_t)align) - offset;
uintptr_t end = pool + sz;
if (end > gc_perm_end)
return NULL;
gc_perm_pool = end;
return (void*)jl_assume(pool);
}

// **NOT** a safepoint
void *jl_gc_perm_alloc_nolock(size_t sz, int zero, unsigned align, unsigned offset)
{
// The caller should have acquired `gc_perm_lock`
assert(align < GC_PERM_POOL_LIMIT);
#ifndef MEMDEBUG
if (__unlikely(sz > GC_PERM_POOL_LIMIT))
#endif
return gc_perm_alloc_large(sz, zero, align, offset);
void *ptr = gc_try_perm_alloc_pool(sz, align, offset);
if (__likely(ptr))
return ptr;
int last_errno = errno;
#ifdef _OS_WINDOWS_
DWORD last_error = GetLastError();
void *pool = VirtualAlloc(NULL, GC_PERM_POOL_SIZE, MEM_COMMIT, PAGE_READWRITE);
SetLastError(last_error);
errno = last_errno;
if (__unlikely(pool == NULL))
return NULL;
#else
void *pool = mmap(0, GC_PERM_POOL_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
errno = last_errno;
if (__unlikely(pool == MAP_FAILED))
return NULL;
#endif
gc_perm_pool = (uintptr_t)pool;
gc_perm_end = gc_perm_pool + GC_PERM_POOL_SIZE;
return gc_try_perm_alloc_pool(sz, align, offset);
}

// **NOT** a safepoint
void *jl_gc_perm_alloc(size_t sz, int zero, unsigned align, unsigned offset)
{
assert(align < GC_PERM_POOL_LIMIT);
#ifndef MEMDEBUG
if (__unlikely(sz > GC_PERM_POOL_LIMIT))
#endif
return gc_perm_alloc_large(sz, zero, align, offset);
uv_mutex_lock(&gc_perm_lock);
void *p = jl_gc_perm_alloc_nolock(sz, zero, align, offset);
uv_mutex_unlock(&gc_perm_lock);
return p;
}

void jl_gc_notify_image_load(const char* img_data, size_t len)
{
// Do nothing
}

void jl_gc_notify_image_alloc(char* img_data, size_t len)
{
// Do nothing
}

// Protect all access to `finalizer_list_marked` and `to_finalize`.
// For accessing `ptls->finalizers`, the lock is needed if a thread
// is going to realloc the buffer (of its own list) or accessing the
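
For reviewers skimming the moved code: gc_perm_alloc_large over-allocates by align - 1 bytes and shifts the base pointer so that the returned pointer is congruent to offset modulo align; jl_gc_permobj passes offset = sizeof(void*) % align so the tag word can sit directly in front of an align-aligned payload. A minimal standalone sketch of that adjustment, independent of the Julia runtime (demo names only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Same pointer adjustment as gc_perm_alloc_large: reserve align - 1 spare
// bytes, then shift base so the result p satisfies p % align == offset.
// The shifted pointer must never be handed to free(); perm-gen memory is
// intentionally leaked (see jl_may_leak in the hunk above).
static void *perm_alloc_demo(size_t sz, unsigned align, unsigned offset)
{
    assert(align > 0 && (align & (align - 1)) == 0); // power of two
    assert(offset == 0 || offset < align);
    char *base = (char*)malloc(sz + align - 1);
    if (base == NULL)
        return NULL;
    uintptr_t diff = (offset - (uintptr_t)base) % align;
    return base + diff;
}

int main(void)
{
    // Mirror jl_gc_permobj: 16-byte-aligned payload preceded by a tag word.
    unsigned align = 16, offset = sizeof(void*) % align;
    void *p = perm_alloc_demo(64, align, offset);
    printf("p %% align == offset? %d\n", (unsigned)((uintptr_t)p % align) == offset);
    return 0;
}
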
8 changes: 8 additions & 0 deletions src/julia_internal.h
@@ -334,6 +334,7 @@ jl_value_t *jl_gc_big_alloc_noinline(jl_ptls_t ptls, size_t allocsz);
#ifdef MMTK_GC
JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int pool_offset, int osize, void* ty);
JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_big(jl_ptls_t ptls, size_t allocsz);
extern void post_alloc(void* mutator, void* obj, size_t bytes, int allocator);
#endif // MMTK_GC
JL_DLLEXPORT int jl_gc_classify_pools(size_t sz, int *osize) JL_NOTSAFEPOINT;
extern uv_mutex_t gc_perm_lock;
@@ -344,6 +345,8 @@ void *jl_gc_perm_alloc(size_t sz, int zero,
void jl_gc_force_mark_old(jl_ptls_t ptls, jl_value_t *v);
void gc_sweep_sysimg(void);

void jl_gc_notify_image_load(const char* img_data, size_t len);
void jl_gc_notify_image_alloc(char* img_data, size_t len);

// pools are 16376 bytes large (GC_POOL_SZ - GC_PAGE_OFFSET)
static const int jl_gc_sizeclasses[] = {
@@ -534,8 +537,13 @@ STATIC_INLINE jl_value_t *jl_gc_permobj(size_t sz, void *ty) JL_NOTSAFEPOINT
sizeof(void*) * 2 : 16));
jl_taggedvalue_t *o = (jl_taggedvalue_t*)jl_gc_perm_alloc(allocsz, 0, align,
sizeof(void*) % align);
// Possibly we do not need this for MMTk. We could declare a post_alloc function and define it differently for the two GCs.
uintptr_t tag = (uintptr_t)ty;
o->header = tag | GC_OLD_MARKED;
#ifdef MMTK_GC
jl_ptls_t ptls = jl_current_task->ptls;
post_alloc(ptls->mmtk_mutator_ptr, jl_valueof(o), allocsz, 1);
#endif
return jl_valueof(o);
}
jl_value_t *jl_permbox8(jl_datatype_t *t, int8_t x);
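
The comment added to jl_gc_permobj above suggests an alternative to the #ifdef MMTK_GC block: declare one post-allocation hook and let each GC supply its own definition. A rough sketch of that idea (not part of this PR; the hook name and signature are hypothetical):

// julia_internal.h: single declaration shared by both GCs (hypothetical name)
void jl_gc_perm_post_alloc(jl_ptls_t ptls, jl_value_t *v, size_t sz) JL_NOTSAFEPOINT;

// gc.c, stock GC: nothing to do, the header is already tagged GC_OLD_MARKED
void jl_gc_perm_post_alloc(jl_ptls_t ptls, jl_value_t *v, size_t sz) JL_NOTSAFEPOINT
{
    (void)ptls; (void)v; (void)sz;
}

// mmtk-gc.c: forward to MMTk so it records the new object, as the
// #ifdef branches in jl_gc_permobj and mk_symbol do today
void jl_gc_perm_post_alloc(jl_ptls_t ptls, jl_value_t *v, size_t sz) JL_NOTSAFEPOINT
{
    post_alloc(ptls->mmtk_mutator_ptr, v, sz, 1);
}

// jl_gc_permobj and mk_symbol would then call the hook unconditionally, e.g.
//     jl_gc_perm_post_alloc(jl_current_task->ptls, jl_valueof(o), allocsz);
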
22 changes: 22 additions & 0 deletions src/mmtk-gc.c
@@ -480,6 +480,28 @@ void objprofile_reset(void)
{
}

void *jl_gc_perm_alloc_nolock(size_t sz, int zero, unsigned align, unsigned offset)
{
jl_ptls_t ptls = jl_current_task->ptls;
void* addr = alloc(ptls->mmtk_mutator_ptr, sz, align, offset, 1);
return addr;
}

void *jl_gc_perm_alloc(size_t sz, int zero, unsigned align, unsigned offset)
{
return jl_gc_perm_alloc_nolock(sz, zero, align, offset);
}

void jl_gc_notify_image_load(const char* img_data, size_t len)
{
// TODO: We should notify MMTk about the image (VM space)
}

void jl_gc_notify_image_alloc(char* img_data, size_t len)
{
// TODO: We should call MMTk to bulk set object metadata for the image region
}

#ifdef __cplusplus
}
#endif
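
With the MMTk definitions above and the stock-GC definitions in src/gc.c, jl_gc_perm_alloc and the two image notifications form a single interface that callers use without knowing which GC was compiled in. A hypothetical helper mirroring the staticdata.c call sites in the hunks below (assumes julia_internal.h is in scope):

// Allocate a permanently-live, 64-byte-aligned buffer for image data and
// notify whichever GC implementation is active; both the gc.c and the
// mmtk-gc.c definitions accept exactly this call.
static char *alloc_image_buffer(size_t len)
{
    char *img = (char*)jl_gc_perm_alloc(len, /*zero=*/0, /*align=*/64, /*offset=*/0);
    jl_gc_notify_image_alloc(img, len);
    return img;
}
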
3 changes: 3 additions & 0 deletions src/staticdata.c
@@ -489,6 +489,7 @@ static void jl_load_sysimg_so(void)
jl_dlsym(jl_sysimg_handle, "jl_system_image_data", (void **)&sysimg_data, 1);
size_t *plen;
jl_dlsym(jl_sysimg_handle, "jl_system_image_size", (void **)&plen, 1);
jl_gc_notify_image_load(sysimg_data, *plen);
jl_restore_system_image_data(sysimg_data, *plen);
}

@@ -3235,6 +3236,7 @@ static jl_value_t *jl_restore_package_image_from_stream(ios_t *f, jl_image_t *im
JL_SIGATOMIC_BEGIN();
size_t len = dataendpos - datastartpos;
char *sysimg = (char*)jl_gc_perm_alloc(len, 0, 64, 0);
jl_gc_notify_image_alloc(sysimg, len);
ios_seek(f, datastartpos);
if (ios_readall(f, sysimg, len) != len || jl_crc32c(0, sysimg, len) != (uint32_t)checksum) {
restored = jl_get_exceptionf(jl_errorexception_type, "Error reading system image file.");
@@ -3331,6 +3333,7 @@ JL_DLLEXPORT void jl_restore_system_image(const char *fname)
ios_seek_end(&f);
size_t len = ios_pos(&f);
char *sysimg = (char*)jl_gc_perm_alloc(len, 0, 64, 0);
jl_gc_notify_image_alloc(sysimg, len);
ios_seek(&f, 0);
if (ios_readall(&f, sysimg, len) != len)
jl_errorf("Error reading system image file.");
4 changes: 4 additions & 0 deletions src/symbol.c
@@ -41,6 +41,10 @@ static jl_sym_t *mk_symbol(const char *str, size_t len) JL_NOTSAFEPOINT
sym = (jl_sym_t*)jl_valueof(tag);
// set to old marked so that we won't look at it in the GC or write barrier.
tag->header = ((uintptr_t)jl_symbol_type) | GC_OLD_MARKED;
#ifdef MMTK_GC
jl_ptls_t ptls = jl_current_task->ptls;
post_alloc(ptls->mmtk_mutator_ptr, jl_valueof(tag), nb, 1);
#endif
jl_atomic_store_relaxed(&sym->left, NULL);
jl_atomic_store_relaxed(&sym->right, NULL);
sym->hash = hash_symbol(str, len);