Skip to content

Commit

Permalink
Add experimental feature for pooled pages
Browse files Browse the repository at this point in the history
This commit adds the environment variable
RUBY_GC_EXPERIMENTAL_FEATURE_SIZE_POOL_POOLED_PAGES that turns on a new
pooled pages algorithm for incremental marking. When this environment
variable is not set, the old algorithm is used. When this environment
variable is set, the new algorithm is used.

The old algorithm is too optimistic: it assumes that all size pools will
be used at the same speed, so it marks very little at each incremental
marking step. However, if one size pool is filled, then there will be
a long pause in incremental marking. The new algorithm is pessimistic,
meaning it assumes that one size pool will be filled before the other
size pools are filled, so it will mark more objects at each incremental
marking step.
  • Loading branch information
peterzhu2118 committed Aug 14, 2023
1 parent 44a431c commit edc3301
Show file tree
Hide file tree
Showing 2 changed files with 62 additions and 13 deletions.
74 changes: 61 additions & 13 deletions gc.c
Original file line number Diff line number Diff line change
Expand Up @@ -694,9 +694,13 @@ typedef struct rb_heap_struct {
struct heap_page *sweeping_page; /* iterator for .pages */
struct heap_page *compact_cursor;
uintptr_t compact_cursor_index;

#if GC_ENABLE_INCREMENTAL_MARK
struct heap_page *pooled_pages;
size_t pooled_slots;
size_t step_slots;
#endif

size_t total_pages; /* total page count in a heap */
size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
} rb_heap_t;
Expand Down Expand Up @@ -868,6 +872,8 @@ typedef struct rb_objspace {
struct {
size_t pooled_slots;
size_t step_slots;

bool experimental_feature_size_pool_pooled_pages;
} rincgc;
#endif

Expand Down Expand Up @@ -2017,7 +2023,13 @@ heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *pa

page->free_next = heap->pooled_pages;
heap->pooled_pages = page;
objspace->rincgc.pooled_slots += page->free_slots;

if (objspace->rincgc.experimental_feature_size_pool_pooled_pages) {
heap->pooled_slots += page->free_slots;
}
else {
objspace->rincgc.pooled_slots += page->free_slots;
}

asan_lock_freelist(page);
}
Expand Down Expand Up @@ -2625,13 +2637,25 @@ ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *ca

#if GC_ENABLE_INCREMENTAL_MARK
if (is_incremental_marking(objspace)) {
// Not allowed to allocate without running an incremental marking step
if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
return Qfalse;
if (objspace->rincgc.experimental_feature_size_pool_pooled_pages) {
// Not allowed to allocate without running an incremental marking step
if (size_pool_cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
return Qfalse;
}

if (p) {
size_pool_cache->incremental_mark_step_allocated_slots++;
}
}
else {
// Not allowed to allocate without running an incremental marking step
if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
return Qfalse;
}

if (p) {
cache->incremental_mark_step_allocated_slots++;
if (p) {
cache->incremental_mark_step_allocated_slots++;
}
}
}
#endif
Expand Down Expand Up @@ -2745,6 +2769,7 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, boo
rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;
rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];

VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);

Expand All @@ -2765,6 +2790,7 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, boo
if (is_incremental_marking(objspace)) {
gc_continue(objspace, size_pool, heap);
cache->incremental_mark_step_allocated_slots = 0;
size_pool_cache->incremental_mark_step_allocated_slots = 0;

// Retry allocation after resetting incremental_mark_step_allocated_slots
obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
Expand Down Expand Up @@ -3811,6 +3837,10 @@ Init_heap(void)
heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
#endif

if (getenv("RUBY_GC_EXPERIMENTAL_FEATURE_SIZE_POOL_POOLED_PAGES")) {
objspace->rincgc.experimental_feature_size_pool_pooled_pages = true;
}

objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
objspace->obj_to_id_tbl = st_init_numtable();
Expand Down Expand Up @@ -5818,6 +5848,8 @@ gc_sweep_start(rb_objspace_t *objspace)
rb_size_pool_t *size_pool = &size_pools[i];
rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

heap->pooled_slots = 0;

gc_sweep_start_heap(objspace, heap);

#if USE_RVARGC
Expand Down Expand Up @@ -5929,6 +5961,7 @@ gc_sweep_finish(rb_objspace_t *objspace)
}
eden_heap->pooled_pages = NULL;
objspace->rincgc.pooled_slots = 0;
eden_heap->pooled_slots = 0;
}
#endif
#endif
Expand Down Expand Up @@ -8181,13 +8214,24 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)

if (full_mark) {
#if GC_ENABLE_INCREMENTAL_MARK
size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
if (objspace->rincgc.experimental_feature_size_pool_pooled_pages) {
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
rb_size_pool_t *size_pool = &size_pools[i];
rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
"objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
"objspace->rincgc.step_slots: %"PRIdSIZE", \n",
objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
size_t incremental_marking_steps = (heap->pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
heap->step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
}
}
else {
size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;

if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
"objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
"objspace->rincgc.step_slots: %"PRIdSIZE", \n",
objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
}
#endif
objspace->flags.during_minor_gc = FALSE;
if (ruby_enable_autocompact) {
Expand Down Expand Up @@ -8674,7 +8718,9 @@ gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t
if (heap->free_pages) {
gc_report(2, objspace, "gc_marks_continue: has pooled pages");

marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
size_t step_slots =
objspace->rincgc.experimental_feature_size_pool_pooled_pages ? heap->step_slots : objspace->rincgc.step_slots;
marking_finished = gc_marks_step(objspace, step_slots);
}
else {
gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
Expand Down Expand Up @@ -9200,6 +9246,8 @@ rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];

cache->incremental_mark_step_allocated_slots = 0;

struct heap_page *page = cache->using_page;
RVALUE *freelist = cache->freelist;
RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
Expand Down
1 change: 1 addition & 0 deletions internal/gc.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@ struct rb_objspace; /* in vm_core.h */
/* Per-size-pool object-allocation cache held by a ractor. */
typedef struct ractor_newobj_size_pool_cache {
struct RVALUE *freelist;   /* cached free slots; presumably taken from using_page — confirm against gc.c */
struct heap_page *using_page;   /* heap page the cache is currently allocating from */
/* Slots allocated from this cache since the last incremental marking
 * step; compared against INCREMENTAL_MARK_STEP_ALLOCATIONS in
 * ractor_cache_allocate_slot to force a marking step before further
 * allocation. Reset to 0 in newobj_alloc (after gc_continue) and in
 * rb_gc_ractor_newobj_cache_clear. */
size_t incremental_mark_step_allocated_slots;
} rb_ractor_newobj_size_pool_cache_t;

typedef struct ractor_newobj_cache {
Expand Down

0 comments on commit edc3301

Please sign in to comment.