diff --git a/benchmark/string_concat.yml b/benchmark/string_concat.yml index da14692f5ecc2c..f11f95ee9a7891 100644 --- a/benchmark/string_concat.yml +++ b/benchmark/string_concat.yml @@ -45,7 +45,7 @@ benchmark: "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" \ "#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}#{CHUNK}" - interpolation_same_size_pool: | + interpolation_same_heap: | buffer = "#{SHORT}#{SHORT}" - interpolation_switching_size_pools: | + interpolation_switching_heaps: | buffer = "#{SHORT}#{LONG}" diff --git a/gc.c b/gc.c index 0f50974017a70e..66d0565f151c7d 100644 --- a/gc.c +++ b/gc.c @@ -310,13 +310,13 @@ rb_gc_set_shape(VALUE obj, uint32_t shape_id) } uint32_t -rb_gc_rebuild_shape(VALUE obj, size_t size_pool_id) +rb_gc_rebuild_shape(VALUE obj, size_t heap_id) { rb_shape_t *orig_shape = rb_shape_get_shape(obj); if (rb_shape_obj_too_complex(obj)) return (uint32_t)OBJ_TOO_COMPLEX_SHAPE_ID; - rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)(size_pool_id + FIRST_T_OBJECT_SHAPE_ID)); + rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)(heap_id + FIRST_T_OBJECT_SHAPE_ID)); rb_shape_t *new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape); if (!new_shape) return 0; @@ -577,7 +577,7 @@ typedef struct gc_function_map { void (*ractor_cache_free)(void *objspace_ptr, void *cache); void (*set_params)(void *objspace_ptr); void (*init)(void); - size_t *(*size_pool_sizes)(void *objspace_ptr); + size_t *(*heap_sizes)(void *objspace_ptr); // Shutdown void (*shutdown_free_objects)(void *objspace_ptr); // GC @@ -594,7 +594,7 @@ typedef struct gc_function_map { // Object allocation VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size); size_t (*obj_slot_size)(VALUE obj); - size_t (*size_pool_id_for_size)(void *objspace_ptr, size_t size); + size_t (*heap_id_for_size)(void *objspace_ptr, size_t size); bool (*size_allocatable_p)(size_t size); // Malloc void *(*malloc)(void *objspace_ptr, size_t size); @@ -708,7 +708,7 @@ ruby_external_gc_init(void) load_external_gc_func(ractor_cache_free); load_external_gc_func(set_params); load_external_gc_func(init); - load_external_gc_func(size_pool_sizes); + load_external_gc_func(heap_sizes); // Shutdown load_external_gc_func(shutdown_free_objects); // GC @@ -725,7 +725,7 @@ ruby_external_gc_init(void) // Object allocation load_external_gc_func(new_obj); load_external_gc_func(obj_slot_size); - load_external_gc_func(size_pool_id_for_size); + load_external_gc_func(heap_id_for_size); load_external_gc_func(size_allocatable_p); // Malloc load_external_gc_func(malloc); @@ -787,7 +787,7 @@ ruby_external_gc_init(void) # define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free # define rb_gc_impl_set_params rb_gc_functions.set_params # define rb_gc_impl_init rb_gc_functions.init -# define rb_gc_impl_size_pool_sizes rb_gc_functions.size_pool_sizes +# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes // Shutdown # define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects // GC @@ -804,7 +804,7 @@ ruby_external_gc_init(void) // Object allocation # define rb_gc_impl_new_obj rb_gc_functions.new_obj # define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size -# define rb_gc_impl_size_pool_id_for_size rb_gc_functions.size_pool_id_for_size +# define 
rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size # define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p // Malloc # define rb_gc_impl_malloc rb_gc_functions.malloc @@ -3000,9 +3000,9 @@ rb_gc_prepare_heap(void) } size_t -rb_gc_size_pool_id_for_size(size_t size) +rb_gc_heap_id_for_size(size_t size) { - return rb_gc_impl_size_pool_id_for_size(rb_gc_get_objspace(), size); + return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size); } bool @@ -3452,9 +3452,9 @@ rb_gc_initial_stress_set(VALUE flag) } size_t * -rb_gc_size_pool_sizes(void) +rb_gc_heap_sizes(void) { - return rb_gc_impl_size_pool_sizes(rb_gc_get_objspace()); + return rb_gc_impl_heap_sizes(rb_gc_get_objspace()); } VALUE diff --git a/gc/default.c b/gc/default.c index 377786885c2326..d7f5cff3942db4 100644 --- a/gc/default.c +++ b/gc/default.c @@ -150,22 +150,22 @@ #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_ROOT_TICKS) -#ifndef SIZE_POOL_COUNT -# define SIZE_POOL_COUNT 5 +#ifndef HEAP_COUNT +# define HEAP_COUNT 5 #endif -typedef struct ractor_newobj_size_pool_cache { +typedef struct ractor_newobj_heap_cache { struct free_slot *freelist; struct heap_page *using_page; -} rb_ractor_newobj_size_pool_cache_t; +} rb_ractor_newobj_heap_cache_t; typedef struct ractor_newobj_cache { size_t incremental_mark_step_allocated_slots; - rb_ractor_newobj_size_pool_cache_t size_pool_caches[SIZE_POOL_COUNT]; + rb_ractor_newobj_heap_cache_t heap_caches[HEAP_COUNT]; } rb_ractor_newobj_cache_t; typedef struct { - size_t size_pool_init_slots[SIZE_POOL_COUNT]; + size_t heap_init_slots[HEAP_COUNT]; size_t heap_free_slots; double growth_factor; size_t growth_max_slots; @@ -405,7 +405,7 @@ typedef struct mark_stack { typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d); -typedef struct rb_size_pool_struct { +typedef struct rb_heap_struct { short slot_size; /* Basic statistics */ @@ -429,7 +429,7 @@ typedef struct rb_size_pool_struct { size_t total_pages; /* total page count in a heap */ size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */ -} rb_size_pool_t; +} rb_heap_t; enum { gc_stress_no_major, @@ -477,7 +477,7 @@ typedef struct rb_objspace { rb_event_flag_t hook_events; unsigned long long next_object_id; - rb_size_pool_t size_pools[SIZE_POOL_COUNT]; + rb_heap_t heaps[HEAP_COUNT]; size_t empty_pages_count; struct heap_page *empty_pages; @@ -724,7 +724,7 @@ struct heap_page { unsigned int has_uncollectible_wb_unprotected_objects : 1; } flags; - rb_size_pool_t *size_pool; + rb_heap_t *heap; struct heap_page *free_next; struct heap_page_body *body; @@ -769,7 +769,7 @@ heap_page_in_global_empty_pages_pool(rb_objspace_t *objspace, struct heap_page * if (page->total_slots == 0) { GC_ASSERT(page->start == 0); GC_ASSERT(page->slot_size == 0); - GC_ASSERT(page->size_pool == NULL); + GC_ASSERT(page->heap == NULL); GC_ASSERT(page->free_slots == 0); asan_unpoisoning_memory_region(&page->freelist, sizeof(&page->freelist)) { GC_ASSERT(page->freelist == NULL); @@ -780,7 +780,7 @@ heap_page_in_global_empty_pages_pool(rb_objspace_t *objspace, struct heap_page * else { GC_ASSERT(page->start != 0); GC_ASSERT(page->slot_size != 0); - GC_ASSERT(page->size_pool != NULL); + GC_ASSERT(page->heap != NULL); return false; } @@ -843,7 +843,7 @@ RVALUE_AGE_SET(VALUE obj, int age) #define heap_pages_himem objspace->heap_pages.range[1] #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages #define heap_pages_deferred_final objspace->heap_pages.deferred_final -#define 
size_pools objspace->size_pools +#define heaps objspace->heaps #define during_gc objspace->flags.during_gc #define finalizing objspace->atomic_flags.finalizing #define finalizer_table objspace->finalizer_table @@ -904,8 +904,8 @@ gc_mode_verify(enum gc_mode mode) static inline bool has_sweeping_pages(rb_objspace_t *objspace) { - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - if ((&size_pools[i])->sweeping_page) { + for (int i = 0; i < HEAP_COUNT; i++) { + if ((&heaps[i])->sweeping_page) { return TRUE; } } @@ -916,8 +916,8 @@ static inline size_t heap_eden_total_pages(rb_objspace_t *objspace) { size_t count = 0; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - count += (&size_pools[i])->total_pages; + for (int i = 0; i < HEAP_COUNT; i++) { + count += (&heaps[i])->total_pages; } return count; } @@ -926,9 +926,9 @@ static inline size_t total_allocated_objects(rb_objspace_t *objspace) { size_t count = 0; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - count += size_pool->total_allocated_objects; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + count += heap->total_allocated_objects; } return count; } @@ -937,9 +937,9 @@ static inline size_t total_freed_objects(rb_objspace_t *objspace) { size_t count = 0; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - count += size_pool->total_freed_objects; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + count += heap->total_freed_objects; } return count; } @@ -948,9 +948,9 @@ static inline size_t total_final_slots_count(rb_objspace_t *objspace) { size_t count = 0; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - count += size_pool->final_slots_count; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + count += heap->final_slots_count; } return count; } @@ -1011,11 +1011,11 @@ static void gc_marking_enter(rb_objspace_t *objspace); static void gc_marking_exit(rb_objspace_t *objspace); static void gc_sweeping_enter(rb_objspace_t *objspace); static void gc_sweeping_exit(rb_objspace_t *objspace); -static bool gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool); +static bool gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap); static void gc_sweep(rb_objspace_t *objspace); -static void gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool); -static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool); +static void gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap); +static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap); static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr); static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr); @@ -1214,8 +1214,8 @@ RVALUE_UNCOLLECTIBLE(rb_objspace_t *objspace, VALUE obj) #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj)) static int rgengc_remember(rb_objspace_t *objspace, VALUE obj); -static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_size_pool_t *size_pool); -static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_size_pool_t *size_pool); +static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap); +static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap); static int check_rvalue_consistency_force(rb_objspace_t *objspace, const VALUE obj, int terminate) @@ -1519,10 +1519,10 @@ 
rb_gc_impl_get_measure_total_time(void *objspace_ptr) } static size_t -minimum_slots_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +minimum_slots_for_heap(rb_objspace_t *objspace, rb_heap_t *heap) { - size_t size_pool_idx = size_pool - size_pools; - return gc_params.size_pool_init_slots[size_pool_idx]; + size_t heap_idx = heap - heaps; + return gc_params.heap_init_slots[heap_idx]; } static int @@ -1657,8 +1657,8 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj } static void -size_pool_allocatable_slots_expand(rb_objspace_t *objspace, - rb_size_pool_t *size_pool, size_t free_slots, size_t total_slots) +heap_allocatable_slots_expand(rb_objspace_t *objspace, + rb_heap_t *heap, size_t free_slots, size_t total_slots) { double goal_ratio = gc_params.heap_free_slots_goal_ratio; size_t target_total_slots; @@ -1667,7 +1667,7 @@ size_pool_allocatable_slots_expand(rb_objspace_t *objspace, target_total_slots = (size_t)(total_slots * gc_params.growth_factor); } else if (total_slots == 0) { - target_total_slots = minimum_slots_for_size_pool(objspace, size_pool); + target_total_slots = minimum_slots_for_heap(objspace, heap); } else { /* Find `f' where free_slots = f * total_slots * goal_ratio @@ -1703,14 +1703,14 @@ size_pool_allocatable_slots_expand(rb_objspace_t *objspace, } static inline void -heap_add_freepage(rb_size_pool_t *size_pool, struct heap_page *page) +heap_add_freepage(rb_heap_t *heap, struct heap_page *page) { asan_unlock_freelist(page); GC_ASSERT(page->free_slots != 0); GC_ASSERT(page->freelist != NULL); - page->free_next = size_pool->free_pages; - size_pool->free_pages = page; + page->free_next = heap->free_pages; + heap->free_pages = page; RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist); @@ -1718,25 +1718,25 @@ heap_add_freepage(rb_size_pool_t *size_pool, struct heap_page *page) } static inline void -heap_add_poolpage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_page *page) +heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page) { asan_unlock_freelist(page); GC_ASSERT(page->free_slots != 0); GC_ASSERT(page->freelist != NULL); - page->free_next = size_pool->pooled_pages; - size_pool->pooled_pages = page; + page->free_next = heap->pooled_pages; + heap->pooled_pages = page; objspace->rincgc.pooled_slots += page->free_slots; asan_lock_freelist(page); } static void -heap_unlink_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_page *page) +heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page) { ccan_list_del(&page->page_node); - size_pool->total_pages--; - size_pool->total_slots -= page->total_slots; + heap->total_pages--; + heap->total_slots -= page->total_slots; } static void @@ -1786,7 +1786,7 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace) // Get number of pages estimated for the smallest size pool CEILDIV(objspace->heap_pages.allocatable_slots, HEAP_PAGE_OBJ_LIMIT) * // Estimate the average slot size multiple - (1 << (SIZE_POOL_COUNT / 2)); + (1 << (HEAP_COUNT / 2)); if (objspace->empty_pages != NULL && objspace->empty_pages_count > pages_to_keep_count) { GC_ASSERT(objspace->empty_pages_count > 0); @@ -1975,10 +1975,10 @@ heap_page_allocate(rb_objspace_t *objspace) } static void -size_pool_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_page *page) +heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page) { /* Adding to eden heap during incremental 
sweeping is forbidden */ - GC_ASSERT(!size_pool->sweeping_page); + GC_ASSERT(!heap->sweeping_page); GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, page)); /* adjust obj_limit (object number available in this page) */ @@ -1993,50 +1993,50 @@ size_pool_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct he * In other words, ensure there are an even number of objects * per bit plane. */ if (NUM_IN_PAGE(start) == 1) { - start += size_pool->slot_size - BASE_SLOT_SIZE; + start += heap->slot_size - BASE_SLOT_SIZE; } - GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % size_pool->slot_size == 0); + GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % heap->slot_size == 0); } - int slot_count = (int)((HEAP_PAGE_SIZE - (start - (uintptr_t)page->body))/size_pool->slot_size); + int slot_count = (int)((HEAP_PAGE_SIZE - (start - (uintptr_t)page->body))/heap->slot_size); page->start = start; page->total_slots = slot_count; - page->slot_size = size_pool->slot_size; - page->size_pool = size_pool; + page->slot_size = heap->slot_size; + page->heap = heap; asan_unlock_freelist(page); page->freelist = NULL; asan_unpoison_memory_region(page->body, HEAP_PAGE_SIZE, false); - for (VALUE p = (VALUE)start; p < start + (slot_count * size_pool->slot_size); p += size_pool->slot_size) { + for (VALUE p = (VALUE)start; p < start + (slot_count * heap->slot_size); p += heap->slot_size) { heap_page_add_freeobj(objspace, page, p); } asan_lock_freelist(page); page->free_slots = slot_count; - size_pool->total_allocated_pages++; + heap->total_allocated_pages++; - ccan_list_add_tail(&size_pool->pages, &page->page_node); - size_pool->total_pages++; - size_pool->total_slots += page->total_slots; + ccan_list_add_tail(&heap->pages, &page->page_node); + heap->total_pages++; + heap->total_slots += page->total_slots; } static int -heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_heap_t *heap) { if (objspace->heap_pages.allocatable_slots > 0) { gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", " "allocatable_slots: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n", - rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_slots, size_pool->total_pages); + rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_slots, heap->total_pages); struct heap_page *page = heap_page_resurrect(objspace); if (page == NULL) { page = heap_page_allocate(objspace); } - size_pool_add_page(objspace, size_pool, page); - heap_add_freepage(size_pool, page); + heap_add_page(objspace, heap, page); + heap_add_freepage(heap, page); if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) { objspace->heap_pages.allocatable_slots -= page->total_slots; @@ -2052,77 +2052,77 @@ heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_size_pool_t *size_ } static void -heap_page_allocate_and_initialize_force(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +heap_page_allocate_and_initialize_force(rb_objspace_t *objspace, rb_heap_t *heap) { size_t prev_allocatable_slots = objspace->heap_pages.allocatable_slots; // Set allocatable slots to 1 to force a page to be created. 
objspace->heap_pages.allocatable_slots = 1; - heap_page_allocate_and_initialize(objspace, size_pool); - GC_ASSERT(size_pool->free_pages != NULL); + heap_page_allocate_and_initialize(objspace, heap); + GC_ASSERT(heap->free_pages != NULL); objspace->heap_pages.allocatable_slots = prev_allocatable_slots; } static void -gc_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +gc_continue(rb_objspace_t *objspace, rb_heap_t *heap) { unsigned int lock_lev; gc_enter(objspace, gc_enter_event_continue, &lock_lev); /* Continue marking if in incremental marking. */ if (is_incremental_marking(objspace)) { - if (gc_marks_continue(objspace, size_pool)) { + if (gc_marks_continue(objspace, heap)) { gc_sweep(objspace); } } /* Continue sweeping if in lazy sweeping or the previous incremental * marking finished and did not yield a free page. */ - if (size_pool->free_pages == NULL && is_lazy_sweeping(objspace)) { - gc_sweep_continue(objspace, size_pool); + if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) { + gc_sweep_continue(objspace, heap); } gc_exit(objspace, gc_enter_event_continue, &lock_lev); } static void -heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap) { - GC_ASSERT(size_pool->free_pages == NULL); + GC_ASSERT(heap->free_pages == NULL); - if (size_pool->total_slots < gc_params.size_pool_init_slots[size_pool - size_pools] && - size_pool->sweeping_page == NULL) { - heap_page_allocate_and_initialize_force(objspace, size_pool); - GC_ASSERT(size_pool->free_pages != NULL); + if (heap->total_slots < gc_params.heap_init_slots[heap - heaps] && + heap->sweeping_page == NULL) { + heap_page_allocate_and_initialize_force(objspace, heap); + GC_ASSERT(heap->free_pages != NULL); return; } /* Continue incremental marking or lazy sweeping, if in any of those steps. */ - gc_continue(objspace, size_pool); + gc_continue(objspace, heap); - if (size_pool->free_pages == NULL) { - heap_page_allocate_and_initialize(objspace, size_pool); + if (heap->free_pages == NULL) { + heap_page_allocate_and_initialize(objspace, heap); } /* If we still don't have a free page and not allowed to create a new page, * we should start a new GC cycle. */ - if (size_pool->free_pages == NULL) { + if (heap->free_pages == NULL) { if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) { rb_memerror(); } else { if (objspace->heap_pages.allocatable_slots == 0 && !gc_config_full_mark_val) { - size_pool_allocatable_slots_expand(objspace, size_pool, - size_pool->freed_slots + size_pool->empty_slots, - size_pool->total_slots); + heap_allocatable_slots_expand(objspace, heap, + heap->freed_slots + heap->empty_slots, + heap->total_slots); GC_ASSERT(objspace->heap_pages.allocatable_slots > 0); } /* Do steps of incremental marking or lazy sweeping if the GC run permits. */ - gc_continue(objspace, size_pool); + gc_continue(objspace, heap); /* If we're not incremental marking (e.g. a minor GC) or finished * sweeping and still don't have a free page, then - * gc_sweep_finish_size_pool should allow us to create a new page. */ - if (size_pool->free_pages == NULL && !heap_page_allocate_and_initialize(objspace, size_pool)) { + * gc_sweep_finish_heap should allow us to create a new page. 
*/ + if (heap->free_pages == NULL && !heap_page_allocate_and_initialize(objspace, heap)) { if (gc_needs_major_flags == GPR_FLAG_NONE) { rb_bug("cannot create a new page after GC"); } @@ -2132,10 +2132,10 @@ heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool) } else { /* Do steps of incremental marking or lazy sweeping. */ - gc_continue(objspace, size_pool); + gc_continue(objspace, heap); - if (size_pool->free_pages == NULL && - !heap_page_allocate_and_initialize(objspace, size_pool)) { + if (heap->free_pages == NULL && + !heap_page_allocate_and_initialize(objspace, heap)) { rb_bug("cannot create a new page after major GC"); } } @@ -2144,7 +2144,7 @@ heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool) } } - GC_ASSERT(size_pool->free_pages != NULL); + GC_ASSERT(heap->free_pages != NULL); } static inline VALUE @@ -2249,15 +2249,15 @@ rb_gc_impl_obj_slot_size(VALUE obj) } static inline size_t -size_pool_slot_size(unsigned char pool_id) +heap_slot_size(unsigned char pool_id) { - GC_ASSERT(pool_id < SIZE_POOL_COUNT); + GC_ASSERT(pool_id < HEAP_COUNT); size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE; #if RGENGC_CHECK_MODE rb_objspace_t *objspace = rb_gc_get_objspace(); - GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size); + GC_ASSERT(heaps[pool_id].slot_size == (short)slot_size); #endif slot_size -= RVALUE_OVERHEAD; @@ -2268,15 +2268,15 @@ size_pool_slot_size(unsigned char pool_id) bool rb_gc_impl_size_allocatable_p(size_t size) { - return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1); + return size <= heap_slot_size(HEAP_COUNT - 1); } static inline VALUE ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, - size_t size_pool_idx) + size_t heap_idx) { - rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx]; - struct free_slot *p = size_pool_cache->freelist; + rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx]; + struct free_slot *p = heap_cache->freelist; if (RB_UNLIKELY(is_incremental_marking(objspace))) { // Not allowed to allocate without running an incremental marking step @@ -2291,8 +2291,8 @@ ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *ca if (RB_LIKELY(p)) { VALUE obj = (VALUE)p; - MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx); - size_pool_cache->freelist = p->next; + MAYBE_UNUSED(const size_t) stride = heap_slot_size(heap_idx); + heap_cache->freelist = p->next; asan_unpoison_memory_region(p, stride, true); #if RGENGC_CHECK_MODE GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == stride); @@ -2307,16 +2307,16 @@ ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *ca } static struct heap_page * -heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +heap_next_free_page(rb_objspace_t *objspace, rb_heap_t *heap) { struct heap_page *page; - if (size_pool->free_pages == NULL) { - heap_prepare(objspace, size_pool); + if (heap->free_pages == NULL) { + heap_prepare(objspace, heap); } - page = size_pool->free_pages; - size_pool->free_pages = page->free_next; + page = heap->free_pages; + heap->free_pages = page->free_next; GC_ASSERT(page->free_slots != 0); @@ -2326,78 +2326,78 @@ heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool) } static inline void -ractor_cache_set_page(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, +ractor_cache_set_page(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, 
struct heap_page *page) { gc_report(3, objspace, "ractor_set_cache: Using page %p\n", (void *)page->body); - rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx]; + rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx]; - GC_ASSERT(size_pool_cache->freelist == NULL); + GC_ASSERT(heap_cache->freelist == NULL); GC_ASSERT(page->free_slots != 0); GC_ASSERT(page->freelist != NULL); - size_pool_cache->using_page = page; - size_pool_cache->freelist = page->freelist; + heap_cache->using_page = page; + heap_cache->freelist = page->freelist; page->free_slots = 0; page->freelist = NULL; - asan_unpoison_object((VALUE)size_pool_cache->freelist, false); - GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE)); - asan_poison_object((VALUE)size_pool_cache->freelist); + asan_unpoison_object((VALUE)heap_cache->freelist, false); + GC_ASSERT(RB_TYPE_P((VALUE)heap_cache->freelist, T_NONE)); + asan_poison_object((VALUE)heap_cache->freelist); } static inline size_t -size_pool_idx_for_size(size_t size) +heap_idx_for_size(size_t size) { size += RVALUE_OVERHEAD; size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE); - /* size_pool_idx is ceil(log2(slot_count)) */ - size_t size_pool_idx = 64 - nlz_int64(slot_count - 1); + /* heap_idx is ceil(log2(slot_count)) */ + size_t heap_idx = 64 - nlz_int64(slot_count - 1); - if (size_pool_idx >= SIZE_POOL_COUNT) { - rb_bug("size_pool_idx_for_size: allocation size too large " - "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx); + if (heap_idx >= HEAP_COUNT) { + rb_bug("heap_idx_for_size: allocation size too large " + "(size=%"PRIuSIZE"u, heap_idx=%"PRIuSIZE"u)", size, heap_idx); } #if RGENGC_CHECK_MODE rb_objspace_t *objspace = rb_gc_get_objspace(); - GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size); - if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size); + GC_ASSERT(size <= (size_t)heaps[heap_idx].slot_size); + if (heap_idx > 0) GC_ASSERT(size > (size_t)heaps[heap_idx - 1].slot_size); #endif - return size_pool_idx; + return heap_idx; } size_t -rb_gc_impl_size_pool_id_for_size(void *objspace_ptr, size_t size) +rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size) { - return size_pool_idx_for_size(size); + return heap_idx_for_size(size); } -static size_t size_pool_sizes[SIZE_POOL_COUNT + 1] = { 0 }; +static size_t heap_sizes[HEAP_COUNT + 1] = { 0 }; size_t * -rb_gc_impl_size_pool_sizes(void *objspace_ptr) +rb_gc_impl_heap_sizes(void *objspace_ptr) { - if (size_pool_sizes[0] == 0) { - for (unsigned char i = 0; i < SIZE_POOL_COUNT; i++) { - size_pool_sizes[i] = size_pool_slot_size(i); + if (heap_sizes[0] == 0) { + for (unsigned char i = 0; i < HEAP_COUNT; i++) { + heap_sizes[i] = heap_slot_size(i); } } - return size_pool_sizes; + return heap_sizes; } -NOINLINE(static VALUE newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, bool vm_locked)); +NOINLINE(static VALUE newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked)); static VALUE -newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, bool vm_locked) +newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked) { - rb_size_pool_t *size_pool = &size_pools[size_pool_idx]; + rb_heap_t *heap = &heaps[heap_idx]; VALUE obj = Qfalse; unsigned int lev = 0; @@ -2411,20 +2411,20 @@ 
newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size { if (is_incremental_marking(objspace)) { - gc_continue(objspace, size_pool); + gc_continue(objspace, heap); cache->incremental_mark_step_allocated_slots = 0; // Retry allocation after resetting incremental_mark_step_allocated_slots - obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx); + obj = ractor_cache_allocate_slot(objspace, cache, heap_idx); } if (obj == Qfalse) { // Get next free page (possibly running GC) - struct heap_page *page = heap_next_free_page(objspace, size_pool); - ractor_cache_set_page(objspace, cache, size_pool_idx, page); + struct heap_page *page = heap_next_free_page(objspace, heap); + ractor_cache_set_page(objspace, cache, heap_idx, page); // Retry allocation after moving to new page - obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx); + obj = ractor_cache_allocate_slot(objspace, cache, heap_idx); } } @@ -2439,27 +2439,27 @@ newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size } static VALUE -newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, bool vm_locked) +newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked) { - VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx); + VALUE obj = ractor_cache_allocate_slot(objspace, cache, heap_idx); if (RB_UNLIKELY(obj == Qfalse)) { - obj = newobj_cache_miss(objspace, cache, size_pool_idx, vm_locked); + obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked); } - rb_size_pool_t *size_pool = &size_pools[size_pool_idx]; - size_pool->total_allocated_objects++; + rb_heap_t *heap = &heaps[heap_idx]; + heap->total_allocated_objects++; GC_ASSERT(rb_gc_multi_ractor_p() || - size_pool->total_slots >= - (size_pool->total_allocated_objects - size_pool->total_freed_objects - size_pool->final_slots_count)); + heap->total_slots >= + (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count)); return obj; } -ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t size_pool_idx)); +ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx)); static inline VALUE -newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t size_pool_idx) +newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx) { VALUE obj; unsigned int lev; @@ -2480,7 +2480,7 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_new } } - obj = newobj_alloc(objspace, cache, size_pool_idx, true); + obj = newobj_alloc(objspace, cache, heap_idx, true); newobj_init(klass, flags, wb_protected, objspace, obj); } rb_gc_cr_unlock(lev); @@ -2489,20 +2489,20 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_new } NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, - rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx)); + rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)); NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, - rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx)); + rb_objspace_t 
*objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)); static VALUE -newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx) +newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx) { - return newobj_slowpath(klass, flags, objspace, cache, TRUE, size_pool_idx); + return newobj_slowpath(klass, flags, objspace, cache, TRUE, heap_idx); } static VALUE -newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx) +newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx) { - return newobj_slowpath(klass, flags, objspace, cache, FALSE, size_pool_idx); + return newobj_slowpath(klass, flags, objspace, cache, FALSE, heap_idx); } VALUE @@ -2521,21 +2521,21 @@ rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags } } - size_t size_pool_idx = size_pool_idx_for_size(alloc_size); + size_t heap_idx = heap_idx_for_size(alloc_size); rb_ractor_newobj_cache_t *cache = (rb_ractor_newobj_cache_t *)cache_ptr; if (!RB_UNLIKELY(during_gc || ruby_gc_stressful) && wb_protected) { - obj = newobj_alloc(objspace, cache, size_pool_idx, false); + obj = newobj_alloc(objspace, cache, heap_idx, false); newobj_init(klass, flags, wb_protected, objspace, obj); } else { RB_DEBUG_COUNTER_INC(obj_newobj_slowpath); obj = wb_protected ? - newobj_slowpath_wb_protected(klass, flags, objspace, cache, size_pool_idx) : - newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, size_pool_idx); + newobj_slowpath_wb_protected(klass, flags, objspace, cache, heap_idx) : + newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, heap_idx); } return newobj_fill(obj, v1, v2, v3); @@ -2635,7 +2635,7 @@ rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), voi struct heap_page *page = GET_HEAP_PAGE(obj); page->final_slots++; - page->size_pool->final_slots_count++; + page->heap->final_slots_count++; } static void @@ -2666,8 +2666,8 @@ struct each_obj_data { each_page_callback *each_page_callback; void *data; - struct heap_page **pages[SIZE_POOL_COUNT]; - size_t pages_counts[SIZE_POOL_COUNT]; + struct heap_page **pages[HEAP_COUNT]; + size_t pages_counts[HEAP_COUNT]; }; static VALUE @@ -2681,7 +2681,7 @@ objspace_each_objects_ensure(VALUE arg) objspace->flags.dont_incremental = FALSE; } - for (int i = 0; i < SIZE_POOL_COUNT; i++) { + for (int i = 0; i < HEAP_COUNT; i++) { struct heap_page **pages = data->pages[i]; free(pages); } @@ -2695,10 +2695,10 @@ objspace_each_objects_try(VALUE arg) struct each_obj_data *data = (struct each_obj_data *)arg; rb_objspace_t *objspace = data->objspace; - /* Copy pages from all size_pools to their respective buffers. */ - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - size_t size = size_pool->total_pages * sizeof(struct heap_page *); + /* Copy pages from all heaps to their respective buffers. */ + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + size_t size = heap->total_pages * sizeof(struct heap_page *); struct heap_page **pages = malloc(size); if (!pages) rb_memerror(); @@ -2710,21 +2710,21 @@ objspace_each_objects_try(VALUE arg) * an infinite loop. 
*/ struct heap_page *page = 0; size_t pages_count = 0; - ccan_list_for_each(&size_pool->pages, page, page_node) { + ccan_list_for_each(&heap->pages, page, page_node) { pages[pages_count] = page; pages_count++; } data->pages[i] = pages; data->pages_counts[i] = pages_count; - GC_ASSERT(pages_count == size_pool->total_pages); + GC_ASSERT(pages_count == heap->total_pages); } - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; size_t pages_count = data->pages_counts[i]; struct heap_page **pages = data->pages[i]; - struct heap_page *page = ccan_list_top(&size_pool->pages, struct heap_page, page_node); + struct heap_page *page = ccan_list_top(&heap->pages, struct heap_page, page_node); for (size_t i = 0; i < pages_count; i++) { /* If we have reached the end of the linked list then there are no * more pages, so break. */ @@ -2735,10 +2735,10 @@ objspace_each_objects_try(VALUE arg) if (pages[i] != page) continue; uintptr_t pstart = (uintptr_t)page->start; - uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size); + uintptr_t pend = pstart + (page->total_slots * heap->slot_size); if (data->each_obj_callback && - (*data->each_obj_callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) { + (*data->each_obj_callback)((void *)pstart, (void *)pend, heap->slot_size, data->data)) { break; } if (data->each_page_callback && @@ -2746,7 +2746,7 @@ objspace_each_objects_try(VALUE arg) break; } - page = ccan_list_next(&size_pool->pages, page, page_node); + page = ccan_list_next(&heap->pages, page, page_node); } } @@ -2937,14 +2937,14 @@ finalize_list(rb_objspace_t *objspace, VALUE zombie) obj_free_object_id(objspace, zombie); } - GC_ASSERT(page->size_pool->final_slots_count > 0); + GC_ASSERT(page->heap->final_slots_count > 0); GC_ASSERT(page->final_slots > 0); - page->size_pool->final_slots_count--; + page->heap->final_slots_count--; page->final_slots--; page->free_slots++; heap_page_add_freeobj(objspace, page, zombie); - page->size_pool->total_freed_objects++; + page->heap->total_freed_objects++; } rb_gc_vm_unlock(lev); @@ -3002,21 +3002,21 @@ gc_abort(void *objspace_ptr) } if (is_lazy_sweeping(objspace)) { - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; - size_pool->sweeping_page = NULL; + heap->sweeping_page = NULL; struct heap_page *page = NULL; - ccan_list_for_each(&size_pool->pages, page, page_node) { + ccan_list_for_each(&heap->pages, page, page_node) { page->flags.before_sweep = false; } } } - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - rgengc_mark_and_rememberset_clear(objspace, size_pool); + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + rgengc_mark_and_rememberset_clear(objspace, heap); } gc_mode_set(objspace, gc_mode_none); @@ -3155,9 +3155,9 @@ static size_t objspace_available_slots(rb_objspace_t *objspace) { size_t total_slots = 0; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - total_slots += size_pool->total_slots; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + total_slots += heap->total_slots; } return total_slots; } @@ -3221,7 +3221,7 @@ unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body) } static bool -try_move(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct 
heap_page *free_page, VALUE src) +try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src) { GC_ASSERT(gc_is_moveable_obj(objspace, src)); @@ -3267,13 +3267,13 @@ try_move(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_page *f } static void -gc_unprotect_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap) { - struct heap_page *cursor = size_pool->compact_cursor; + struct heap_page *cursor = heap->compact_cursor; while (cursor) { unlock_page_body(objspace, cursor->body); - cursor = ccan_list_next(&size_pool->pages, cursor, page_node); + cursor = ccan_list_next(&heap->pages, cursor, page_node); } } @@ -3467,9 +3467,9 @@ install_handlers(void) static void gc_compact_finish(rb_objspace_t *objspace) { - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - gc_unprotect_pages(objspace, size_pool); + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + gc_unprotect_pages(objspace, heap); } uninstall_handlers(); @@ -3477,11 +3477,11 @@ gc_compact_finish(rb_objspace_t *objspace) gc_update_references(objspace); objspace->profile.compact_count++; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - size_pool->compact_cursor = NULL; - size_pool->free_pages = NULL; - size_pool->compact_cursor_index = 0; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + heap->compact_cursor = NULL; + heap->free_pages = NULL; + heap->compact_cursor_index = 0; } if (gc_prof_enabled(objspace)) { @@ -3499,7 +3499,7 @@ struct gc_sweep_context { }; static inline void -gc_sweep_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx) +gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx) { struct heap_page *sweep_page = ctx->page; short slot_size = sweep_page->slot_size; @@ -3579,10 +3579,10 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, uintptr_t p, } static inline void -gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct gc_sweep_context *ctx) +gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx) { struct heap_page *sweep_page = ctx->page; - GC_ASSERT(sweep_page->size_pool == size_pool); + GC_ASSERT(sweep_page->heap == heap); uintptr_t p; bits_t *bits, bitset; @@ -3617,19 +3617,19 @@ gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct gc_swee bitset = ~bits[0]; bitset >>= NUM_IN_PAGE(p); if (bitset) { - gc_sweep_plane(objspace, size_pool, p, bitset, ctx); + gc_sweep_plane(objspace, heap, p, bitset, ctx); } p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE; for (int i = 1; i < bitmap_plane_count; i++) { bitset = ~bits[i]; if (bitset) { - gc_sweep_plane(objspace, size_pool, p, bitset, ctx); + gc_sweep_plane(objspace, heap, p, bitset, ctx); } p += BITS_BITLENGTH * BASE_SLOT_SIZE; } - if (!size_pool->compact_cursor) { + if (!heap->compact_cursor) { gc_setup_mark_bits(sweep_page); } @@ -3646,7 +3646,7 @@ gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct gc_swee ctx->freed_slots, ctx->empty_slots, ctx->final_slots); sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots; - sweep_page->size_pool->total_freed_objects += ctx->freed_slots; + sweep_page->heap->total_freed_objects += ctx->freed_slots; if (heap_pages_deferred_final && !finalizing) { 
gc_finalize_deferred_register(objspace); @@ -3722,15 +3722,15 @@ heap_page_freelist_append(struct heap_page *page, struct free_slot *freelist) } static void -gc_sweep_start_heap(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap) { - size_pool->sweeping_page = ccan_list_top(&size_pool->pages, struct heap_page, page_node); - size_pool->free_pages = NULL; - size_pool->pooled_pages = NULL; + heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node); + heap->free_pages = NULL; + heap->pooled_pages = NULL; if (!objspace->flags.immediate_sweep) { struct heap_page *page = NULL; - ccan_list_for_each(&size_pool->pages, page, page_node) { + ccan_list_for_each(&heap->pages, page, page_node) { page->flags.before_sweep = TRUE; } } @@ -3752,8 +3752,8 @@ gc_ractor_newobj_cache_clear(void *c, void *data) newobj_cache->incremental_mark_step_allocated_slots = 0; - for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) { - rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx]; + for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) { + rb_ractor_newobj_heap_cache_t *cache = &newobj_cache->heap_caches[heap_idx]; struct heap_page *page = cache->using_page; struct free_slot *freelist = cache->freelist; @@ -3782,15 +3782,15 @@ gc_sweep_start(rb_objspace_t *objspace) } #endif - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - gc_sweep_start_heap(objspace, size_pool); + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + gc_sweep_start_heap(objspace, heap); - /* We should call gc_sweep_finish_size_pool for size pools with no pages. */ - if (size_pool->sweeping_page == NULL) { - GC_ASSERT(size_pool->total_pages == 0); - GC_ASSERT(size_pool->total_slots == 0); - gc_sweep_finish_size_pool(objspace, size_pool); + /* We should call gc_sweep_finish_heap for heaps with no pages. */ + if (heap->sweeping_page == NULL) { + GC_ASSERT(heap->total_pages == 0); + GC_ASSERT(heap->total_slots == 0); + gc_sweep_finish_heap(objspace, heap); } } @@ -3798,17 +3798,17 @@ } static void -gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap) { - size_t total_slots = size_pool->total_slots; - size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots; + size_t total_slots = heap->total_slots; + size_t swept_slots = heap->freed_slots + heap->empty_slots; - size_t init_slots = gc_params.size_pool_init_slots[size_pool - size_pools]; + size_t init_slots = gc_params.heap_init_slots[heap - heaps]; size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio); if (swept_slots < min_free_slots && /* The heap is a growth heap if it freed more slots than had empty slots. */ - (size_pool->empty_slots == 0 || size_pool->freed_slots > size_pool->empty_slots)) { + (heap->empty_slots == 0 || heap->freed_slots > heap->empty_slots)) { /* If we don't have enough slots and we have pages on the tomb heap, move * pages from the tomb heap to the eden heap. 
This may prevent page * creation thrashing (frequently allocating and deallocating pages) and @@ -3816,8 +3816,8 @@ struct heap_page *resurrected_page; while (swept_slots < min_free_slots && (resurrected_page = heap_page_resurrect(objspace))) { - size_pool_add_page(objspace, size_pool, resurrected_page); - heap_add_freepage(size_pool, resurrected_page); + heap_add_page(objspace, heap, resurrected_page); + heap_add_freepage(heap, resurrected_page); swept_slots += resurrected_page->free_slots; } @@ -3827,11 +3827,11 @@ * RVALUE_OLD_AGE minor GC since the last major GC. */ if (is_full_marking(objspace) || objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) { - size_pool_allocatable_slots_expand(objspace, size_pool, swept_slots, size_pool->total_slots); + heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots); } else { gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE; - size_pool->force_major_gc_count++; + heap->force_major_gc_count++; } } } @@ -3845,22 +3845,22 @@ gc_sweep_finish(rb_objspace_t *objspace) gc_prof_set_heap_info(objspace); heap_pages_free_unused_pages(objspace); - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; - size_pool->freed_slots = 0; - size_pool->empty_slots = 0; + heap->freed_slots = 0; + heap->empty_slots = 0; if (!will_be_incremental_marking(objspace)) { - struct heap_page *end_page = size_pool->free_pages; + struct heap_page *end_page = heap->free_pages; if (end_page) { while (end_page->free_next) end_page = end_page->free_next; - end_page->free_next = size_pool->pooled_pages; + end_page->free_next = heap->pooled_pages; } else { - size_pool->free_pages = size_pool->pooled_pages; + heap->free_pages = heap->pooled_pages; } - size_pool->pooled_pages = NULL; + heap->pooled_pages = NULL; objspace->rincgc.pooled_slots = 0; } } @@ -3874,9 +3874,9 @@ } static int -gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap) { - struct heap_page *sweep_page = size_pool->sweeping_page; + struct heap_page *sweep_page = heap->sweeping_page; int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP; int swept_slots = 0; int pooled_slots = 0; @@ -3896,10 +3896,10 @@ .freed_slots = 0, .empty_slots = 0, }; - gc_sweep_page(objspace, size_pool, &ctx); + gc_sweep_page(objspace, heap, &ctx); int free_slots = ctx.freed_slots + ctx.empty_slots; - size_pool->sweeping_page = ccan_list_next(&size_pool->pages, sweep_page, page_node); + heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node); if (free_slots == sweep_page->total_slots && heap_pages_freeable_pages > 0 && @@ -3907,12 +3907,12 @@ heap_pages_freeable_pages--; unlink_limit--; /* There are no living objects, so move this page to the global empty pages. 
*/ - heap_unlink_page(objspace, size_pool, sweep_page); + heap_unlink_page(objspace, heap, sweep_page); sweep_page->start = 0; sweep_page->total_slots = 0; sweep_page->slot_size = 0; - sweep_page->size_pool = NULL; + sweep_page->heap = NULL; sweep_page->free_slots = 0; asan_unlock_freelist(sweep_page); @@ -3926,15 +3926,15 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool) objspace->empty_pages = sweep_page; } else if (free_slots > 0) { - size_pool->freed_slots += ctx.freed_slots; - size_pool->empty_slots += ctx.empty_slots; + heap->freed_slots += ctx.freed_slots; + heap->empty_slots += ctx.empty_slots; if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) { - heap_add_poolpage(objspace, size_pool, sweep_page); + heap_add_poolpage(objspace, heap, sweep_page); pooled_slots += free_slots; } else { - heap_add_freepage(size_pool, sweep_page); + heap_add_freepage(heap, sweep_page); swept_slots += free_slots; if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) { break; @@ -3944,10 +3944,10 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool) else { sweep_page->free_next = NULL; } - } while ((sweep_page = size_pool->sweeping_page)); + } while ((sweep_page = heap->sweeping_page)); - if (!size_pool->sweeping_page) { - gc_sweep_finish_size_pool(objspace, size_pool); + if (!heap->sweeping_page) { + gc_sweep_finish_heap(objspace, heap); if (!has_sweeping_pages(objspace)) { gc_sweep_finish(objspace); @@ -3958,35 +3958,35 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool) gc_prof_sweep_timer_stop(objspace); #endif - return size_pool->free_pages != NULL; + return heap->free_pages != NULL; } static void gc_sweep_rest(rb_objspace_t *objspace) { - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; - while (size_pool->sweeping_page) { - gc_sweep_step(objspace, size_pool); + while (heap->sweeping_page) { + gc_sweep_step(objspace, heap); } } } static void -gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool) +gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *sweep_heap) { GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD); if (!GC_ENABLE_LAZY_SWEEP) return; gc_sweeping_enter(objspace); - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - if (!gc_sweep_step(objspace, size_pool)) { - /* sweep_size_pool requires a free slot but sweeping did not yield any + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + if (!gc_sweep_step(objspace, heap)) { + /* sweep_heap requires a free slot but sweeping did not yield any * and we cannot allocate a new page. */ - if (size_pool == sweep_size_pool && objspace->heap_pages.allocatable_slots == 0) { + if (heap == sweep_heap && objspace->heap_pages.allocatable_slots == 0) { /* Not allowed to create a new page so finish sweeping. 
*/ gc_sweep_rest(objspace); break; @@ -4101,14 +4101,14 @@ gc_compact_start(rb_objspace_t *objspace) struct heap_page *page = NULL; gc_mode_transition(objspace, gc_mode_compacting); - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - ccan_list_for_each(&size_pool->pages, page, page_node) { + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + ccan_list_for_each(&heap->pages, page, page_node) { page->flags.before_sweep = TRUE; } - size_pool->compact_cursor = ccan_list_tail(&size_pool->pages, struct heap_page, page_node); - size_pool->compact_cursor_index = 0; + heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node); + heap->compact_cursor_index = 0; } if (gc_prof_enabled(objspace)) { @@ -4153,9 +4153,9 @@ gc_sweep(rb_objspace_t *objspace) else { /* Sweep every heap. */ - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - rb_size_pool_t *size_pool = &size_pools[i]; - gc_sweep_step(objspace, size_pool); + for (int i = 0; i < HEAP_COUNT; i++) { + rb_heap_t *heap = &heaps[i]; + gc_sweep_step(objspace, heap); } } @@ -5160,8 +5160,8 @@ static int gc_verify_heap_pages(rb_objspace_t *objspace) { int remembered_old_objects = 0; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - remembered_old_objects += gc_verify_heap_pages_(objspace, &((&size_pools[i])->pages)); + for (int i = 0; i < HEAP_COUNT; i++) { + remembered_old_objects += gc_verify_heap_pages_(objspace, &((&heaps[i])->pages)); } return remembered_old_objects; } @@ -5268,21 +5268,21 @@ gc_verify_internal_consistency(void *objspace_ptr) } static void -heap_move_pooled_pages_to_free_pages(rb_size_pool_t *size_pool) +heap_move_pooled_pages_to_free_pages(rb_heap_t *heap) { - if (size_pool->pooled_pages) { - if (size_pool->free_pages) { - struct heap_page *free_pages_tail = size_pool->free_pages; + if (heap->pooled_pages) { + if (heap->free_pages) { + struct heap_page *free_pages_tail = heap->free_pages; while (free_pages_tail->free_next) { free_pages_tail = free_pages_tail->free_next; } - free_pages_tail->free_next = size_pool->pooled_pages; + free_pages_tail->free_next = heap->pooled_pages; } else { - size_pool->free_pages = size_pool->pooled_pages; + heap->free_pages = heap->pooled_pages; } - size_pool->pooled_pages = NULL; + heap->pooled_pages = NULL; } } @@ -5328,11 +5328,11 @@ gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits } static void -gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_size_pool_t *size_pool) +gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap) { struct heap_page *page = 0; - ccan_list_for_each(&size_pool->pages, page, page_node) { + ccan_list_for_each(&heap->pages, page, page_node) { bits_t *mark_bits = page->mark_bits; bits_t *wbun_bits = page->wb_unprotected_bits; uintptr_t p = page->start; @@ -5405,8 +5405,8 @@ gc_marks_finish(rb_objspace_t *objspace) objspace->flags.during_incremental_marking = FALSE; /* check children of all marked wb-unprotected objects */ - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - gc_marks_wb_unprotected_objects(objspace, &size_pools[i]); + for (int i = 0; i < HEAP_COUNT; i++) { + gc_marks_wb_unprotected_objects(objspace, &heaps[i]); } } @@ -5439,8 +5439,8 @@ /* Setup freeable slots. 
*/ size_t total_init_slots = 0; - for (int i = 0; i < SIZE_POOL_COUNT; i++) { - total_init_slots += gc_params.size_pool_init_slots[i] * r_mul; + for (int i = 0; i < HEAP_COUNT; i++) { + total_init_slots += gc_params.heap_init_slots[i] * r_mul; } if (max_free_slots < total_init_slots) { @@ -5500,14 +5500,14 @@ gc_marks_finish(rb_objspace_t *objspace) } static bool -gc_compact_heap_cursors_met_p(rb_size_pool_t *size_pool) +gc_compact_heap_cursors_met_p(rb_heap_t *heap) { - return size_pool->sweeping_page == size_pool->compact_cursor; + return heap->sweeping_page == heap->compact_cursor; } -static rb_size_pool_t * -gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, VALUE obj) +static rb_heap_t * +gc_compact_destination_pool(rb_objspace_t *objspace, rb_heap_t *src_pool, VALUE obj) { size_t obj_size = rb_gc_obj_optimal_size(obj); if (obj_size == 0) { @@ -5516,34 +5516,34 @@ gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, V size_t idx = 0; if (rb_gc_impl_size_allocatable_p(obj_size)) { - idx = size_pool_idx_for_size(obj_size); + idx = heap_idx_for_size(obj_size); } - return &size_pools[idx]; + return &heaps[idx]; } static bool -gc_compact_move(rb_objspace_t *objspace, rb_size_pool_t *size_pool, VALUE src) +gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, VALUE src) { GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED); GC_ASSERT(gc_is_moveable_obj(objspace, src)); - rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src); + rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, heap, src); uint32_t orig_shape = 0; uint32_t new_shape = 0; if (gc_compact_heap_cursors_met_p(dest_pool)) { - return dest_pool != size_pool; + return dest_pool != heap; } if (RB_TYPE_P(src, T_OBJECT)) { orig_shape = rb_gc_get_shape(src); - if (dest_pool != size_pool) { - new_shape = rb_gc_rebuild_shape(src, dest_pool - size_pools); + if (dest_pool != heap) { + new_shape = rb_gc_rebuild_shape(src, dest_pool - heaps); if (new_shape == 0) { - dest_pool = size_pool; + dest_pool = heap; } } } @@ -5569,7 +5569,7 @@ gc_compact_move(rb_objspace_t *objspace, rb_size_pool_t *size_pool, VALUE src) dest_pool->sweeping_page = ccan_list_next(&dest_pool->pages, dest_pool->sweeping_page, page_node); if (gc_compact_heap_cursors_met_p(dest_pool)) { - return dest_pool != size_pool; + return dest_pool != heap; } } @@ -5585,7 +5585,7 @@ gc_compact_move(rb_objspace_t *objspace, rb_size_pool_t *size_pool, VALUE src) } static bool -gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, uintptr_t p, bits_t bitset, struct heap_page *page) +gc_compact_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page) { short slot_size = page->slot_size; short slot_bits = slot_size / BASE_SLOT_SIZE; @@ -5599,7 +5599,7 @@ gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, uintptr_t p objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++; if (gc_is_moveable_obj(objspace, vp)) { - if (!gc_compact_move(objspace, size_pool, vp)) { + if (!gc_compact_move(objspace, heap, vp)) { //the cursors met. 
@@ -5614,9 +5614,9 @@ gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, uintptr_t p
 // Iterate up all the objects in page, moving them to where they want to go
 static bool
-gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_page *page)
+gc_compact_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
 {
-    GC_ASSERT(page == size_pool->compact_cursor);
+    GC_ASSERT(page == heap->compact_cursor);
 
     bits_t *mark_bits, *pin_bits;
     bits_t bitset;
@@ -5629,7 +5629,7 @@ gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_
     bitset = (mark_bits[0] & ~pin_bits[0]);
     bitset >>= NUM_IN_PAGE(p);
     if (bitset) {
-        if (!gc_compact_plane(objspace, size_pool, (uintptr_t)p, bitset, page))
+        if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
             return false;
     }
     p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
@@ -5637,7 +5637,7 @@ gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_
     for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
         bitset = (mark_bits[j] & ~pin_bits[j]);
         if (bitset) {
-            if (!gc_compact_plane(objspace, size_pool, (uintptr_t)p, bitset, page))
+            if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
                 return false;
         }
         p += BITS_BITLENGTH * BASE_SLOT_SIZE;
@@ -5649,11 +5649,11 @@ gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, struct heap_
 static bool
 gc_compact_all_compacted_p(rb_objspace_t *objspace)
 {
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-        rb_size_pool_t *size_pool = &size_pools[i];
+    for (int i = 0; i < HEAP_COUNT; i++) {
+        rb_heap_t *heap = &heaps[i];
 
-        if (size_pool->total_pages > 0 &&
-            !gc_compact_heap_cursors_met_p(size_pool)) {
+        if (heap->total_pages > 0 &&
+            !gc_compact_heap_cursors_met_p(heap)) {
             return false;
         }
     }
@@ -5670,16 +5670,16 @@ gc_sweep_compact(rb_objspace_t *objspace)
 #endif
 
     while (!gc_compact_all_compacted_p(objspace)) {
-        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-            rb_size_pool_t *size_pool = &size_pools[i];
+        for (int i = 0; i < HEAP_COUNT; i++) {
+            rb_heap_t *heap = &heaps[i];
 
-            if (gc_compact_heap_cursors_met_p(size_pool)) {
+            if (gc_compact_heap_cursors_met_p(heap)) {
                 continue;
             }
 
-            struct heap_page *start_page = size_pool->compact_cursor;
+            struct heap_page *start_page = heap->compact_cursor;
 
-            if (!gc_compact_page(objspace, size_pool, start_page)) {
+            if (!gc_compact_page(objspace, heap, start_page)) {
                 lock_page_body(objspace, start_page->body);
 
                 continue;
@@ -5688,7 +5688,7 @@ gc_sweep_compact(rb_objspace_t *objspace)
             // If we get here, we've finished moving all objects on the compact_cursor page
             // So we can lock it and move the cursor on to the next one.
             lock_page_body(objspace, start_page->body);
-            size_pool->compact_cursor = ccan_list_prev(&size_pool->pages, size_pool->compact_cursor, page_node);
+            heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
         }
     }
 
@@ -5704,8 +5704,8 @@ gc_marks_rest(rb_objspace_t *objspace)
 {
     gc_report(1, objspace, "gc_marks_rest\n");
 
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-        (&size_pools[i])->pooled_pages = NULL;
+    for (int i = 0; i < HEAP_COUNT; i++) {
+        (&heaps[i])->pooled_pages = NULL;
     }
 
     if (is_incremental_marking(objspace)) {
@@ -5734,14 +5734,14 @@ gc_marks_step(rb_objspace_t *objspace, size_t slots)
 }
 
 static bool
-gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
+gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
 
     bool marking_finished = true;
 
     gc_marking_enter(objspace);
 
-    if (size_pool->free_pages) {
+    if (heap->free_pages) {
         gc_report(2, objspace, "gc_marks_continue: has pooled pages");
 
         marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
@@ -5749,7 +5749,7 @@ gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
     else {
         gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n", mark_stack_size(&objspace->mark_stack));
-        size_pool->force_incremental_marking_finish_count++;
+        heap->force_incremental_marking_finish_count++;
         gc_marks_rest(objspace);
     }
 
@@ -5783,15 +5783,15 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
         objspace->rgengc.last_major_gc = objspace->profile.count;
         objspace->marked_slots = 0;
 
-        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-            rb_size_pool_t *size_pool = &size_pools[i];
-            rgengc_mark_and_rememberset_clear(objspace, size_pool);
-            heap_move_pooled_pages_to_free_pages(size_pool);
+        for (int i = 0; i < HEAP_COUNT; i++) {
+            rb_heap_t *heap = &heaps[i];
+            rgengc_mark_and_rememberset_clear(objspace, heap);
+            heap_move_pooled_pages_to_free_pages(heap);
 
             if (objspace->flags.during_compacting) {
                 struct heap_page *page = NULL;
 
-                ccan_list_for_each(&size_pool->pages, page, page_node) {
+                ccan_list_for_each(&heap->pages, page, page_node) {
                     page->pinned_slots = 0;
                 }
             }
@@ -5803,8 +5803,8 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
             objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
         objspace->profile.minor_gc_count++;
 
-        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-            rgengc_rememberset_mark(objspace, &size_pools[i]);
+        for (int i = 0; i < HEAP_COUNT; i++) {
+            rgengc_rememberset_mark(objspace, &heaps[i]);
         }
     }
 
@@ -5946,7 +5946,7 @@ rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitse
 }
 
 static void
-rgengc_rememberset_mark(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
+rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     size_t j;
     struct heap_page *page = 0;
@@ -5955,7 +5955,7 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
 #endif
     gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
 
-    ccan_list_for_each(&size_pool->pages, page, page_node) {
+    ccan_list_for_each(&heap->pages, page, page_node) {
         if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
             uintptr_t p = page->start;
             bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
@@ -5998,11 +5998,11 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
 }
 
 static void
-rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
+rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     struct heap_page *page = 0;
 
-    ccan_list_for_each(&size_pool->pages, page, page_node) {
+    ccan_list_for_each(&heap->pages, page, page_node) {
         memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
         memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
         memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
@@ -6239,12 +6239,12 @@ rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)
 }
 
 static void
-heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
+heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
 {
-    if (!size_pool->free_pages) {
-        if (!heap_page_allocate_and_initialize(objspace, size_pool)) {
+    if (!heap->free_pages) {
+        if (!heap_page_allocate_and_initialize(objspace, heap)) {
             objspace->heap_pages.allocatable_slots = 1;
-            heap_page_allocate_and_initialize(objspace, size_pool);
+            heap_page_allocate_and_initialize(objspace, heap);
         }
     }
 }
@@ -6253,9 +6253,9 @@ static int
 ready_to_gc(rb_objspace_t *objspace)
 {
     if (dont_gc_val() || during_gc || ruby_disable_gc) {
-        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-            rb_size_pool_t *size_pool = &size_pools[i];
-            heap_ready_to_gc(objspace, size_pool);
+        for (int i = 0; i < HEAP_COUNT; i++) {
+            rb_heap_t *heap = &heaps[i];
+            heap_ready_to_gc(objspace, heap);
         }
         return FALSE;
     }
@@ -7014,8 +7014,8 @@ gc_move(rb_objspace_t *objspace, VALUE src, VALUE dest, size_t src_slot_size, si
     RMOVED(src)->destination = dest;
     GC_ASSERT(BUILTIN_TYPE(dest) != T_NONE);
 
-    GET_HEAP_PAGE(src)->size_pool->total_freed_objects++;
-    GET_HEAP_PAGE(dest)->size_pool->total_allocated_objects++;
+    GET_HEAP_PAGE(src)->heap->total_freed_objects++;
+    GET_HEAP_PAGE(dest)->heap->total_allocated_objects++;
 
     return src;
 }
@@ -7048,16 +7048,16 @@ compare_free_slots(const void *left, const void *right, void *dummy)
 static void
 gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
 {
-    for (int j = 0; j < SIZE_POOL_COUNT; j++) {
-        rb_size_pool_t *size_pool = &size_pools[j];
+    for (int j = 0; j < HEAP_COUNT; j++) {
+        rb_heap_t *heap = &heaps[j];
 
-        size_t total_pages = size_pool->total_pages;
+        size_t total_pages = heap->total_pages;
         size_t size = rb_size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
         struct heap_page *page = 0, **page_list = malloc(size);
         size_t i = 0;
 
-        size_pool->free_pages = NULL;
-        ccan_list_for_each(&size_pool->pages, page, page_node) {
+        heap->free_pages = NULL;
+        ccan_list_for_each(&heap->pages, page, page_node) {
            page_list[i++] = page;
            GC_ASSERT(page);
        }
@@ -7069,12 +7069,12 @@ gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func co
         ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);
 
         /* Reset the eden heap */
-        ccan_list_head_init(&size_pool->pages);
+        ccan_list_head_init(&heap->pages);
 
         for (i = 0; i < total_pages; i++) {
-            ccan_list_add(&size_pool->pages, &page_list[i]->page_node);
+            ccan_list_add(&heap->pages, &page_list[i]->page_node);
             if (page_list[i]->free_slots != 0) {
-                heap_add_freepage(size_pool, page_list[i]);
+                heap_add_freepage(heap, page_list[i]);
             }
         }
 
@@ -7135,16 +7135,16 @@ gc_update_references(rb_objspace_t *objspace)
 
     struct heap_page *page = NULL;
 
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+    for (int i = 0; i < HEAP_COUNT; i++) {
         bool should_set_mark_bits = TRUE;
-        rb_size_pool_t *size_pool = &size_pools[i];
+        rb_heap_t *heap = &heaps[i];
-        ccan_list_for_each(&size_pool->pages, page, page_node) {
+        ccan_list_for_each(&heap->pages, page, page_node) {
             uintptr_t start = (uintptr_t)page->start;
-            uintptr_t end = start + (page->total_slots * size_pool->slot_size);
+            uintptr_t end = start + (page->total_slots * heap->slot_size);
 
-            gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
-            if (page == size_pool->sweeping_page) {
+            gc_ref_update((void *)start, (void *)end, heap->slot_size, objspace, page);
+            if (page == heap->sweeping_page) {
                 should_set_mark_bits = FALSE;
             }
             if (should_set_mark_bits) {
@@ -7622,7 +7622,7 @@ setup_gc_stat_heap_symbols(void)
 }
 
 static size_t
-stat_one_heap(rb_size_pool_t *size_pool, VALUE hash, VALUE key)
+stat_one_heap(rb_heap_t *heap, VALUE hash, VALUE key)
 {
 #define SET(name, attr) \
     if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
@@ -7630,14 +7630,14 @@ stat_one_heap(rb_size_pool_t *size_pool, VALUE hash, VALUE key)
     else if (hash != Qnil) \
         rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
 
-    SET(slot_size, size_pool->slot_size);
-    SET(heap_eden_pages, size_pool->total_pages);
-    SET(heap_eden_slots, size_pool->total_slots);
-    SET(total_allocated_pages, size_pool->total_allocated_pages);
-    SET(force_major_gc_count, size_pool->force_major_gc_count);
-    SET(force_incremental_marking_finish_count, size_pool->force_incremental_marking_finish_count);
-    SET(total_allocated_objects, size_pool->total_allocated_objects);
-    SET(total_freed_objects, size_pool->total_freed_objects);
+    SET(slot_size, heap->slot_size);
+    SET(heap_eden_pages, heap->total_pages);
+    SET(heap_eden_slots, heap->total_slots);
+    SET(total_allocated_pages, heap->total_allocated_pages);
+    SET(force_major_gc_count, heap->force_major_gc_count);
+    SET(force_incremental_marking_finish_count, heap->force_incremental_marking_finish_count);
+    SET(total_allocated_objects, heap->total_allocated_objects);
+    SET(total_freed_objects, heap->total_freed_objects);
 #undef SET
 
     if (!NIL_P(key)) { /* matched key should return above */
@@ -7659,28 +7659,28 @@ rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
             rb_raise(rb_eTypeError, "non-hash given");
         }
 
-        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        for (int i = 0; i < HEAP_COUNT; i++) {
             VALUE hash = rb_hash_aref(hash_or_sym, INT2FIX(i));
             if (NIL_P(hash)) {
                 hash = rb_hash_new();
                 rb_hash_aset(hash_or_sym, INT2FIX(i), hash);
             }
 
-            stat_one_heap(&size_pools[i], hash, Qnil);
+            stat_one_heap(&heaps[i], hash, Qnil);
         }
     }
     else if (FIXNUM_P(heap_name)) {
-        int size_pool_idx = FIX2INT(heap_name);
+        int heap_idx = FIX2INT(heap_name);
 
-        if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
+        if (heap_idx < 0 || heap_idx >= HEAP_COUNT) {
             rb_raise(rb_eArgError, "size pool index out of range");
         }
 
         if (SYMBOL_P(hash_or_sym)) {
-            return stat_one_heap(&size_pools[size_pool_idx], Qnil, hash_or_sym);
+            return stat_one_heap(&heaps[heap_idx], Qnil, hash_or_sym);
         }
         else if (RB_TYPE_P(hash_or_sym, T_HASH)) {
-            return stat_one_heap(&size_pools[size_pool_idx], hash_or_sym, Qnil);
+            return stat_one_heap(&heaps[heap_idx], hash_or_sym, Qnil);
         }
         else {
             rb_raise(rb_eTypeError, "non-hash or symbol given");
@@ -7903,11 +7903,11 @@ rb_gc_impl_set_params(void *objspace_ptr)
         /* ok */
     }
 
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+    for (int i = 0; i < HEAP_COUNT; i++) {
         char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
         snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);
 
-        get_envparam_size(env_key, &gc_params.size_pool_init_slots[i], 0);
+        get_envparam_size(env_key, &gc_params.heap_init_slots[i], 0);
     }
 
     get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
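`stat_one_heap` now reads the renamed `rb_heap_t` fields, but the Ruby-visible API is unchanged: `GC.stat_heap` takes a heap index (or nil for every heap) plus an optional hash or symbol, exactly as `rb_gc_impl_stat_heap` dispatches above. A quick sketch of the call forms (values are build- and workload-dependent):

```ruby
p GC.stat_heap(0, :slot_size)  # one field for heap 0, via the symbol path
p GC.stat_heap(0)              # full stats hash for heap 0
p GC.stat_heap.keys            # all heaps keyed by index, e.g. [0, 1, 2, 3, 4]

# Initial slots per heap come from the env vars parsed in rb_gc_impl_set_params:
#   RUBY_GC_HEAP_0_INIT_SLOTS=600000 ruby ...
```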
@@ -9092,7 +9092,7 @@ gc_compact(VALUE self)
 #if GC_CAN_COMPILE_COMPACTION
 struct desired_compaction_pages_i_data {
     rb_objspace_t *objspace;
-    size_t required_slots[SIZE_POOL_COUNT];
+    size_t required_slots[HEAP_COUNT];
 };
 
 static int
@@ -9101,15 +9101,15 @@ desired_compaction_pages_i(struct heap_page *page, void *data)
     struct desired_compaction_pages_i_data *tdata = data;
     rb_objspace_t *objspace = tdata->objspace;
     VALUE vstart = (VALUE)page->start;
-    VALUE vend = vstart + (VALUE)(page->total_slots * page->size_pool->slot_size);
+    VALUE vend = vstart + (VALUE)(page->total_slots * page->heap->slot_size);
 
-    for (VALUE v = vstart; v != vend; v += page->size_pool->slot_size) {
+    for (VALUE v = vstart; v != vend; v += page->heap->slot_size) {
         asan_unpoisoning_object(v) {
             /* skip T_NONEs; they won't be moved */
             if (BUILTIN_TYPE(v) != T_NONE) {
-                rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, page->size_pool, v);
-                size_t dest_pool_idx = dest_pool - size_pools;
+                rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, page->heap, v);
+                size_t dest_pool_idx = dest_pool - heaps;
                 tdata->required_slots[dest_pool_idx]++;
             }
         }
@@ -9171,14 +9171,14 @@ gc_verify_compaction_references(int argc, VALUE* argv, VALUE self)
 
         /* Find out which pool has the most pages */
         size_t max_existing_pages = 0;
-        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-            rb_size_pool_t *size_pool = &size_pools[i];
-            max_existing_pages = MAX(max_existing_pages, size_pool->total_pages);
+        for (int i = 0; i < HEAP_COUNT; i++) {
+            rb_heap_t *heap = &heaps[i];
+            max_existing_pages = MAX(max_existing_pages, heap->total_pages);
         }
 
         /* Add pages to each size pool so that compaction is guaranteed to move every object */
-        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-            rb_size_pool_t *size_pool = &size_pools[i];
+        for (int i = 0; i < HEAP_COUNT; i++) {
+            rb_heap_t *heap = &heaps[i];
 
             size_t pages_to_add = 0;
 
             /*
@@ -9187,14 +9187,14 @@ gc_verify_compaction_references(int argc, VALUE* argv, VALUE self)
              * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
              * compact cursors met" condition on some pools before fully compacting others
             */
-            pages_to_add += max_existing_pages - size_pool->total_pages;
+            pages_to_add += max_existing_pages - heap->total_pages;
             /*
             * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
             * that want to be in that size pool, whether moved into it or moved within it
             */
            objspace->heap_pages.allocatable_slots = desired_compaction.required_slots[i];
            while (objspace->heap_pages.allocatable_slots > 0) {
-                heap_page_allocate_and_initialize(objspace, size_pool);
+                heap_page_allocate_and_initialize(objspace, heap);
            }
            /*
             * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
@@ -9203,7 +9203,7 @@ gc_verify_compaction_references(int argc, VALUE* argv, VALUE self)
             pages_to_add += 2;
 
             for (; pages_to_add > 0; pages_to_add--) {
-                heap_page_allocate_and_initialize_force(objspace, size_pool);
+                heap_page_allocate_and_initialize_force(objspace, heap);
             }
         }
     }
@@ -9245,10 +9245,10 @@ rb_gc_impl_objspace_free(void *objspace_ptr)
     heap_pages_lomem = 0;
     heap_pages_himem = 0;
 
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-        rb_size_pool_t *size_pool = &size_pools[i];
-        size_pool->total_pages = 0;
-        size_pool->total_slots = 0;
+    for (int i = 0; i < HEAP_COUNT; i++) {
+        rb_heap_t *heap = &heaps[i];
+        heap->total_pages = 0;
+        heap->total_slots = 0;
     }
 
     st_free_table(objspace->id_to_obj_tbl);
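`gc_verify_compaction_references` is the backend of `GC.verify_compaction_references`: it pads every heap to the size of the largest one, then adds enough free pages for every object's desired destination, so a full compaction pass is guaranteed to move everything. A sketch of calling it and reading the resulting counters (this needs a build with compaction support, and the exact key set depends on the Ruby version):

```ruby
stats = GC.verify_compaction_references(expand_heap: true, toward: :empty)
p stats[:considered][:T_OBJECT]  # objects examined for movement
p stats[:moved][:T_OBJECT]       # objects actually moved this pass
```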
@@ -9316,12 +9316,12 @@ rb_gc_impl_objspace_init(void *objspace_ptr)
         rb_bug("Could not preregister postponed job for GC");
     }
 
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-        rb_size_pool_t *size_pool = &size_pools[i];
+    for (int i = 0; i < HEAP_COUNT; i++) {
+        rb_heap_t *heap = &heaps[i];
 
-        size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
+        heap->slot_size = (1 << i) * BASE_SLOT_SIZE;
 
-        ccan_list_head_init(&size_pool->pages);
+        ccan_list_head_init(&heap->pages);
     }
 
     rb_darray_make(&objspace->heap_pages.sorted, 0);
@@ -9343,9 +9343,9 @@ rb_gc_impl_objspace_init(void *objspace_ptr)
     objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
 #endif
     /* Set size pools allocatable pages. */
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
-        /* Set the default value of size_pool_init_slots. */
-        gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
+    for (int i = 0; i < HEAP_COUNT; i++) {
+        /* Set the default value of heap_init_slots. */
+        gc_params.heap_init_slots[i] = GC_HEAP_INIT_SLOTS;
     }
 
     init_mark_stack(&objspace->mark_stack);
@@ -9364,8 +9364,8 @@ rb_gc_impl_init(void)
     rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
     rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
     rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
-    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
-    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
+    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(HEAP_COUNT));
+    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(heap_slot_size(HEAP_COUNT - 1)));
     rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
     if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
         rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
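`rb_gc_impl_objspace_init` keeps the doubling slot-size ladder, and `rb_gc_impl_init` now publishes `HEAP_COUNT` under `GC::INTERNAL_CONSTANTS` where `SIZE_POOL_COUNT` used to live. What a typical 64-bit build should report (the 40-byte `BASE_SLOT_SIZE` is an assumption of this sketch, not something this diff guarantees):

```ruby
base = GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE]
GC::INTERNAL_CONSTANTS[:HEAP_COUNT].times do |i|
  # heap->slot_size = (1 << i) * BASE_SLOT_SIZE => 40, 80, 160, 320, 640
  p [i, base << i, GC.stat_heap(i, :slot_size)]
end
# RVARGC_MAX_ALLOCATE_SIZE is the slot size of the largest heap:
p GC::INTERNAL_CONSTANTS[:RVARGC_MAX_ALLOCATE_SIZE]  # => 640 under these assumptions
```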
diff --git a/gc/gc.h b/gc/gc.h
index 9116822e812226..b4da013c1a8e52 100644
--- a/gc/gc.h
+++ b/gc/gc.h
@@ -40,7 +40,7 @@ const char *rb_obj_info(VALUE obj);
 bool rb_gc_shutdown_call_finalizer_p(VALUE obj);
 uint32_t rb_gc_get_shape(VALUE obj);
 void rb_gc_set_shape(VALUE obj, uint32_t shape_id);
-uint32_t rb_gc_rebuild_shape(VALUE obj, size_t size_pool_id);
+uint32_t rb_gc_rebuild_shape(VALUE obj, size_t heap_id);
 size_t rb_obj_memsize_of(VALUE obj);
 
 RUBY_SYMBOL_EXPORT_END
diff --git a/gc/gc_impl.h b/gc/gc_impl.h
index 925c336f95689f..045fce6c662ef8 100644
--- a/gc/gc_impl.h
+++ b/gc/gc_impl.h
@@ -32,7 +32,7 @@ GC_IMPL_FN void *rb_gc_impl_ractor_cache_alloc(void *objspace_ptr);
 GC_IMPL_FN void rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache);
 GC_IMPL_FN void rb_gc_impl_set_params(void *objspace_ptr);
 GC_IMPL_FN void rb_gc_impl_init(void);
-GC_IMPL_FN size_t *rb_gc_impl_size_pool_sizes(void *objspace_ptr);
+GC_IMPL_FN size_t *rb_gc_impl_heap_sizes(void *objspace_ptr);
 // Shutdown
 GC_IMPL_FN void rb_gc_impl_shutdown_free_objects(void *objspace_ptr);
 // GC
@@ -49,7 +49,7 @@ GC_IMPL_FN VALUE rb_gc_impl_config_set(void *objspace_ptr, VALUE hash);
 // Object allocation
 GC_IMPL_FN VALUE rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
 GC_IMPL_FN size_t rb_gc_impl_obj_slot_size(VALUE obj);
-GC_IMPL_FN size_t rb_gc_impl_size_pool_id_for_size(void *objspace_ptr, size_t size);
+GC_IMPL_FN size_t rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size);
 GC_IMPL_FN bool rb_gc_impl_size_allocatable_p(size_t size);
 // Malloc
 GC_IMPL_FN void *rb_gc_impl_malloc(void *objspace_ptr, size_t size);
diff --git a/internal/class.h b/internal/class.h
index 8a6c95623317b3..f94434b9389165 100644
--- a/internal/class.h
+++ b/internal/class.h
@@ -83,7 +83,7 @@ struct RClass {
     struct rb_id_table *m_tbl;
 };
 
-// Assert that classes can be embedded in size_pools[2] (which has 160B slot size)
+// Assert that classes can be embedded in heaps[2] (which has 160B slot size)
 STATIC_ASSERT(sizeof_rb_classext_t, sizeof(struct RClass) + sizeof(rb_classext_t) <= 4 * RVALUE_SIZE);
 
 struct RClass_and_rb_classext_t {
diff --git a/internal/gc.h b/internal/gc.h
index 81ca1a51d8b6f7..0f48916ee89e41 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -201,8 +201,8 @@ void *rb_gc_ractor_cache_alloc(void);
 void rb_gc_ractor_cache_free(void *cache);
 
 bool rb_gc_size_allocatable_p(size_t size);
-size_t *rb_gc_size_pool_sizes(void);
-size_t rb_gc_size_pool_id_for_size(size_t size);
+size_t *rb_gc_heap_sizes(void);
+size_t rb_gc_heap_id_for_size(size_t size);
 
 void rb_gc_mark_and_move(VALUE *ptr);
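The renamed `rb_gc_heap_id_for_size` in internal/gc.h maps an allocation size to the smallest heap whose slot can hold it. A hypothetical Ruby mirror of that lookup, written only for intuition (the real work happens in `heap_idx_for_size` on the C side; the 40-byte base and 5-heap count are assumptions of the sketch):

```ruby
def heap_id_for_size(size, base: 40, heap_count: 5)
  # Smallest i such that (1 << i) * base >= size.
  (0...heap_count).find { |i| (base << i) >= size } or
    raise ArgumentError, "size #{size} too large for any heap"
end

heap_id_for_size(24)   # => 0 (fits a 40-byte slot)
heap_id_for_size(100)  # => 2 (needs the 160-byte slot)
```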
diff --git a/misc/lldb_cruby.py b/misc/lldb_cruby.py
index 400ccb45b9aaf2..0707bb42d3027d 100755
--- a/misc/lldb_cruby.py
+++ b/misc/lldb_cruby.py
@@ -547,7 +547,7 @@ def __init__(self, page, target):
         self.target = target
         self.start = page.GetChildMemberWithName('start').GetValueAsUnsigned();
         self.num_slots = page.GetChildMemberWithName('total_slots').unsigned
-        self.slot_size = page.GetChildMemberWithName('size_pool').GetChildMemberWithName('slot_size').unsigned
+        self.slot_size = page.GetChildMemberWithName('heap').GetChildMemberWithName('slot_size').unsigned
         self.counter = 0
 
         self.tRBasic = target.FindFirstType("struct RBasic")
         self.tRValue = target.FindFirstType("struct RVALUE")
diff --git a/object.c b/object.c
index ae6ec6ea545086..1bd476b0228a0f 100644
--- a/object.c
+++ b/object.c
@@ -135,7 +135,7 @@ rb_class_allocate_instance(VALUE klass)
     RUBY_ASSERT(rb_shape_get_shape(obj)->type == SHAPE_ROOT);
 
     // Set the shape to the specific T_OBJECT shape.
-    ROBJECT_SET_SHAPE_ID(obj, (shape_id_t)(rb_gc_size_pool_id_for_size(size) + FIRST_T_OBJECT_SHAPE_ID));
+    ROBJECT_SET_SHAPE_ID(obj, (shape_id_t)(rb_gc_heap_id_for_size(size) + FIRST_T_OBJECT_SHAPE_ID));
 
 #if RUBY_DEBUG
     RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
@@ -358,7 +358,7 @@ rb_obj_copy_ivar(VALUE dest, VALUE obj)
 
     rb_shape_t *initial_shape = rb_shape_get_shape(dest);
 
-    if (initial_shape->size_pool_index != src_shape->size_pool_index) {
+    if (initial_shape->heap_index != src_shape->heap_index) {
         RUBY_ASSERT(initial_shape->type == SHAPE_T_OBJECT);
 
         shape_to_set_on_dest = rb_shape_rebuild_shape(initial_shape, src_shape);
diff --git a/rjit_c.rb b/rjit_c.rb
index d2142e99633e4c..422b7f8e3de912 100644
--- a/rjit_c.rb
+++ b/rjit_c.rb
@@ -1446,7 +1446,7 @@ def C.rb_shape
     next_iv_index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF((*((struct rb_shape *)NULL)), next_iv_index)")],
     capacity: [CType::Immediate.parse("uint32_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_shape *)NULL)), capacity)")],
     type: [CType::Immediate.parse("uint8_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_shape *)NULL)), type)")],
-    size_pool_index: [CType::Immediate.parse("uint8_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_shape *)NULL)), size_pool_index)")],
+    heap_index: [CType::Immediate.parse("uint8_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_shape *)NULL)), heap_index)")],
     parent_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct rb_shape *)NULL)), parent_id)")],
     ancestor_index: [CType::Pointer.new { self.redblack_node_t }, Primitive.cexpr!("OFFSETOF((*((struct rb_shape *)NULL)), ancestor_index)")],
 )
diff --git a/shape.c b/shape.c
index cfb6bae969d24e..b0e886692814ee 100644
--- a/shape.c
+++ b/shape.c
@@ -418,7 +418,7 @@ rb_shape_alloc(ID edge_name, rb_shape_t * parent, enum shape_type type)
 {
     rb_shape_t * shape = rb_shape_alloc_with_parent_id(edge_name, rb_shape_id(parent));
     shape->type = (uint8_t)type;
-    shape->size_pool_index = parent->size_pool_index;
+    shape->heap_index = parent->heap_index;
     shape->capacity = parent->capacity;
     shape->edges = 0;
     return shape;
@@ -1059,7 +1059,7 @@ rb_shape_t_to_rb_cShape(rb_shape_t *shape)
             INT2NUM(shape->parent_id),
             rb_shape_edge_name(shape),
             INT2NUM(shape->next_iv_index),
-            INT2NUM(shape->size_pool_index),
+            INT2NUM(shape->heap_index),
             INT2NUM(shape->type),
             INT2NUM(shape->capacity));
     rb_obj_freeze(obj);
@@ -1266,7 +1266,7 @@ Init_default_shapes(void)
     rb_shape_t *root = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
     root->capacity = 0;
     root->type = SHAPE_ROOT;
-    root->size_pool_index = 0;
+    root->heap_index = 0;
     GET_SHAPE_TREE()->root_shape = root;
     RUBY_ASSERT(rb_shape_id(GET_SHAPE_TREE()->root_shape) == ROOT_SHAPE_ID);
@@ -1282,16 +1282,16 @@ Init_default_shapes(void)
     rb_shape_t *too_complex_shape = rb_shape_alloc_with_parent_id(0, ROOT_SHAPE_ID);
     too_complex_shape->type = SHAPE_OBJ_TOO_COMPLEX;
-    too_complex_shape->size_pool_index = 0;
+    too_complex_shape->heap_index = 0;
     RUBY_ASSERT(OBJ_TOO_COMPLEX_SHAPE_ID == (GET_SHAPE_TREE()->next_shape_id - 1));
     RUBY_ASSERT(rb_shape_id(too_complex_shape) == OBJ_TOO_COMPLEX_SHAPE_ID);
 
     // Make shapes for T_OBJECT
-    size_t *sizes = rb_gc_size_pool_sizes();
+    size_t *sizes = rb_gc_heap_sizes();
     for (int i = 0; sizes[i] > 0; i++) {
         rb_shape_t *t_object_shape = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
         t_object_shape->type = SHAPE_T_OBJECT;
-        t_object_shape->size_pool_index = i;
+        t_object_shape->heap_index = i;
         t_object_shape->capacity = (uint32_t)((sizes[i] - offsetof(struct RObject, as.ary)) / sizeof(VALUE));
         t_object_shape->edges = rb_id_table_create(0);
         t_object_shape->ancestor_index = LEAF;
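The capacity assigned above decides how many instance variables a `T_OBJECT` embeds per heap: everything in the slot after the RObject header is ivar storage. Back-of-the-envelope numbers, assuming a 16-byte `offsetof(struct RObject, as.ary)` and 8-byte `VALUE`s on 64-bit:

```ruby
# capacity = (slot_size - offsetof(struct RObject, as.ary)) / sizeof(VALUE)
slot_sizes = [40, 80, 160, 320, 640]
p slot_sizes.map { |s| (s - 16) / 8 }  # => [3, 8, 18, 38, 78] embedded ivars
```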
@@ -1308,7 +1308,7 @@ Init_shape(void)
             "parent_id",
             "edge_name",
             "next_iv_index",
-            "size_pool_index",
+            "heap_index",
             "type",
             "capacity",
             NULL);
diff --git a/shape.h b/shape.h
index d02613d714467a..3fdbc34a397efa 100644
--- a/shape.h
+++ b/shape.h
@@ -47,7 +47,7 @@ struct rb_shape {
     attr_index_t next_iv_index;
     uint32_t capacity; // Total capacity of the object with this shape
     uint8_t type;
-    uint8_t size_pool_index;
+    uint8_t heap_index;
     shape_id_t parent_id;
     redblack_node_t * ancestor_index;
 };
diff --git a/test/ruby/test_gc.rb b/test/ruby/test_gc.rb
index 9a9796dc55ecfa..31ad71a1a18a1a 100644
--- a/test/ruby/test_gc.rb
+++ b/test/ruby/test_gc.rb
@@ -226,7 +226,7 @@ def test_stat_heap
     GC.stat_heap(0, stat_heap)
     GC.stat(stat)
 
-    GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT].times do |i|
+    GC::INTERNAL_CONSTANTS[:HEAP_COUNT].times do |i|
       EnvUtil.without_gc do
         GC.stat_heap(i, stat_heap)
         GC.stat(stat)
@@ -248,7 +248,7 @@ def test_stat_heap
     assert_equal stat_heap[:slot_size], GC.stat_heap(0)[:slot_size]
 
     assert_raise(ArgumentError) { GC.stat_heap(-1) }
-    assert_raise(ArgumentError) { GC.stat_heap(GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT]) }
+    assert_raise(ArgumentError) { GC.stat_heap(GC::INTERNAL_CONSTANTS[:HEAP_COUNT]) }
   end
 
   def test_stat_heap_all
@@ -259,7 +259,7 @@ def test_stat_heap_all
     GC.stat_heap(0, stat_heap)
     GC.stat_heap(nil, stat_heap_all)
 
-    GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT].times do |i|
+    GC::INTERNAL_CONSTANTS[:HEAP_COUNT].times do |i|
       GC.stat_heap(nil, stat_heap_all)
       GC.stat_heap(i, stat_heap)
 
@@ -538,7 +538,7 @@ def test_gc_parameter_init_slots
       gc_count = GC.stat(:count)
       # Fill up all of the size pools to the init slots
-      GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT].times do |i|
+      GC::INTERNAL_CONSTANTS[:HEAP_COUNT].times do |i|
         capa = (GC.stat_heap(i, :slot_size) - GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD] - (2 * RbConfig::SIZEOF["void*"])) / RbConfig::SIZEOF["void*"]
         while GC.stat_heap(i, :heap_eden_slots) < GC_HEAP_INIT_SLOTS
           Array.new(capa)
@@ -558,7 +558,7 @@ def test_gc_parameter_init_slots
       gc_count = GC.stat(:count)
       # Fill up all of the size pools to the init slots
-      GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT].times do |i|
+      GC::INTERNAL_CONSTANTS[:HEAP_COUNT].times do |i|
         capa = (GC.stat_heap(i, :slot_size) - GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD] - (2 * RbConfig::SIZEOF["void*"])) / RbConfig::SIZEOF["void*"]
         while GC.stat_heap(i, :heap_eden_slots) < SIZES[i]
          Array.new(capa)
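`test_gc_parameter_init_slots` sizes its throwaway arrays so each one consumes exactly one slot of heap `i`: an Array embeds its elements while they fit the slot, so `capa` is derived by working the slot-size arithmetic backwards. The same trick as a standalone sketch (heap index 1 and the 1000 iterations are arbitrary choices):

```ruby
require "rbconfig"

i = 1
overhead = GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD] + 2 * RbConfig::SIZEOF["void*"]
capa = (GC.stat_heap(i, :slot_size) - overhead) / RbConfig::SIZEOF["void*"]

before = GC.stat_heap(i, :total_allocated_objects)
1000.times { Array.new(capa) }
p GC.stat_heap(i, :total_allocated_objects) - before  # ~1000, plus temporaries
```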
diff --git a/test/ruby/test_gc_compact.rb b/test/ruby/test_gc_compact.rb
index c331968b3d5c13..26d7c71687fa96 100644
--- a/test/ruby/test_gc_compact.rb
+++ b/test/ruby/test_gc_compact.rb
@@ -283,7 +283,7 @@ def test_updating_references_for_embed_frozen_shared_arrays
     end;
   end
 
-  def test_moving_arrays_down_size_pools
-    omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1
+  def test_moving_arrays_down_heaps
+    omit if GC::INTERNAL_CONSTANTS[:HEAP_COUNT] == 1
     assert_separately(%w[-robjspace], "#{<<~"begin;"}\n#{<<~"end;"}", timeout: 10)
@@ -305,7 +305,7 @@ def test_moving_arrays_down_size_pools
     end;
   end
 
-  def test_moving_arrays_up_size_pools
-    omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1
+  def test_moving_arrays_up_heaps
+    omit if GC::INTERNAL_CONSTANTS[:HEAP_COUNT] == 1
     assert_separately(%w[-robjspace], "#{<<~"begin;"}\n#{<<~"end;"}", timeout: 10)
@@ -329,7 +329,7 @@ def test_moving_arrays_up_size_pools
     end;
   end
 
-  def test_moving_objects_between_size_pools
-    omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1
+  def test_moving_objects_between_heaps
+    omit if GC::INTERNAL_CONSTANTS[:HEAP_COUNT] == 1
     assert_separately(%w[-robjspace], "#{<<~"begin;"}\n#{<<~"end;"}", timeout: 60)
@@ -361,7 +361,7 @@ def add_ivars
     end;
   end
 
-  def test_moving_strings_up_size_pools
-    omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1
+  def test_moving_strings_up_heaps
+    omit if GC::INTERNAL_CONSTANTS[:HEAP_COUNT] == 1
     assert_separately(%w[-robjspace], "#{<<~"begin;"}\n#{<<~"end;"}", timeout: 30)
@@ -382,7 +382,7 @@ def test_moving_strings_up_size_pools
     end;
   end
 
-  def test_moving_strings_down_size_pools
-    omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1
+  def test_moving_strings_down_heaps
+    omit if GC::INTERNAL_CONSTANTS[:HEAP_COUNT] == 1
     assert_separately(%w[-robjspace], "#{<<~"begin;"}\n#{<<~"end;"}", timeout: 30)
@@ -402,7 +402,7 @@ def test_moving_strings_down_size_pools
     end;
   end
 
-  def test_moving_hashes_down_size_pools
-    omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1
+  def test_moving_hashes_down_heaps
+    omit if GC::INTERNAL_CONSTANTS[:HEAP_COUNT] == 1
     # AR and ST hashes are in the same size pool on 32 bit
     omit unless RbConfig::SIZEOF["uint64_t"] <= RbConfig::SIZEOF["void*"]
@@ -425,7 +425,7 @@ def test_moving_hashes_down_size_pools
     end;
   end
 
-  def test_moving_objects_between_size_pools_keeps_shape_frozen_status
+  def test_moving_objects_between_heaps_keeps_shape_frozen_status
     # [Bug #19536]
     assert_separately([], "#{<<~"begin;"}\n#{<<~"end;"}")
     begin;
diff --git a/test/ruby/test_string.rb b/test/ruby/test_string.rb
index 8658097ae409ec..ba8d86d442d225 100644
--- a/test/ruby/test_string.rb
+++ b/test/ruby/test_string.rb
@@ -662,8 +662,8 @@ def test_concat_literals
     assert_equal(Encoding::UTF_8, "#{s}x".encoding)
   end
 
-  def test_string_interpolations_across_size_pools_get_embedded
-    omit if GC::INTERNAL_CONSTANTS[:SIZE_POOL_COUNT] == 1
+  def test_string_interpolations_across_heaps_get_embedded
+    omit if GC::INTERNAL_CONSTANTS[:HEAP_COUNT] == 1
 
     require 'objspace'
     base_slot_size = GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE]
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index 03cb053fd323d9..ea4c14e512628b 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -648,7 +648,7 @@ pub struct rb_shape {
     pub next_iv_index: attr_index_t,
     pub capacity: u32,
     pub type_: u8,
-    pub size_pool_index: u8,
+    pub heap_index: u8,
     pub parent_id: shape_id_t,
     pub ancestor_index: *mut redblack_node_t,
 }
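Finally, the renamed string test pins down why the heap split is visible from pure Ruby at all: interpolating strings that live in different heaps should still produce an embedded result whenever the concatenation fits some slot. A condensed version of the check (the `ObjectSpace.dump` probe for `"embedded":true` mirrors the test's technique; the length choices assume the usual slot ladder):

```ruby
require "objspace"

base = GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE]
short = "a" * 8           # comfortably in the smallest heap
long  = "b" * (base * 2)  # needs a larger heap
buf = "#{short}#{long}"   # interpolation across heaps
p ObjectSpace.dump(buf).include?('"embedded":true')  # true when re-embedded
```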