From 4cb6d7558c78f7b04d31fd482eef0ff16d083baf Mon Sep 17 00:00:00 2001 From: Aaron Patterson Date: Tue, 31 May 2016 17:46:36 -0700 Subject: [PATCH 1/7] start dividing the heaps --- class.c | 2 +- gc.c | 96 ++++++++++++++++++++++++++++++++++----------- include/ruby/ruby.h | 4 +- 3 files changed, 78 insertions(+), 24 deletions(-) diff --git a/class.c b/class.c index ce2ede5c2b3fca..4c0d908fa8ede1 100644 --- a/class.c +++ b/class.c @@ -162,7 +162,7 @@ rb_class_detach_module_subclasses(VALUE klass) static VALUE class_alloc(VALUE flags, VALUE klass) { - NEWOBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0)); + NEW_OLDISH_OBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0)); obj->ptr = ZALLOC(rb_classext_t); /* ZALLOC RCLASS_IV_TBL(obj) = 0; diff --git a/gc.c b/gc.c index 2eb049184cae81..f6a158332987c3 100644 --- a/gc.c +++ b/gc.c @@ -527,6 +527,7 @@ typedef struct rb_objspace { size_t total_allocated_objects; rb_heap_t eden_heap; + rb_heap_t oldish_heap; rb_heap_t tomb_heap; /* heap for zombies and ghosts */ struct { @@ -732,6 +733,7 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress; #define heap_pages_final_slots objspace->heap_pages.final_slots #define heap_pages_deferred_final objspace->heap_pages.deferred_final #define heap_eden (&objspace->eden_heap) +#define heap_oldish (&objspace->oldish_heap) #define heap_tomb (&objspace->tomb_heap) #define dont_gc objspace->flags.dont_gc #define during_gc objspace->flags.during_gc @@ -1320,7 +1322,7 @@ static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page); void rb_objspace_free(rb_objspace_t *objspace) { - if (is_lazy_sweeping(heap_eden)) + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) rb_bug("lazy sweeping underway when freeing object space"); if (objspace->profile.records) { @@ -1349,6 
+1351,9 @@ rb_objspace_free(rb_objspace_t *objspace) objspace->eden_heap.total_pages = 0; objspace->eden_heap.total_slots = 0; objspace->eden_heap.pages = NULL; + objspace->oldish_heap.total_pages = 0; + objspace->oldish_heap.total_slots = 0; + objspace->oldish_heap.pages = NULL; } free_stack_chunks(&objspace->mark_stack); #if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE) @@ -1362,6 +1367,7 @@ heap_pages_expand_sorted(rb_objspace_t *objspace) { size_t next_length = heap_allocatable_pages; next_length += heap_eden->total_pages; + next_length += heap_oldish->total_pages; next_length += heap_tomb->total_pages; if (next_length > heap_pages_sorted_length) { @@ -1647,7 +1653,7 @@ heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots static void heap_set_increment(rb_objspace_t *objspace, size_t additional_pages) { - size_t used = heap_eden->total_pages; + size_t used = heap_eden->total_pages + heap_oldish->total_pages; size_t next_used_limit = used + additional_pages; if (next_used_limit == heap_allocated_pages) next_used_limit++; @@ -1935,6 +1941,12 @@ rb_newobj(void) return newobj_of(0, T_NONE, 0, 0, 0, FALSE); } +VALUE +rb_new_oldish_obj_of(VALUE klass, VALUE flags) +{ + return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED); +} + VALUE rb_newobj_of(VALUE klass, VALUE flags) { @@ -2315,6 +2327,7 @@ Init_heap(void) #endif heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT); + heap_add_pages(objspace, heap_oldish, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT); init_mark_stack(&objspace->mark_stack); #ifdef USE_SIGALTSTACK @@ -2936,7 +2949,7 @@ heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr) static inline int is_swept_object(rb_objspace_t *objspace, VALUE ptr) { - if (heap_is_swept_object(objspace, heap_eden, ptr)) { + if (heap_is_swept_object(objspace, heap_eden, ptr) || heap_is_swept_object(objspace, heap_oldish, ptr)) { return TRUE; } else { @@ 
-2949,6 +2962,7 @@ static inline int is_garbage_object(rb_objspace_t *objspace, VALUE ptr) { if (!is_lazy_sweeping(heap_eden) || + !is_lazy_sweeping(heap_oldish) || is_swept_object(objspace, ptr) || MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) { @@ -3386,7 +3400,7 @@ count_objects(int argc, VALUE *argv, VALUE os) static size_t objspace_available_slots(rb_objspace_t *objspace) { - return heap_eden->total_slots + heap_tomb->total_slots; + return heap_eden->total_slots + heap_tomb->total_slots + heap_oldish->total_slots; } static size_t @@ -3573,6 +3587,7 @@ gc_sweep_start(rb_objspace_t *objspace) { gc_mode_transition(objspace, gc_mode_sweeping); gc_sweep_start_heap(objspace, heap_eden); + gc_sweep_start_heap(objspace, heap_oldish); } static void @@ -3670,6 +3685,12 @@ gc_sweep_rest(rb_objspace_t *objspace) while (has_sweeping_pages(heap)) { gc_sweep_step(objspace, heap); } + + heap = heap_oldish; + + while (has_sweeping_pages(heap)) { + gc_sweep_step(objspace, heap); + } } #if GC_ENABLE_LAZY_SWEEP @@ -3689,6 +3710,17 @@ gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap) } #endif +static void +gc_mark_before_sweep(rb_heap_t *heap) +{ + struct heap_page *page; + page = heap->sweep_pages; + while (page) { + page->flags.before_sweep = TRUE; + page = page->next; + } +} + static void gc_sweep(rb_objspace_t *objspace) { @@ -3707,17 +3739,15 @@ gc_sweep(rb_objspace_t *objspace) #endif } else { - struct heap_page *page; gc_sweep_start(objspace); - page = heap_eden->sweep_pages; - while (page) { - page->flags.before_sweep = TRUE; - page = page->next; - } + gc_mark_before_sweep(heap_eden); + gc_mark_before_sweep(heap_oldish); gc_sweep_step(objspace, heap_eden); + gc_sweep_step(objspace, heap_oldish); } gc_heap_prepare_minimum_pages(objspace, heap_eden); + gc_heap_prepare_minimum_pages(objspace, heap_oldish); } /* Marking - Marking stack */ @@ -5183,6 +5213,7 @@ gc_verify_heap_pages(rb_objspace_t *objspace) { int rememberd_old_objects = 0; rememberd_old_objects = 
gc_verify_heap_pages_(objspace, heap_eden->pages); + rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_oldish->pages); rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_tomb->pages); return rememberd_old_objects; } @@ -5227,7 +5258,7 @@ gc_verify_internal_consistency(VALUE dummy) /* check counters */ - if (!is_lazy_sweeping(heap_eden) && !finalizing) { + if ((!is_lazy_sweeping(heap_eden) || !is_lazy_sweeping(heap_oldish)) && !finalizing) { if (objspace_live_slots(objspace) != data.live_object_count) { fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n", (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects); @@ -5305,6 +5336,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark) objspace->rgengc.last_major_gc = objspace->profile.count; objspace->marked_slots = 0; rgengc_mark_and_rememberset_clear(objspace, heap_eden); + rgengc_mark_and_rememberset_clear(objspace, heap_oldish); } else { objspace->flags.during_minor_gc = TRUE; @@ -5312,6 +5344,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark) objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */ objspace->profile.minor_gc_count++; rgengc_rememberset_mark(objspace, heap_eden); + rgengc_rememberset_mark(objspace, heap_oldish); } #endif @@ -5322,9 +5355,9 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark) #if GC_ENABLE_INCREMENTAL_MARK static void -gc_marks_wb_unprotected_objects(rb_objspace_t *objspace) +gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap) { - struct heap_page *page = heap_eden->pages; + struct heap_page *page = heap->pages; while (page) { bits_t *mark_bits = page->mark_bits; @@ -5387,6 +5420,12 @@ gc_marks_finish(rb_objspace_t *objspace) return FALSE; /* continue marking phase */ } + if (heap_oldish->pooled_pages) { + heap_move_pooled_pages_to_free_pages(heap_oldish); + gc_report(1, objspace, 
"gc_marks_finish: pooled pages are exists. retry.\n"); + return FALSE; /* continue marking phase */ + } + if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) { rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack)); } @@ -5406,7 +5445,8 @@ gc_marks_finish(rb_objspace_t *objspace) objspace->flags.during_incremental_marking = FALSE; /* check children of all marked wb-unprotected objects */ - gc_marks_wb_unprotected_objects(objspace); + gc_marks_wb_unprotected_objects(objspace, heap_eden); + gc_marks_wb_unprotected_objects(objspace, heap_oldish); } #endif /* GC_ENABLE_INCREMENTAL_MARK */ @@ -5430,14 +5470,15 @@ gc_marks_finish(rb_objspace_t *objspace) { /* decide full GC is needed or not */ rb_heap_t *heap = heap_eden; - size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots; + rb_heap_t *oheap = heap_oldish; + size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots + oheap->total_slots; size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */ size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio); size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio); int full_marking = is_full_marking(objspace); #if RGENGC_CHECK_MODE - assert(heap->total_slots >= objspace->marked_slots); + assert((heap->total_slots + oheap->total_slots) >= objspace->marked_slots); #endif /* setup free-able page counts */ @@ -5531,6 +5572,7 @@ gc_marks_rest(rb_objspace_t *objspace) #if GC_ENABLE_INCREMENTAL_MARK heap_eden->pooled_pages = NULL; + heap_oldish->pooled_pages = NULL; #endif if (is_incremental_marking(objspace)) { @@ -5634,7 +5676,7 @@ gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...) status = is_full_marking(objspace) ? 
"+" : "-"; } else { - if (is_lazy_sweeping(heap_eden)) { + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) { status = "S"; } if (is_incremental_marking(objspace)) { @@ -6214,6 +6256,7 @@ ready_to_gc(rb_objspace_t *objspace) { if (dont_gc || during_gc || ruby_disable_gc) { heap_ready_to_gc(objspace, heap_eden); + heap_ready_to_gc(objspace, heap_oldish); return FALSE; } else { @@ -6318,6 +6361,7 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, if (RGENGC_CHECK_MODE) { assert(gc_mode(objspace) == gc_mode_none); assert(!is_lazy_sweeping(heap_eden)); + assert(!is_lazy_sweeping(heap_oldish)); assert(!is_incremental_marking(objspace)); #if RGENGC_CHECK_MODE >= 2 gc_verify_internal_consistency(Qnil); @@ -6397,7 +6441,7 @@ static void gc_rest(rb_objspace_t *objspace) { int marking = is_incremental_marking(objspace); - int sweeping = is_lazy_sweeping(heap_eden); + int sweeping = is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish); if (marking || sweeping) { gc_enter(objspace, "gc_rest"); @@ -6409,7 +6453,7 @@ gc_rest(rb_objspace_t *objspace) gc_marks_rest(objspace); POP_MARK_FUNC_DATA(); } - if (is_lazy_sweeping(heap_eden)) { + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) { gc_sweep_rest(objspace); } gc_exit(objspace, "gc_rest"); @@ -6439,7 +6483,7 @@ gc_current_status_fill(rb_objspace_t *objspace, char *buff) } else if (is_sweeping(objspace)) { buff[i++] = 'S'; - if (is_lazy_sweeping(heap_eden)) buff[i++] = 'L'; + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) buff[i++] = 'L'; } else { buff[i++] = 'N'; @@ -6825,6 +6869,7 @@ enum gc_stat_sym { gc_stat_sym_heap_final_slots, gc_stat_sym_heap_marked_slots, gc_stat_sym_heap_eden_pages, + gc_stat_sym_heap_oldish_pages, gc_stat_sym_heap_tomb_pages, gc_stat_sym_total_allocated_pages, gc_stat_sym_total_freed_pages, @@ -6858,6 +6903,7 @@ enum gc_stat_sym { enum gc_stat_compat_sym { gc_stat_compat_sym_gc_stat_heap_used, 
gc_stat_compat_sym_heap_eden_page_length, + gc_stat_compat_sym_heap_oldish_page_length, gc_stat_compat_sym_heap_tomb_page_length, gc_stat_compat_sym_heap_increment, gc_stat_compat_sym_heap_length, @@ -6901,6 +6947,7 @@ setup_gc_stat_symbols(void) S(heap_final_slots); S(heap_marked_slots); S(heap_eden_pages); + S(heap_oldish_pages); S(heap_tomb_pages); S(total_allocated_pages); S(total_freed_pages); @@ -6932,6 +6979,7 @@ setup_gc_stat_symbols(void) #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s)) S(gc_stat_heap_used); S(heap_eden_page_length); + S(heap_oldish_page_length); S(heap_tomb_page_length); S(heap_increment); S(heap_length); @@ -6965,6 +7013,7 @@ setup_gc_stat_symbols(void) #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s] rb_hash_aset(table, OLD_SYM(gc_stat_heap_used), NEW_SYM(heap_allocated_pages)); rb_hash_aset(table, OLD_SYM(heap_eden_page_length), NEW_SYM(heap_eden_pages)); + rb_hash_aset(table, OLD_SYM(heap_oldish_page_length), NEW_SYM(heap_oldish_pages)); rb_hash_aset(table, OLD_SYM(heap_tomb_page_length), NEW_SYM(heap_tomb_pages)); rb_hash_aset(table, OLD_SYM(heap_increment), NEW_SYM(heap_allocatable_pages)); rb_hash_aset(table, OLD_SYM(heap_length), NEW_SYM(heap_sorted_length)); @@ -7073,6 +7122,7 @@ gc_stat_internal(VALUE hash_or_sym) SET(heap_final_slots, heap_pages_final_slots); SET(heap_marked_slots, objspace->marked_slots); SET(heap_eden_pages, heap_eden->total_pages); + SET(heap_oldish_pages, heap_oldish->total_pages); SET(heap_tomb_pages, heap_tomb->total_pages); SET(total_allocated_pages, objspace->profile.total_allocated_pages); SET(total_freed_pages, objspace->profile.total_freed_pages); @@ -7147,6 +7197,7 @@ gc_stat_internal(VALUE hash_or_sym) * :heap_final_slots=>0, * :heap_marked_slots=>0, * :heap_eden_pages=>24, + * :heap_oldish_pages=>24, * :heap_tomb_pages=>0, * :total_allocated_pages=>24, * :total_freed_pages=>0, @@ -7407,6 +7458,7 @@ gc_set_initial_pages(void) if (min_pages > 
heap_eden->total_pages) { heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages); } + heap_add_pages(objspace, heap_oldish, 10); } /* @@ -7725,7 +7777,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si if (type == MEMOP_TYPE_MALLOC) { retry: if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) { - if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) { + if (ruby_thread_has_gvl_p() && (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish))) { gc_rest(objspace); /* gc_rest can reduce malloc_increase */ goto retry; } @@ -9390,7 +9442,7 @@ rb_gcdebug_print_obj_condition(VALUE obj) fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false"); #endif - if (is_lazy_sweeping(heap_eden)) { + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) { fprintf(stderr, "lazy sweeping?: true\n"); fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet"); } diff --git a/include/ruby/ruby.h b/include/ruby/ruby.h index 634fe60c5125b3..46d25a9921bccf 100644 --- a/include/ruby/ruby.h +++ b/include/ruby/ruby.h @@ -737,9 +737,11 @@ VALUE rb_newobj(void); VALUE rb_newobj_of(VALUE, VALUE); VALUE rb_obj_setup(VALUE obj, VALUE klass, VALUE type); #define RB_NEWOBJ(obj,type) type *(obj) = (type*)rb_newobj() -#define RB_NEWOBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_newobj_of(klass, flags) +#define RB_NEWOBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_new_oldish_obj_of(klass, flags) +#define RB_NEW_OLDISH_OBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_newobj_of(klass, flags) #define NEWOBJ(obj,type) RB_NEWOBJ(obj,type) #define NEWOBJ_OF(obj,type,klass,flags) RB_NEWOBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */ +#define NEW_OLDISH_OBJ_OF(obj,type,klass,flags) RB_NEW_OLDISH_OBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */ #define OBJSETUP(obj,c,t) rb_obj_setup(obj, c, t) /* 
use NEWOBJ_OF instead of NEWOBJ()+OBJSETUP() */ #define CLONESETUP(clone,obj) rb_clone_setup(clone,obj) #define DUPSETUP(dup,obj) rb_dup_setup(dup,obj) From 97296f2ba7d56b928f7cfdc9db74cbf2629adb3d Mon Sep 17 00:00:00 2001 From: Aaron Patterson Date: Thu, 16 Jun 2016 13:37:35 -0700 Subject: [PATCH 2/7] call the right function --- include/ruby/ruby.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/ruby/ruby.h b/include/ruby/ruby.h index 46d25a9921bccf..8e85808cb9be54 100644 --- a/include/ruby/ruby.h +++ b/include/ruby/ruby.h @@ -735,10 +735,11 @@ VALUE rb_int2big(SIGNED_VALUE); VALUE rb_newobj(void); VALUE rb_newobj_of(VALUE, VALUE); +VALUE rb_new_oldish_obj_of(VALUE, VALUE); VALUE rb_obj_setup(VALUE obj, VALUE klass, VALUE type); #define RB_NEWOBJ(obj,type) type *(obj) = (type*)rb_newobj() -#define RB_NEWOBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_new_oldish_obj_of(klass, flags) -#define RB_NEW_OLDISH_OBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_newobj_of(klass, flags) +#define RB_NEWOBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_newobj_of(klass, flags) +#define RB_NEW_OLDISH_OBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_new_oldish_obj_of(klass, flags) #define NEWOBJ(obj,type) RB_NEWOBJ(obj,type) #define NEWOBJ_OF(obj,type,klass,flags) RB_NEWOBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */ #define NEW_OLDISH_OBJ_OF(obj,type,klass,flags) RB_NEW_OLDISH_OBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */ From f21e36209228a496a3c34970ae5d421c7fef0eb3 Mon Sep 17 00:00:00 2001 From: Aaron Patterson Date: Thu, 16 Jun 2016 15:40:44 -0700 Subject: [PATCH 3/7] allocate in a different heap --- gc.c | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/gc.c b/gc.c index f6a158332987c3..fd5025299c9e44 100644 --- a/gc.c +++ b/gc.c @@ -1852,7 +1852,7 @@ newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, 
VALUE v3, int wb_prote } static inline VALUE -newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected) +newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected, rb_heap_t * eden) { VALUE obj; @@ -1870,31 +1870,30 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp } } - obj = heap_get_freeobj(objspace, heap_eden); + obj = heap_get_freeobj(objspace, eden); newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj); gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj); return obj; } -NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)); -NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)); +NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden)); +NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden)); static VALUE -newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace) +newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden) { - return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE); + return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE, eden); } static VALUE -newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace) +newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, rb_heap_t *eden) { - return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE); + return newobj_slowpath(klass, flags, v1, v2, v3, 
objspace, FALSE, eden); } static inline VALUE -newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected) +newobj_of_with_eden(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, rb_objspace_t * objspace, rb_heap_t *eden) { - rb_objspace_t *objspace = &rb_objspace; VALUE obj; #if GC_DEBUG_STRESS_TO_CLASS @@ -1909,16 +1908,31 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protect if (!(during_gc || ruby_gc_stressful || gc_event_hook_available_p(objspace)) && - (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) { + (obj = heap_get_freeobj_head(objspace, eden)) != Qfalse) { return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj); } else { return wb_protected ? - newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) : - newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace); + newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace, eden) : + newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace, eden); } } +static inline VALUE +oldishobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected) +{ + rb_objspace_t *objspace = &rb_objspace; + return newobj_of_with_eden(klass, flags, v1, v2, v3, wb_protected, objspace, heap_oldish); +} + +static inline VALUE +newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected) +{ + rb_objspace_t *objspace = &rb_objspace; + return newobj_of_with_eden(klass, flags, v1, v2, v3, wb_protected, objspace, heap_eden); +} + + VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags) { @@ -1944,7 +1958,7 @@ rb_newobj(void) VALUE rb_new_oldish_obj_of(VALUE klass, VALUE flags) { - return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED); + return oldishobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED); } VALUE From 2e5f0c672dee9eb3d5cfc3b0dd95b20d86efe357 Mon Sep 17 00:00:00 2001 From: Aaron Patterson 
Date: Mon, 11 Jul 2016 14:52:04 -0700 Subject: [PATCH 4/7] fix name --- class.c | 2 +- gc.c | 92 ++++++++++++++++++++++----------------------- include/ruby/ruby.h | 6 +-- 3 files changed, 50 insertions(+), 50 deletions(-) diff --git a/class.c b/class.c index 4c0d908fa8ede1..9c520a19d0291d 100644 --- a/class.c +++ b/class.c @@ -162,7 +162,7 @@ rb_class_detach_module_subclasses(VALUE klass) static VALUE class_alloc(VALUE flags, VALUE klass) { - NEW_OLDISH_OBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0)); + NEW_TCLASS_OBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0)); obj->ptr = ZALLOC(rb_classext_t); /* ZALLOC RCLASS_IV_TBL(obj) = 0; diff --git a/gc.c b/gc.c index fd5025299c9e44..baecf34c010eca 100644 --- a/gc.c +++ b/gc.c @@ -527,7 +527,7 @@ typedef struct rb_objspace { size_t total_allocated_objects; rb_heap_t eden_heap; - rb_heap_t oldish_heap; + rb_heap_t tclass_heap; rb_heap_t tomb_heap; /* heap for zombies and ghosts */ struct { @@ -733,7 +733,7 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress; #define heap_pages_final_slots objspace->heap_pages.final_slots #define heap_pages_deferred_final objspace->heap_pages.deferred_final #define heap_eden (&objspace->eden_heap) -#define heap_oldish (&objspace->oldish_heap) +#define heap_tclass (&objspace->tclass_heap) #define heap_tomb (&objspace->tomb_heap) #define dont_gc objspace->flags.dont_gc #define during_gc objspace->flags.during_gc @@ -1322,7 +1322,7 @@ static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page); void rb_objspace_free(rb_objspace_t *objspace) { - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) rb_bug("lazy sweeping underway when freeing object space"); if (objspace->profile.records) { @@ -1351,9 
+1351,9 @@ rb_objspace_free(rb_objspace_t *objspace) objspace->eden_heap.total_pages = 0; objspace->eden_heap.total_slots = 0; objspace->eden_heap.pages = NULL; - objspace->oldish_heap.total_pages = 0; - objspace->oldish_heap.total_slots = 0; - objspace->oldish_heap.pages = NULL; + objspace->tclass_heap.total_pages = 0; + objspace->tclass_heap.total_slots = 0; + objspace->tclass_heap.pages = NULL; } free_stack_chunks(&objspace->mark_stack); #if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE) @@ -1367,7 +1367,7 @@ heap_pages_expand_sorted(rb_objspace_t *objspace) { size_t next_length = heap_allocatable_pages; next_length += heap_eden->total_pages; - next_length += heap_oldish->total_pages; + next_length += heap_tclass->total_pages; next_length += heap_tomb->total_pages; if (next_length > heap_pages_sorted_length) { @@ -1653,7 +1653,7 @@ heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots static void heap_set_increment(rb_objspace_t *objspace, size_t additional_pages) { - size_t used = heap_eden->total_pages + heap_oldish->total_pages; + size_t used = heap_eden->total_pages + heap_tclass->total_pages; size_t next_used_limit = used + additional_pages; if (next_used_limit == heap_allocated_pages) next_used_limit++; @@ -1919,10 +1919,10 @@ newobj_of_with_eden(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int } static inline VALUE -oldishobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected) +tclassobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected) { rb_objspace_t *objspace = &rb_objspace; - return newobj_of_with_eden(klass, flags, v1, v2, v3, wb_protected, objspace, heap_oldish); + return newobj_of_with_eden(klass, flags, v1, v2, v3, wb_protected, objspace, heap_tclass); } static inline VALUE @@ -1956,9 +1956,9 @@ rb_newobj(void) } VALUE -rb_new_oldish_obj_of(VALUE klass, VALUE flags) +rb_new_tclass_obj_of(VALUE klass, VALUE flags) { - return oldishobj_of(klass, flags & 
~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED); + return tclassobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED); } VALUE @@ -2341,7 +2341,7 @@ Init_heap(void) #endif heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT); - heap_add_pages(objspace, heap_oldish, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT); + heap_add_pages(objspace, heap_tclass, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT); init_mark_stack(&objspace->mark_stack); #ifdef USE_SIGALTSTACK @@ -2963,7 +2963,7 @@ heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr) static inline int is_swept_object(rb_objspace_t *objspace, VALUE ptr) { - if (heap_is_swept_object(objspace, heap_eden, ptr) || heap_is_swept_object(objspace, heap_oldish, ptr)) { + if (heap_is_swept_object(objspace, heap_eden, ptr) || heap_is_swept_object(objspace, heap_tclass, ptr)) { return TRUE; } else { @@ -2976,7 +2976,7 @@ static inline int is_garbage_object(rb_objspace_t *objspace, VALUE ptr) { if (!is_lazy_sweeping(heap_eden) || - !is_lazy_sweeping(heap_oldish) || + !is_lazy_sweeping(heap_tclass) || is_swept_object(objspace, ptr) || MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) { @@ -3414,7 +3414,7 @@ count_objects(int argc, VALUE *argv, VALUE os) static size_t objspace_available_slots(rb_objspace_t *objspace) { - return heap_eden->total_slots + heap_tomb->total_slots + heap_oldish->total_slots; + return heap_eden->total_slots + heap_tomb->total_slots + heap_tclass->total_slots; } static size_t @@ -3601,7 +3601,7 @@ gc_sweep_start(rb_objspace_t *objspace) { gc_mode_transition(objspace, gc_mode_sweeping); gc_sweep_start_heap(objspace, heap_eden); - gc_sweep_start_heap(objspace, heap_oldish); + gc_sweep_start_heap(objspace, heap_tclass); } static void @@ -3700,7 +3700,7 @@ gc_sweep_rest(rb_objspace_t *objspace) gc_sweep_step(objspace, heap); } - heap = heap_oldish; + heap = heap_tclass; while (has_sweeping_pages(heap)) { gc_sweep_step(objspace, 
heap); @@ -3755,13 +3755,13 @@ gc_sweep(rb_objspace_t *objspace) else { gc_sweep_start(objspace); gc_mark_before_sweep(heap_eden); - gc_mark_before_sweep(heap_oldish); + gc_mark_before_sweep(heap_tclass); gc_sweep_step(objspace, heap_eden); - gc_sweep_step(objspace, heap_oldish); + gc_sweep_step(objspace, heap_tclass); } gc_heap_prepare_minimum_pages(objspace, heap_eden); - gc_heap_prepare_minimum_pages(objspace, heap_oldish); + gc_heap_prepare_minimum_pages(objspace, heap_tclass); } /* Marking - Marking stack */ @@ -5227,7 +5227,7 @@ gc_verify_heap_pages(rb_objspace_t *objspace) { int rememberd_old_objects = 0; rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_eden->pages); - rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_oldish->pages); + rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_tclass->pages); rememberd_old_objects = gc_verify_heap_pages_(objspace, heap_tomb->pages); return rememberd_old_objects; } @@ -5272,7 +5272,7 @@ gc_verify_internal_consistency(VALUE dummy) /* check counters */ - if ((!is_lazy_sweeping(heap_eden) || !is_lazy_sweeping(heap_oldish)) && !finalizing) { + if ((!is_lazy_sweeping(heap_eden) || !is_lazy_sweeping(heap_tclass)) && !finalizing) { if (objspace_live_slots(objspace) != data.live_object_count) { fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n", (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects); @@ -5350,7 +5350,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark) objspace->rgengc.last_major_gc = objspace->profile.count; objspace->marked_slots = 0; rgengc_mark_and_rememberset_clear(objspace, heap_eden); - rgengc_mark_and_rememberset_clear(objspace, heap_oldish); + rgengc_mark_and_rememberset_clear(objspace, heap_tclass); } else { objspace->flags.during_minor_gc = TRUE; @@ -5358,7 +5358,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark) objspace->rgengc.old_objects + 
objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */ objspace->profile.minor_gc_count++; rgengc_rememberset_mark(objspace, heap_eden); - rgengc_rememberset_mark(objspace, heap_oldish); + rgengc_rememberset_mark(objspace, heap_tclass); } #endif @@ -5434,8 +5434,8 @@ gc_marks_finish(rb_objspace_t *objspace) return FALSE; /* continue marking phase */ } - if (heap_oldish->pooled_pages) { - heap_move_pooled_pages_to_free_pages(heap_oldish); + if (heap_tclass->pooled_pages) { + heap_move_pooled_pages_to_free_pages(heap_tclass); gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n"); return FALSE; /* continue marking phase */ } @@ -5460,7 +5460,7 @@ gc_marks_finish(rb_objspace_t *objspace) objspace->flags.during_incremental_marking = FALSE; /* check children of all marked wb-unprotected objects */ gc_marks_wb_unprotected_objects(objspace, heap_eden); - gc_marks_wb_unprotected_objects(objspace, heap_oldish); + gc_marks_wb_unprotected_objects(objspace, heap_tclass); } #endif /* GC_ENABLE_INCREMENTAL_MARK */ @@ -5484,7 +5484,7 @@ gc_marks_finish(rb_objspace_t *objspace) { /* decide full GC is needed or not */ rb_heap_t *heap = heap_eden; - rb_heap_t *oheap = heap_oldish; + rb_heap_t *oheap = heap_tclass; size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots + oheap->total_slots; size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */ size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio); @@ -5586,7 +5586,7 @@ gc_marks_rest(rb_objspace_t *objspace) #if GC_ENABLE_INCREMENTAL_MARK heap_eden->pooled_pages = NULL; - heap_oldish->pooled_pages = NULL; + heap_tclass->pooled_pages = NULL; #endif if (is_incremental_marking(objspace)) { @@ -5690,7 +5690,7 @@ gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...) status = is_full_marking(objspace) ? 
"+" : "-"; } else { - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) { + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) { status = "S"; } if (is_incremental_marking(objspace)) { @@ -6270,7 +6270,7 @@ ready_to_gc(rb_objspace_t *objspace) { if (dont_gc || during_gc || ruby_disable_gc) { heap_ready_to_gc(objspace, heap_eden); - heap_ready_to_gc(objspace, heap_oldish); + heap_ready_to_gc(objspace, heap_tclass); return FALSE; } else { @@ -6375,7 +6375,7 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, if (RGENGC_CHECK_MODE) { assert(gc_mode(objspace) == gc_mode_none); assert(!is_lazy_sweeping(heap_eden)); - assert(!is_lazy_sweeping(heap_oldish)); + assert(!is_lazy_sweeping(heap_tclass)); assert(!is_incremental_marking(objspace)); #if RGENGC_CHECK_MODE >= 2 gc_verify_internal_consistency(Qnil); @@ -6455,7 +6455,7 @@ static void gc_rest(rb_objspace_t *objspace) { int marking = is_incremental_marking(objspace); - int sweeping = is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish); + int sweeping = is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass); if (marking || sweeping) { gc_enter(objspace, "gc_rest"); @@ -6467,7 +6467,7 @@ gc_rest(rb_objspace_t *objspace) gc_marks_rest(objspace); POP_MARK_FUNC_DATA(); } - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) { + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) { gc_sweep_rest(objspace); } gc_exit(objspace, "gc_rest"); @@ -6497,7 +6497,7 @@ gc_current_status_fill(rb_objspace_t *objspace, char *buff) } else if (is_sweeping(objspace)) { buff[i++] = 'S'; - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) buff[i++] = 'L'; + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) buff[i++] = 'L'; } else { buff[i++] = 'N'; @@ -6883,7 +6883,7 @@ enum gc_stat_sym { gc_stat_sym_heap_final_slots, gc_stat_sym_heap_marked_slots, gc_stat_sym_heap_eden_pages, - 
gc_stat_sym_heap_oldish_pages, + gc_stat_sym_heap_tclass_pages, gc_stat_sym_heap_tomb_pages, gc_stat_sym_total_allocated_pages, gc_stat_sym_total_freed_pages, @@ -6917,7 +6917,7 @@ enum gc_stat_sym { enum gc_stat_compat_sym { gc_stat_compat_sym_gc_stat_heap_used, gc_stat_compat_sym_heap_eden_page_length, - gc_stat_compat_sym_heap_oldish_page_length, + gc_stat_compat_sym_heap_tclass_page_length, gc_stat_compat_sym_heap_tomb_page_length, gc_stat_compat_sym_heap_increment, gc_stat_compat_sym_heap_length, @@ -6961,7 +6961,7 @@ setup_gc_stat_symbols(void) S(heap_final_slots); S(heap_marked_slots); S(heap_eden_pages); - S(heap_oldish_pages); + S(heap_tclass_pages); S(heap_tomb_pages); S(total_allocated_pages); S(total_freed_pages); @@ -6993,7 +6993,7 @@ setup_gc_stat_symbols(void) #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s)) S(gc_stat_heap_used); S(heap_eden_page_length); - S(heap_oldish_page_length); + S(heap_tclass_page_length); S(heap_tomb_page_length); S(heap_increment); S(heap_length); @@ -7027,7 +7027,7 @@ setup_gc_stat_symbols(void) #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s] rb_hash_aset(table, OLD_SYM(gc_stat_heap_used), NEW_SYM(heap_allocated_pages)); rb_hash_aset(table, OLD_SYM(heap_eden_page_length), NEW_SYM(heap_eden_pages)); - rb_hash_aset(table, OLD_SYM(heap_oldish_page_length), NEW_SYM(heap_oldish_pages)); + rb_hash_aset(table, OLD_SYM(heap_tclass_page_length), NEW_SYM(heap_tclass_pages)); rb_hash_aset(table, OLD_SYM(heap_tomb_page_length), NEW_SYM(heap_tomb_pages)); rb_hash_aset(table, OLD_SYM(heap_increment), NEW_SYM(heap_allocatable_pages)); rb_hash_aset(table, OLD_SYM(heap_length), NEW_SYM(heap_sorted_length)); @@ -7136,7 +7136,7 @@ gc_stat_internal(VALUE hash_or_sym) SET(heap_final_slots, heap_pages_final_slots); SET(heap_marked_slots, objspace->marked_slots); SET(heap_eden_pages, heap_eden->total_pages); - SET(heap_oldish_pages, heap_oldish->total_pages); + SET(heap_tclass_pages, 
heap_tclass->total_pages); SET(heap_tomb_pages, heap_tomb->total_pages); SET(total_allocated_pages, objspace->profile.total_allocated_pages); SET(total_freed_pages, objspace->profile.total_freed_pages); @@ -7211,7 +7211,7 @@ gc_stat_internal(VALUE hash_or_sym) * :heap_final_slots=>0, * :heap_marked_slots=>0, * :heap_eden_pages=>24, - * :heap_oldish_pages=>24, + * :heap_tclass_pages=>24, * :heap_tomb_pages=>0, * :total_allocated_pages=>24, * :total_freed_pages=>0, @@ -7472,7 +7472,7 @@ gc_set_initial_pages(void) if (min_pages > heap_eden->total_pages) { heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages); } - heap_add_pages(objspace, heap_oldish, 10); + heap_add_pages(objspace, heap_tclass, 10); } /* @@ -7791,7 +7791,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si if (type == MEMOP_TYPE_MALLOC) { retry: if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) { - if (ruby_thread_has_gvl_p() && (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish))) { + if (ruby_thread_has_gvl_p() && (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass))) { gc_rest(objspace); /* gc_rest can reduce malloc_increase */ goto retry; } @@ -9456,7 +9456,7 @@ rb_gcdebug_print_obj_condition(VALUE obj) fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false"); #endif - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_oldish)) { + if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) { fprintf(stderr, "lazy sweeping?: true\n"); fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? 
"done" : "not yet"); } diff --git a/include/ruby/ruby.h b/include/ruby/ruby.h index 8e85808cb9be54..94043c4e91db0b 100644 --- a/include/ruby/ruby.h +++ b/include/ruby/ruby.h @@ -735,14 +735,14 @@ VALUE rb_int2big(SIGNED_VALUE); VALUE rb_newobj(void); VALUE rb_newobj_of(VALUE, VALUE); -VALUE rb_new_oldish_obj_of(VALUE, VALUE); +VALUE rb_new_tclass_obj_of(VALUE, VALUE); VALUE rb_obj_setup(VALUE obj, VALUE klass, VALUE type); #define RB_NEWOBJ(obj,type) type *(obj) = (type*)rb_newobj() #define RB_NEWOBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_newobj_of(klass, flags) -#define RB_NEW_OLDISH_OBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_new_oldish_obj_of(klass, flags) +#define RB_NEW_TCLASS_OBJ_OF(obj,type,klass,flags) type *(obj) = (type*)rb_new_tclass_obj_of(klass, flags) #define NEWOBJ(obj,type) RB_NEWOBJ(obj,type) #define NEWOBJ_OF(obj,type,klass,flags) RB_NEWOBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */ -#define NEW_OLDISH_OBJ_OF(obj,type,klass,flags) RB_NEW_OLDISH_OBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */ +#define NEW_TCLASS_OBJ_OF(obj,type,klass,flags) RB_NEW_TCLASS_OBJ_OF(obj,type,klass,flags) /* core has special NEWOBJ_OF() in internal.h */ #define OBJSETUP(obj,c,t) rb_obj_setup(obj, c, t) /* use NEWOBJ_OF instead of NEWOBJ()+OBJSETUP() */ #define CLONESETUP(clone,obj) rb_clone_setup(clone,obj) #define DUPSETUP(dup,obj) rb_dup_setup(dup,obj) From f9ea96510826c7c5155955de8335ac53ebc86a9d Mon Sep 17 00:00:00 2001 From: Aaron Patterson Date: Wed, 14 Sep 2016 11:56:51 -0700 Subject: [PATCH 5/7] lazy sweeping should be based on `objspace` Since we have two heaps (one for classes and the regular eden) we need to base whether we're in lazy sweep mode by looking at both heaps. This means passing `objspace` to `is_lazy_sweeping` so that `is_lazy_sweeping` can ask both heaps if either one is currently lazily sweeping.
--- gc.c | 125 ++++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 77 insertions(+), 48 deletions(-) diff --git a/gc.c b/gc.c index baecf34c010eca..64580aff1edd46 100644 --- a/gc.c +++ b/gc.c @@ -785,7 +785,7 @@ gc_mode_verify(enum gc_mode mode) #define will_be_incremental_marking(objspace) FALSE #endif #define has_sweeping_pages(heap) ((heap)->sweep_pages != 0) -#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap)) +#define is_lazy_sweeping(_objspace) (GC_ENABLE_LAZY_SWEEP && (has_sweeping_pages(&(_objspace->eden_heap)) || has_sweeping_pages(&(_objspace->tclass_heap)))) #if SIZEOF_LONG == SIZEOF_VOIDP # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG) @@ -844,7 +844,7 @@ static int gc_marks_finish(rb_objspace_t *objspace); static void gc_marks_rest(rb_objspace_t *objspace); #if GC_ENABLE_INCREMENTAL_MARK static void gc_marks_step(rb_objspace_t *objspace, int slots); -static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap); +static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t * requested_heap); #endif static void gc_sweep(rb_objspace_t *objspace); @@ -853,7 +853,7 @@ static void gc_sweep_finish(rb_objspace_t *objspace); static int gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap); static void gc_sweep_rest(rb_objspace_t *objspace); #if GC_ENABLE_LAZY_SWEEP -static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap); +static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap, int need_increment); #endif static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr); @@ -1322,7 +1322,7 @@ static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page); void rb_objspace_free(rb_objspace_t *objspace) { - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) + if (is_lazy_sweeping(objspace)) rb_bug("lazy sweeping underway when freeing object space"); if (objspace->profile.records) { @@ -1683,8 +1683,14 @@ 
heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap) if (RGENGC_CHECK_MODE) assert(heap->free_pages == NULL); #if GC_ENABLE_LAZY_SWEEP - if (is_lazy_sweeping(heap)) { - gc_sweep_continue(objspace, heap); + if (is_lazy_sweeping(objspace)) { + gc_sweep_continue(objspace, heap_eden, heap == heap_eden ? 1 : 0); + gc_sweep_continue(objspace, heap_tclass, heap == heap_tclass ? 1 : 0); + if (heap->free_pages == NULL) { + if(has_sweeping_pages(heap_eden) || has_sweeping_pages(heap_tclass)) { + gc_sweep_rest(objspace); + } + } } #endif #if GC_ENABLE_INCREMENTAL_MARK @@ -1932,7 +1938,6 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protect return newobj_of_with_eden(klass, flags, v1, v2, v3, wb_protected, objspace, heap_eden); } - VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags) { @@ -2341,7 +2346,7 @@ Init_heap(void) #endif heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT); - heap_add_pages(objspace, heap_tclass, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT); + heap_add_pages(objspace, heap_tclass, 2); init_mark_stack(&objspace->mark_stack); #ifdef USE_SIGALTSTACK @@ -2961,9 +2966,9 @@ heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr) } static inline int -is_swept_object(rb_objspace_t *objspace, VALUE ptr) +is_swept_object(rb_objspace_t *objspace, VALUE ptr, rb_heap_t * heap) { - if (heap_is_swept_object(objspace, heap_eden, ptr) || heap_is_swept_object(objspace, heap_tclass, ptr)) { + if (heap_is_swept_object(objspace, heap, ptr)) { return TRUE; } else { @@ -2975,9 +2980,9 @@ is_swept_object(rb_objspace_t *objspace, VALUE ptr) static inline int is_garbage_object(rb_objspace_t *objspace, VALUE ptr) { - if (!is_lazy_sweeping(heap_eden) || - !is_lazy_sweeping(heap_tclass) || - is_swept_object(objspace, ptr) || + if (!is_lazy_sweeping(objspace) || + is_swept_object(objspace, ptr, heap_eden) || + is_swept_object(objspace, ptr, heap_tclass) || 
MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) { return FALSE; @@ -3580,7 +3585,6 @@ gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap) heap->free_pages = NULL; #if GC_ENABLE_INCREMENTAL_MARK heap->pooled_pages = NULL; - objspace->rincgc.pooled_slots = 0; #endif if (heap->using_page) { RVALUE **p = &heap->using_page->freelist; @@ -3600,6 +3604,9 @@ static void gc_sweep_start(rb_objspace_t *objspace) { gc_mode_transition(objspace, gc_mode_sweeping); +#if GC_ENABLE_INCREMENTAL_MARK + objspace->rincgc.pooled_slots = 0; +#endif gc_sweep_start_heap(objspace, heap_eden); gc_sweep_start_heap(objspace, heap_tclass); } @@ -3680,7 +3687,7 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap) sweep_page = next_sweep_page; } - if (heap->sweep_pages == NULL) { + if (objspace->eden_heap.sweep_pages == NULL && objspace->tclass_heap.sweep_pages == NULL) { gc_sweep_finish(objspace); } @@ -3696,8 +3703,9 @@ gc_sweep_rest(rb_objspace_t *objspace) { rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */ - while (has_sweeping_pages(heap)) { - gc_sweep_step(objspace, heap); + while (has_sweeping_pages(heap_eden) || has_sweeping_pages(heap_tclass)) { + gc_sweep_step(objspace, heap_eden); + gc_sweep_step(objspace, heap_tclass); } heap = heap_tclass; @@ -3709,13 +3717,13 @@ gc_sweep_rest(rb_objspace_t *objspace) #if GC_ENABLE_LAZY_SWEEP static void -gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap) +gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap, int need_increment) { if (RGENGC_CHECK_MODE) assert(dont_gc == FALSE); gc_enter(objspace, "sweep_continue"); #if USE_RGENGC - if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) { + if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && need_increment && heap_increment(objspace, heap)) { gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n"); } #endif @@ -5272,7 +5280,7 @@ gc_verify_internal_consistency(VALUE dummy) /* check counters */ - if 
((!is_lazy_sweeping(heap_eden) || !is_lazy_sweeping(heap_tclass)) && !finalizing) { + if (!is_lazy_sweeping(objspace) && !finalizing) { if (objspace_live_slots(objspace) != data.live_object_count) { fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n", (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects); @@ -5428,18 +5436,20 @@ gc_marks_finish(rb_objspace_t *objspace) #if GC_ENABLE_INCREMENTAL_MARK /* finish incremental GC */ if (is_incremental_marking(objspace)) { - if (heap_eden->pooled_pages) { - heap_move_pooled_pages_to_free_pages(heap_eden); - gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n"); - return FALSE; /* continue marking phase */ - } + if (heap_eden->pooled_pages || heap_tclass->pooled_pages) { + if (heap_eden->pooled_pages) { + heap_move_pooled_pages_to_free_pages(heap_eden); + } + + if (heap_tclass->pooled_pages) { + heap_move_pooled_pages_to_free_pages(heap_tclass); + } - if (heap_tclass->pooled_pages) { - heap_move_pooled_pages_to_free_pages(heap_tclass); gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. 
retry.\n"); return FALSE; /* continue marking phase */ } + if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) { rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack)); } @@ -5604,32 +5614,52 @@ gc_marks_rest(rb_objspace_t *objspace) } #if GC_ENABLE_INCREMENTAL_MARK -static void -gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap) +static int +gc_marks_continue_slots(rb_objspace_t *objspace, rb_heap_t *heap) { int slots = 0; const char *from; + if (heap->pooled_pages) { + while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) { + struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap); + slots += page->free_slots; + } + from = "pooled-pages"; + } + else if (heap_increment(objspace, heap)) { + slots = heap->free_pages->free_slots; + from = "incremented-pages"; + } + if (slots > 0) { + gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from); + } + return slots; +} + +static void +gc_marks_continue(rb_objspace_t *objspace, rb_heap_t * requested_heap) +{ + int eden_slots = 0; + int tclass_slots = 0; + int slots = 0; + if (RGENGC_CHECK_MODE) assert(dont_gc == FALSE); gc_enter(objspace, "marks_continue"); PUSH_MARK_FUNC_DATA(NULL); { - if (heap->pooled_pages) { - while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) { - struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap); - slots += page->free_slots; - } - from = "pooled-pages"; - } - else if (heap_increment(objspace, heap)) { - slots = heap->free_pages->free_slots; - from = "incremented-pages"; + eden_slots = gc_marks_continue_slots(objspace, heap_eden); + tclass_slots = gc_marks_continue_slots(objspace, heap_tclass); + + if (requested_heap == heap_eden) { + slots = eden_slots; + } else { + slots = tclass_slots; } if (slots > 0) { - gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from); gc_marks_step(objspace, (int)objspace->rincgc.step_slots); } 
else { @@ -5690,7 +5720,7 @@ gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...) status = is_full_marking(objspace) ? "+" : "-"; } else { - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) { + if (is_lazy_sweeping(objspace)) { status = "S"; } if (is_incremental_marking(objspace)) { @@ -6374,8 +6404,7 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, if (RGENGC_CHECK_MODE) { assert(gc_mode(objspace) == gc_mode_none); - assert(!is_lazy_sweeping(heap_eden)); - assert(!is_lazy_sweeping(heap_tclass)); + assert(!is_lazy_sweeping(objspace)); assert(!is_incremental_marking(objspace)); #if RGENGC_CHECK_MODE >= 2 gc_verify_internal_consistency(Qnil); @@ -6455,7 +6484,7 @@ static void gc_rest(rb_objspace_t *objspace) { int marking = is_incremental_marking(objspace); - int sweeping = is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass); + int sweeping = is_lazy_sweeping(objspace); if (marking || sweeping) { gc_enter(objspace, "gc_rest"); @@ -6467,7 +6496,7 @@ gc_rest(rb_objspace_t *objspace) gc_marks_rest(objspace); POP_MARK_FUNC_DATA(); } - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) { + if (is_lazy_sweeping(objspace)) { gc_sweep_rest(objspace); } gc_exit(objspace, "gc_rest"); @@ -6497,7 +6526,7 @@ gc_current_status_fill(rb_objspace_t *objspace, char *buff) } else if (is_sweeping(objspace)) { buff[i++] = 'S'; - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) buff[i++] = 'L'; + if (is_lazy_sweeping(objspace)) buff[i++] = 'L'; } else { buff[i++] = 'N'; @@ -7791,7 +7820,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si if (type == MEMOP_TYPE_MALLOC) { retry: if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) { - if (ruby_thread_has_gvl_p() && (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass))) { + if (ruby_thread_has_gvl_p() && (is_lazy_sweeping(objspace))) { gc_rest(objspace); /* gc_rest 
can reduce malloc_increase */ goto retry; } @@ -9456,7 +9485,7 @@ rb_gcdebug_print_obj_condition(VALUE obj) fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false"); #endif - if (is_lazy_sweeping(heap_eden) || is_lazy_sweeping(heap_tclass)) { + if (is_lazy_sweeping(objspace)) { fprintf(stderr, "lazy sweeping?: true\n"); fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet"); } From 43fe223b6df35d1d239285e9b044e54aeef71d13 Mon Sep 17 00:00:00 2001 From: Aaron Patterson Date: Fri, 16 Sep 2016 11:56:17 -0700 Subject: [PATCH 6/7] Add options to dump T_NONE objects and add page numbers --- ext/objspace/objspace_dump.c | 38 ++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/ext/objspace/objspace_dump.c b/ext/objspace/objspace_dump.c index 127d33283ad0a7..b9265d5dc243fb 100644 --- a/ext/objspace/objspace_dump.c +++ b/ext/objspace/objspace_dump.c @@ -21,6 +21,7 @@ #include "objspace.h" static VALUE sym_output, sym_stdout, sym_string, sym_file; +static VALUE sym_include_pages, sym_include_none; struct dump_config { VALUE type; @@ -31,6 +32,9 @@ struct dump_config { VALUE cur_obj; VALUE cur_obj_klass; size_t cur_obj_references; + int include_pages; + int include_none; + int pages_seen; }; PRINTF_ARGS(static void dump_append(struct dump_config *, const char *, ...), 2, 3); @@ -190,6 +194,18 @@ dump_append_string_content(struct dump_config *dc, VALUE obj) } } +static void +dump_empty(VALUE obj, struct dump_config *dc) +{ + dump_append(dc, "{\"address\":\"%p\", ", (void *)obj); + dump_append(dc, "\"type\":\"NONE\""); + + if (dc->include_pages) + dump_append(dc, ", \"page_number\":%d", dc->pages_seen); + dump_append(dc, "}\n"); + return; +} + static void dump_object(VALUE obj, struct dump_config *dc) { @@ -215,6 +231,8 @@ dump_object(VALUE obj, struct dump_config *dc) if (dc->cur_obj_klass) dump_append(dc, ", \"class\":\"%p\"", (void *)dc->cur_obj_klass); + if 
(dc->include_pages) + dump_append(dc, ", \"page_number\":%d", dc->pages_seen); if (rb_obj_frozen_p(obj)) dump_append(dc, ", \"frozen\":true"); @@ -321,8 +339,11 @@ heap_i(void *vstart, void *vend, size_t stride, void *data) VALUE v = (VALUE)vstart; for (; v != (VALUE)vend; v += stride) { if (RBASIC(v)->flags) - dump_object(v, data); + dump_object(v, dc); + else if (dc->include_none && T_NONE == BUILTIN_TYPE(v)) + dump_empty(v, dc); } + dc->pages_seen++; return 0; } @@ -347,9 +368,20 @@ dump_output(struct dump_config *dc, VALUE opts, VALUE output, const char *filena { VALUE tmp; - if (RTEST(opts)) + dc->pages_seen = 0; + dc->include_pages = 0; + dc->include_none = 0; + + if (RTEST(opts)) { output = rb_hash_aref(opts, sym_output); + if (Qtrue == rb_hash_lookup2(opts, sym_include_pages, Qfalse)) + dc->include_pages = 1; + + if (Qtrue == rb_hash_lookup2(opts, sym_include_none, Qfalse)) + dc->include_none = 1; + } + if (output == sym_stdout) { dc->stream = stdout; dc->string = Qnil; @@ -474,6 +506,8 @@ Init_objspace_dump(VALUE rb_mObjSpace) sym_stdout = ID2SYM(rb_intern("stdout")); sym_string = ID2SYM(rb_intern("string")); sym_file = ID2SYM(rb_intern("file")); + sym_include_pages = ID2SYM(rb_intern("include_pages")); + sym_include_none = ID2SYM(rb_intern("include_none")); /* force create static IDs */ rb_obj_gc_flags(rb_mObjSpace, 0, 0); From 4c91ab0a2cb9b78fde70528f93ef0e520f2176d0 Mon Sep 17 00:00:00 2001 From: Aaron Patterson Date: Tue, 20 Sep 2016 15:49:55 -0700 Subject: [PATCH 7/7] Revert "objspace_dump.c: dc is no longer used [ci skip]" This reverts commit cf6ca2d8b252cd9af0bec374693d2064842078dc. 
--- ext/objspace/objspace_dump.c | 1 + 1 file changed, 1 insertion(+) diff --git a/ext/objspace/objspace_dump.c b/ext/objspace/objspace_dump.c index b9265d5dc243fb..674bc781e63178 100644 --- a/ext/objspace/objspace_dump.c +++ b/ext/objspace/objspace_dump.c @@ -336,6 +336,7 @@ dump_object(VALUE obj, struct dump_config *dc) static int heap_i(void *vstart, void *vend, size_t stride, void *data) { + struct dump_config *dc = (struct dump_config *)data; VALUE v = (VALUE)vstart; for (; v != (VALUE)vend; v += stride) { if (RBASIC(v)->flags)