14 #define rb_data_object_alloc rb_data_object_alloc 15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc 34 #include <sys/types.h> 37 #undef rb_data_object_wrap 39 #ifndef HAVE_MALLOC_USABLE_SIZE 41 # define HAVE_MALLOC_USABLE_SIZE 42 # define malloc_usable_size(a) _msize(a) 43 # elif defined HAVE_MALLOC_SIZE 44 # define HAVE_MALLOC_USABLE_SIZE 45 # define malloc_usable_size(a) malloc_size(a) 48 #ifdef HAVE_MALLOC_USABLE_SIZE 51 # elif defined(HAVE_MALLOC_NP_H) 52 # include <malloc_np.h> 53 # elif defined(HAVE_MALLOC_MALLOC_H) 54 # include <malloc/malloc.h> 59 __has_feature(address_sanitizer) || \ 60 defined(__SANITIZE_ADDRESS__) 61 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \ 62 __attribute__((no_address_safety_analysis)) \ 63 __attribute__((noinline)) 65 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS 68 #ifdef HAVE_SYS_TIME_H 72 #ifdef HAVE_SYS_RESOURCE_H 73 #include <sys/resource.h> 75 #if defined(__native_client__) && defined(NACL_NEWLIB) 77 # undef HAVE_POSIX_MEMALIGN 82 #if defined _WIN32 || defined __CYGWIN__ 84 #elif defined(HAVE_POSIX_MEMALIGN) 85 #elif defined(HAVE_MEMALIGN) 89 #define rb_setjmp(env) RUBY_SETJMP(env) 90 #define rb_jmp_buf rb_jmpbuf_t 92 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL 104 #ifndef GC_HEAP_INIT_SLOTS 105 #define GC_HEAP_INIT_SLOTS 10000 107 #ifndef GC_HEAP_FREE_SLOTS 108 #define GC_HEAP_FREE_SLOTS 4096 110 #ifndef GC_HEAP_GROWTH_FACTOR 111 #define GC_HEAP_GROWTH_FACTOR 1.8 113 #ifndef GC_HEAP_GROWTH_MAX_SLOTS 114 #define GC_HEAP_GROWTH_MAX_SLOTS 0 116 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR 117 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0 120 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO 121 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20 123 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO 124 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40 126 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO 127 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65 130 #ifndef GC_MALLOC_LIMIT_MIN 131 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 ) 133 #ifndef GC_MALLOC_LIMIT_MAX 134 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 ) 136 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR 137 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4 140 #ifndef GC_OLDMALLOC_LIMIT_MIN 141 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 ) 143 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 144 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2 146 #ifndef GC_OLDMALLOC_LIMIT_MAX 147 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 ) 150 #ifndef PRINT_MEASURE_LINE 151 #define PRINT_MEASURE_LINE 0 153 #ifndef PRINT_ENTER_EXIT_TICK 154 #define PRINT_ENTER_EXIT_TICK 0 156 #ifndef PRINT_ROOT_TICKS 157 #define PRINT_ROOT_TICKS 0 160 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS) 223 #define RGENGC_DEBUG 0 234 #ifndef RGENGC_CHECK_MODE 235 #define RGENGC_CHECK_MODE 0 244 #ifndef RGENGC_OLD_NEWOBJ_CHECK 245 #define RGENGC_OLD_NEWOBJ_CHECK 0 253 #ifndef RGENGC_PROFILE 254 #define RGENGC_PROFILE 0 263 #ifndef RGENGC_ESTIMATE_OLDMALLOC 264 #define RGENGC_ESTIMATE_OLDMALLOC 1 270 #ifndef RGENGC_FORCE_MAJOR_GC 271 #define RGENGC_FORCE_MAJOR_GC 0 279 #define RGENGC_DEBUG 0 280 #ifdef RGENGC_CHECK_MODE 281 #undef RGENGC_CHECK_MODE 283 #define RGENGC_CHECK_MODE 0 284 #define RGENGC_PROFILE 0 285 #define RGENGC_ESTIMATE_OLDMALLOC 0 286 #define RGENGC_FORCE_MAJOR_GC 0 290 #ifndef GC_PROFILE_MORE_DETAIL 291 #define GC_PROFILE_MORE_DETAIL 0 293 #ifndef GC_PROFILE_DETAIL_MEMORY 294 #define GC_PROFILE_DETAIL_MEMORY 0 296 #ifndef GC_ENABLE_INCREMENTAL_MARK 297 #define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC 299 #ifndef 
GC_ENABLE_LAZY_SWEEP 300 #define GC_ENABLE_LAZY_SWEEP 1 302 #ifndef CALC_EXACT_MALLOC_SIZE 303 #define CALC_EXACT_MALLOC_SIZE 0 305 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0 306 #ifndef MALLOC_ALLOCATED_SIZE 307 #define MALLOC_ALLOCATED_SIZE 0 310 #define MALLOC_ALLOCATED_SIZE 0 312 #ifndef MALLOC_ALLOCATED_SIZE_CHECK 313 #define MALLOC_ALLOCATED_SIZE_CHECK 0 316 #ifndef GC_DEBUG_STRESS_TO_CLASS 317 #define GC_DEBUG_STRESS_TO_CLASS 0 320 #ifndef RGENGC_OBJ_INFO 321 #define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE) 331 #if RGENGC_ESTIMATE_OLDMALLOC 358 #if GC_PROFILE_MORE_DETAIL 360 double gc_sweep_time;
362 size_t heap_use_pages;
363 size_t heap_live_objects;
364 size_t heap_free_objects;
366 size_t allocate_increase;
367 size_t allocate_limit;
370 size_t removing_objects;
371 size_t empty_objects;
372 #if GC_PROFILE_DETAIL_MEMORY 378 #if MALLOC_ALLOCATED_SIZE 379 size_t allocated_size;
382 #if RGENGC_PROFILE > 0 384 size_t remembered_normal_objects;
385 size_t remembered_shady_objects;
389 #if defined(_MSC_VER) || defined(__CYGWIN__) 390 #pragma pack(push, 1) 439 #if defined(_MSC_VER) || defined(__CYGWIN__) 464 #define STACK_CHUNK_SIZE 500 487 #if GC_ENABLE_INCREMENTAL_MARK 504 #if MALLOC_ALLOCATED_SIZE 505 size_t allocated_size;
521 #if GC_ENABLE_INCREMENTAL_MARK 538 void (*mark_func)(
VALUE v,
void *data);
567 #if GC_PROFILE_MORE_DETAIL 575 #if RGENGC_PROFILE > 0 576 size_t total_generated_normal_object_count;
577 size_t total_generated_shady_object_count;
578 size_t total_shade_operation_count;
579 size_t total_promoted_count;
580 size_t total_remembered_normal_object_count;
581 size_t total_remembered_shady_object_count;
583 #if RGENGC_PROFILE >= 2 584 size_t generated_normal_object_count_types[
RUBY_T_MASK];
585 size_t generated_shady_object_count_types[
RUBY_T_MASK];
588 size_t remembered_normal_object_count_types[
RUBY_T_MASK];
589 size_t remembered_shady_object_count_types[
RUBY_T_MASK];
619 #if RGENGC_ESTIMATE_OLDMALLOC 624 #if RGENGC_CHECK_MODE >= 2 629 #if GC_ENABLE_INCREMENTAL_MARK 637 #if GC_DEBUG_STRESS_TO_CLASS 643 #ifndef HEAP_PAGE_ALIGN_LOG 645 #define HEAP_PAGE_ALIGN_LOG 14 647 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod)) 687 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK))) 688 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header) 689 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page) 691 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE)) 692 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH ) 693 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1)) 694 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p)) 697 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p)) 698 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p)) 699 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p)) 702 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0]) 704 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0]) 705 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0]) 706 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0]) 710 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE 711 #define rb_objspace (*rb_objspace_of(GET_VM())) 712 #define rb_objspace_of(vm) ((vm)->objspace) 715 #define rb_objspace_of(vm) (&rb_objspace) 718 #define ruby_initial_gc_stress gc_params.gc_stress 722 #define malloc_limit objspace->malloc_params.limit 723 #define malloc_increase objspace->malloc_params.increase 724 #define malloc_allocated_size objspace->malloc_params.allocated_size 725 #define heap_pages_sorted objspace->heap_pages.sorted 726 #define heap_allocated_pages objspace->heap_pages.allocated_pages 727 #define heap_pages_sorted_length objspace->heap_pages.sorted_length 728 #define heap_pages_lomem objspace->heap_pages.range[0] 729 #define heap_pages_himem objspace->heap_pages.range[1] 730 #define heap_allocatable_pages objspace->heap_pages.allocatable_pages 731 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages 732 #define heap_pages_final_slots objspace->heap_pages.final_slots 733 #define heap_pages_deferred_final objspace->heap_pages.deferred_final 734 #define heap_eden (&objspace->eden_heap) 735 #define heap_tomb (&objspace->tomb_heap) 736 #define dont_gc objspace->flags.dont_gc 737 #define during_gc objspace->flags.during_gc 738 #define finalizing objspace->atomic_flags.finalizing 739 #define finalizer_table objspace->finalizer_table 740 #define global_list objspace->global_list 741 #define ruby_gc_stressful objspace->flags.gc_stressful 742 #define ruby_gc_stress_mode objspace->gc_stress_mode 743 #if GC_DEBUG_STRESS_TO_CLASS 744 #define stress_to_class objspace->stress_to_class 746 #define stress_to_class 0 752 #if RGENGC_CHECK_MODE > 0 759 rb_bug(
"gc_mode_verify: unreachable (%d)", (
int)mode);
765 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode) 766 #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode)) 768 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking) 769 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping) 771 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE) 773 #define is_full_marking(objspace) TRUE 775 #if GC_ENABLE_INCREMENTAL_MARK 776 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE) 778 #define is_incremental_marking(objspace) FALSE 780 #if GC_ENABLE_INCREMENTAL_MARK 781 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE) 783 #define will_be_incremental_marking(objspace) FALSE 785 #define has_sweeping_pages(heap) ((heap)->sweep_pages != 0) 786 #define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap)) 788 #if SIZEOF_LONG == SIZEOF_VOIDP 789 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG) 790 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) 791 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP 792 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2) 793 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \ 794 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1)) 796 # error not supported 799 #define RANY(o) ((RVALUE*)(o)) 804 void (*dfree)(
void *);
808 #define RZOMBIE(o) ((struct RZombie *)(o)) 810 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory] 834 static int gc_start(
rb_objspace_t *objspace,
const int full_mark,
const int immediate_mark,
const unsigned int immediate_sweep,
int reason);
843 #if GC_ENABLE_INCREMENTAL_MARK 853 #if GC_ENABLE_LAZY_SWEEP 892 #define gc_prof_record(objspace) (objspace)->profile.current_record 893 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record) 895 #ifdef HAVE_VA_ARGS_MACRO 896 # define gc_report(level, objspace, fmt, ...) \ 897 if ((level) > RGENGC_DEBUG) {} else gc_report_body(level, objspace, fmt, ##__VA_ARGS__) 899 # define gc_report if (!(RGENGC_DEBUG)) {} else gc_report_body 904 #define PUSH_MARK_FUNC_DATA(v) do { \ 905 struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \ 906 objspace->mark_func_data = (v); 908 #define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0) 928 #if defined(__GNUC__) && defined(__i386__) 929 typedef unsigned long long tick_t;
930 #define PRItick "llu" 934 unsigned long long int x;
935 __asm__ __volatile__ (
"rdtsc" :
"=A" (x));
939 #elif defined(__GNUC__) && defined(__x86_64__) 940 typedef unsigned long long tick_t;
941 #define PRItick "llu" 943 static __inline__ tick_t
946 unsigned long hi,
lo;
947 __asm__ __volatile__ (
"rdtsc" :
"=a"(lo),
"=d"(hi));
948 return ((
unsigned long long)lo)|( ((
unsigned long long)hi)<<32);
951 #elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0) 952 typedef unsigned long long tick_t;
953 #define PRItick "llu" 955 static __inline__ tick_t
958 unsigned long long val = __builtin_ppc_get_timebase();
962 #elif defined(_WIN32) && defined(_MSC_VER) 964 typedef unsigned __int64 tick_t;
965 #define PRItick "llu" 974 typedef clock_t tick_t;
975 #define PRItick "llu" 985 typedef double tick_t;
986 #define PRItick "4.9f" 994 #error "choose tick type" 997 #define MEASURE_LINE(expr) do { \ 998 volatile tick_t start_time = tick(); \ 999 volatile tick_t end_time; \ 1001 end_time = tick(); \ 1002 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \ 1006 #define MEASURE_LINE(expr) expr 1009 #define FL_TEST2(x,f) ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? (rb_bug("FL_TEST2: SPECIAL_CONST (%p)", (void *)(x)), 0) : FL_TEST_RAW((x),(f)) != 0) 1010 #define FL_SET2(x,f) do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_SET2: SPECIAL_CONST"); RBASIC(x)->flags |= (f);} while (0) 1011 #define FL_UNSET2(x,f) do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_UNSET2: SPECIAL_CONST"); RBASIC(x)->flags &= ~(f);} while (0) 1013 #define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj)) 1014 #define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj)) 1017 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj)) 1018 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj)) 1019 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj)) 1021 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj)) 1022 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj)) 1023 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj)) 1025 #define RVALUE_OLD_AGE 3 1026 #define RVALUE_AGE_SHIFT 5 1042 #if RGENGC_CHECK_MODE == 0 1055 rb_bug(
"check_rvalue_consistency: %p is a special const.", (
void *)obj);
1058 rb_bug(
"check_rvalue_consistency: %p is not a Ruby object.", (
void *)obj);
1075 if (age > 0 && wb_unprotected_bit) {
1076 rb_bug(
"check_rvalue_consistency: %s is not WB protected, but age is %d > 0.",
obj_info(obj), age);
1079 if (!
is_marking(objspace) && uncollectible_bit && !mark_bit) {
1080 rb_bug(
"check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.",
obj_info(obj));
1084 if (uncollectible_bit && age !=
RVALUE_OLD_AGE && !wb_unprotected_bit) {
1085 rb_bug(
"check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.",
obj_info(obj), age);
1088 rb_bug(
"check_rvalue_consistency: %s is rememberd, but not old (age: %d).",
obj_info(obj), age);
1100 if (!
is_marking(objspace) && !mark_bit)
rb_bug(
"check_rvalue_consistency: %s is marking, but not marked.",
obj_info(obj));
1147 return (
RBASIC(obj)->
flags & promoted) == promoted;
1157 #if RGENGC_CHECK_MODE || GC_DEBUG 1159 RVALUE_AGE(
VALUE obj)
1172 #if RGENGC_PROFILE >= 2 1173 objspace->
profile.total_promoted_count++;
1200 rb_bug(
"RVALUE_AGE_INC: can not increment age of OLD object %s.",
obj_info(obj));
1286 RVALUE_GREY_P(
VALUE obj)
1307 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE 1324 rb_bug(
"lazy sweeping underway when freeing object space");
1344 heap_allocated_pages = 0;
1354 #if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE) 1355 if (objspace == &rb_objspace)
return;
1366 gc_report(3, objspace,
"heap_pages_expand_sorted: next_length: %d, size: %d\n", (
int)next_length, (
int)size);
1404 rb_bug(
"heap_page_add_freeobj: %p is not rvalue.", p);
1407 gc_report(3, objspace,
"heap_page_add_freeobj: add %p to freelist\n", (
void *)obj);
1419 #if GC_ENABLE_INCREMENTAL_MARK 1491 if (page_body == 0) {
1509 end = start + limit;
1517 mid = (lo +
hi) / 2;
1519 if (mid_page->
start < start) {
1522 else if (mid_page->
start > start) {
1553 for (p = start; p != end; p++) {
1554 gc_report(3, objspace,
"assign_heap_page: %p is added to freelist\n", p);
1582 const char *method =
"recycle";
1585 method =
"allocate";
1587 if (0) fprintf(stderr,
"heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
1618 for (i = 0; i <
add; i++) {
1631 if (goal_ratio == 0.0) {
1638 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1641 if (f < 1.0) f = 1.1;
1643 next_used = (size_t)(f * used);
1648 " G(%1.2f), f(%1.2f)," 1650 free_slots, total_slots, free_slots/(
double)total_slots,
1651 goal_ratio, f, used, next_used);
1657 if (next_used > max_used) next_used = max_used;
1660 return next_used - used;
1667 size_t next_used_limit = used + additional_pages;
1681 gc_report(1, objspace,
"heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
1683 heap_allocatable_pages--;
1695 #if GC_ENABLE_LAZY_SWEEP 1700 #if GC_ENABLE_INCREMENTAL_MARK 1773 #define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook) 1774 #define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event)) 1776 #define gc_event_hook(objspace, event, data) do { \ 1777 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \ 1778 gc_event_hook_body(GET_THREAD(), (objspace), (event), (data)); \ 1793 RANY(obj)->as.values.v1 = v1;
1794 RANY(obj)->as.values.v2 = v2;
1795 RANY(obj)->as.values.v3 = v3;
1797 #if RGENGC_CHECK_MODE 1804 if (RVALUE_AGE(obj) != 2)
rb_bug(
"newobj: %s of age (%d) != 2.",
obj_info(obj), RVALUE_AGE(obj));
1807 if (RVALUE_AGE(obj) > 0)
rb_bug(
"newobj: %s of age (%d) > 0.",
obj_info(obj), RVALUE_AGE(obj));
1820 objspace->
profile.total_generated_normal_object_count++;
1821 #if RGENGC_PROFILE >= 2 1826 objspace->
profile.total_generated_shady_object_count++;
1827 #if RGENGC_PROFILE >= 2 1842 #if RGENGC_OLD_NEWOBJ_CHECK > 0 1849 if (--newobj_cnt == 0) {
1873 rb_bug(
"object allocation during garbage collection phase");
1884 newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
1910 #if GC_DEBUG_STRESS_TO_CLASS 1914 for (i = 0; i <
cnt; ++i) {
1923 return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
1926 return wb_protected ?
1982 fprintf(stderr,
"memo %p (type: %d) @ %s:%d\n", memo,
imemo_type(memo), file, line);
1994 #undef rb_data_object_alloc 2015 #undef rb_data_typed_object_alloc 2058 register size_t hi,
lo, mid;
2067 mid = (lo +
hi) / 2;
2069 if (page->
start <= p) {
2125 rb_bug(
"obj_free() called for broken object");
2137 #if RGENGC_CHECK_MODE 2138 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj)) 2150 RANY(obj)->as.object.as.heap.ivptr) {
2151 xfree(
RANY(obj)->as.object.as.heap.ivptr);
2177 if (
RANY(obj)->as.klass.ptr)
2188 if (
RANY(obj)->as.hash.ntbl) {
2193 if (
RANY(obj)->as.regexp.ptr) {
2199 int free_immediately =
FALSE;
2200 void (*dfree)(
void *);
2205 dfree =
RANY(obj)->as.typeddata.type->function.dfree;
2206 if (0 && free_immediately == 0) {
2208 fprintf(stderr,
"not immediate -> %s\n",
RANY(obj)->as.typeddata.type->wrap_struct_name);
2212 dfree =
RANY(obj)->as.data.dfree;
2219 else if (free_immediately) {
2230 if (
RANY(obj)->as.match.rmatch) {
2231 struct rmatch *rm =
RANY(obj)->as.match.rmatch;
2239 if (
RANY(obj)->as.file.fptr) {
2280 RANY(obj)->as.rstruct.as.heap.ptr) {
2281 xfree((
void *)
RANY(obj)->as.rstruct.as.heap.ptr);
2329 #if RGENGC_ESTIMATE_OLDMALLOC 2336 #ifdef USE_SIGALTSTACK 2340 void *tmp = th->altstack;
2374 pstart = page->
start;
2435 int prev_dont_incremental = objspace->flags.dont_incremental;
2438 objspace->flags.dont_incremental =
TRUE;
2443 if (prev_dont_incremental) {
2505 for (; p != pend; p++) {
2664 table = (
VALUE)data;
2672 for (i = 0; i <
len; i++, ptr++) {
2706 table = (
VALUE)data;
2736 #define RESTORE_FINALIZER() (\ 2737 th->cfp = saved.cfp, \ 2738 rb_set_safe_level_force(saved.safe), \ 2739 rb_set_errinfo(saved.errinfo)) 2744 saved.cfp = th->
cfp;
2752 for (i = saved.finished;
2754 saved.finished = ++i) {
2758 #undef RESTORE_FINALIZER 2785 RZOMBIE(zombie)->basic.flags = 0;
2793 zombie = next_zombie;
2827 rb_bug(
"gc_finalize_deferred_register: can't register finalizer.");
2852 #if RGENGC_CHECK_MODE >= 2 2894 gc_enter(objspace,
"rb_objspace_call_finalizer");
2906 p->as.free.flags = 0;
2908 RDATA(p)->dfree =
RANY(p)->as.typeddata.type->function.dfree;
2913 else if (
RANY(p)->as.data.dfree) {
2918 if (
RANY(p)->as.file.fptr) {
2927 gc_exit(objspace,
"rb_objspace_call_finalizer");
3036 #if SIZEOF_LONG == SIZEOF_VOIDP 3037 #define NUM2PTR(x) NUM2ULONG(x) 3038 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP 3039 #define NUM2PTR(x) NUM2ULL(x) 3055 if ((ptr %
sizeof(
RVALUE)) == (4 << 2)) {
3068 if (
RBASIC(ptr)->klass == 0) {
3134 #if SIZEOF_LONG == SIZEOF_VOIDP 3164 ROBJECT(obj)->as.heap.ivptr) {
3180 if (
RCLASS(obj)->ptr->iv_tbl) {
3183 if (
RCLASS(obj)->ptr->const_tbl) {
3203 if (
RHASH(obj)->ntbl) {
3220 size +=
sizeof(
struct rmatch);
3224 if (
RFILE(obj)->fptr) {
3258 rb_bug(
"objspace/memsize_of(): unknown data type 0x%x(%p)",
3262 return size +
sizeof(
RVALUE);
3330 for (i = 0; i <=
T_MASK; i++) {
3339 for (;p < pend; p++) {
3359 for (i = 0; i <=
T_MASK; i++) {
3362 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break; 3390 default: type =
INT2NUM(i);
break;
3439 int empty_slots = 0, freed_slots = 0,
final_slots = 0;
3440 RVALUE *p, *pend,*offset;
3443 gc_report(2, objspace,
"page_sweep: start.\n");
3464 #if USE_RGENGC && RGENGC_CHECK_MODE 3499 #if GC_PROFILE_MORE_DETAIL 3502 record->removing_objects +=
final_slots + freed_slots;
3503 record->empty_objects += empty_slots;
3506 if (0) fprintf(stderr,
"gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
3511 sweep_page->
free_slots = freed_slots + empty_slots;
3523 gc_report(2, objspace,
"page_sweep: end.\n");
3525 return freed_slots + empty_slots;
3548 default:
rb_bug(
"gc_mode_name: unknown mode: %d", (
int)mode);
3555 #if RGENGC_CHECK_MODE 3557 switch (prev_mode) {
3572 #if GC_ENABLE_INCREMENTAL_MARK 3587 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4 3600 gc_report(1, objspace,
"gc_sweep_finish");
3606 if (heap_allocatable_pages < heap_tomb->total_pages) {
3613 #if RGENGC_CHECK_MODE >= 2 3622 int unlink_limit = 3;
3623 #if GC_ENABLE_INCREMENTAL_MARK 3626 gc_report(2, objspace,
"gc_sweep_step (need_pool: %d)\n", need_pool);
3628 gc_report(2, objspace,
"gc_sweep_step\n");
3633 #if GC_ENABLE_LAZY_SWEEP 3637 while (sweep_page) {
3650 else if (free_slots > 0) {
3651 #if GC_ENABLE_INCREMENTAL_MARK 3670 sweep_page = next_sweep_page;
3677 #if GC_ENABLE_LAZY_SWEEP 3694 #if GC_ENABLE_LAZY_SWEEP 3700 gc_enter(objspace,
"sweep_continue");
3703 gc_report(3, objspace,
"gc_sweep_continue: success heap_increment().\n");
3707 gc_exit(objspace,
"sweep_continue");
3716 gc_report(1, objspace,
"gc_sweep: immediate: %d\n", immediate_sweep);
3718 if (immediate_sweep) {
3719 #if !GC_ENABLE_LAZY_SWEEP 3724 #if !GC_ENABLE_LAZY_SWEEP 3769 size += stack->
limit;
3770 chunk = chunk->
next;
3779 stack->
cache = chunk;
3789 chunk = stack->
cache;
3805 next = stack->
cache;
3837 while (chunk !=
NULL) {
3859 if (stack->
index == 1) {
3869 #if GC_ENABLE_INCREMENTAL_MARK 3874 for (i=0; i<limit; i++) {
3875 if (chunk->
data[i] == obj) {
3887 int limit = stack->
index;
3891 chunk = chunk->
next;
3892 limit = stack->
limit;
3894 rb_bug(
"invalid_mark_stack: unreachable");
3907 for (i=0; i < 4; i++) {
3916 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine.stack_end), th->machine.register_stack_end = rb_ia64_bsp()) 3918 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine.stack_end) 3921 #define STACK_START (th->machine.stack_start) 3922 #define STACK_END (th->machine.stack_end) 3923 #define STACK_LEVEL_MAX (th->machine.stack_maxsize/sizeof(VALUE)) 3925 #if STACK_GROW_DIRECTION < 0 3926 # define STACK_LENGTH (size_t)(STACK_START - STACK_END) 3927 #elif STACK_GROW_DIRECTION > 0 3928 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1) 3930 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \ 3931 : (size_t)(STACK_END - STACK_START + 1)) 3933 #if !STACK_GROW_DIRECTION 3941 if (end > addr)
return ruby_stack_grow_direction = 1;
3942 return ruby_stack_grow_direction = -1;
3955 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)) 3965 ret = (
VALUE*)rb_ia64_bsp() - th->
machine.register_stack_start >
3966 th->
machine.register_stack_maxsize/
sizeof(
VALUE) - water_mark;
3973 #define STACKFRAME_FOR_CALL_CFUNC 512 3978 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) 4002 if (end <= start)
return;
4018 for (i=0; i<n; i++) {
4098 switch (def->
type) {
4163 #if STACK_GROW_DIRECTION < 0 4164 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START) 4165 #elif STACK_GROW_DIRECTION > 0 4166 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix)) 4168 #define GET_STACK_BOUNDS(start, end, appendix) \ 4169 ((STACK_END < STACK_START) ? \ 4170 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix))) 4174 const VALUE *stack_start,
const VALUE *stack_end);
4182 } save_regs_gc_mark;
4183 VALUE *stack_start, *stack_end;
4204 VALUE *stack_start, *stack_end;
4212 const VALUE *stack_start,
const VALUE *stack_end)
4218 th->
machine.register_stack_start,
4219 th->
machine.register_stack_end);
4221 #if defined(__mc68000__) 4223 (
VALUE*)((
char*)stack_start + 2),
4224 (
VALUE*)((
char*)stack_end - 2));
4272 #if RGENGC_PROFILE > 0 4273 objspace->
profile.total_remembered_shady_object_count++;
4274 #if RGENGC_PROFILE >= 2 4328 #if RGENGC_CHECK_MODE 4333 #if GC_ENABLE_INCREMENTAL_MARK 4348 #if RGENGC_CHECK_MODE 4437 gc_mark(objspace,
RANY(obj)->as.imemo.cref.klass);
4439 gc_mark(objspace,
RANY(obj)->as.imemo.cref.refinements);
4442 gc_mark(objspace,
RANY(obj)->as.imemo.svar.cref_or_me);
4443 gc_mark(objspace,
RANY(obj)->as.imemo.svar.lastline);
4444 gc_mark(objspace,
RANY(obj)->as.imemo.svar.backref);
4445 gc_mark(objspace,
RANY(obj)->as.imemo.svar.others);
4448 gc_mark(objspace,
RANY(obj)->as.imemo.throw_data.throw_obj);
4464 #if VM_CHECK_MODE > 0 4484 rb_bug(
"rb_gc_mark() called for broken object");
4489 if (obj)
gc_mark(objspace, obj);
4525 for (i=0; i <
len; i++) {
4549 if (mark_func) (*mark_func)(ptr);
4558 for (i = 0; i <
len; i++) {
4618 rb_bug(
"rb_gc_mark(): unknown data type 0x%x(%p) %s",
4633 #if GC_ENABLE_INCREMENTAL_MARK 4634 size_t marked_slots_at_the_beginning = objspace->
marked_slots;
4635 size_t popped_count = 0;
4639 if (obj ==
Qundef)
continue;
4642 rb_bug(
"gc_mark_stacked_objects: %s is not marked.",
obj_info(obj));
4646 #if GC_ENABLE_INCREMENTAL_MARK 4649 rb_bug(
"gc_mark_stacked_objects: incremental, but marking bit is 0");
4654 if (popped_count + (objspace->
marked_slots - marked_slots_at_the_beginning) > count) {
4687 #if PRINT_ROOT_TICKS 4688 #define MAX_TICKS 0x100 4689 static tick_t mark_ticks[MAX_TICKS];
4690 static const char *mark_ticks_categories[MAX_TICKS];
4693 show_mark_ticks(
void)
4696 fprintf(stderr,
"mark ticks result:\n");
4697 for (i=0; i<MAX_TICKS; i++) {
4698 const char *category = mark_ticks_categories[i];
4700 fprintf(stderr,
"%s\t%8lu\n", category, (
unsigned long)mark_ticks[i]);
4716 #if PRINT_ROOT_TICKS 4717 tick_t start_tick = tick();
4719 const char *prev_category = 0;
4721 if (mark_ticks_categories[0] == 0) {
4722 atexit(show_mark_ticks);
4726 if (categoryp) *categoryp =
"xxx";
4732 #if PRINT_ROOT_TICKS 4733 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \ 4734 if (prev_category) { \ 4735 tick_t t = tick(); \ 4736 mark_ticks[tick_count] = t - start_tick; \ 4737 mark_ticks_categories[tick_count] = prev_category; \ 4740 prev_category = category; \ 4741 start_tick = tick(); \ 4744 #define MARK_CHECKPOINT_PRINT_TICK(category) 4747 #define MARK_CHECKPOINT(category) do { \ 4748 if (categoryp) *categoryp = category; \ 4749 MARK_CHECKPOINT_PRINT_TICK(category); \ 4781 #undef MARK_CHECKPOINT 4784 #if RGENGC_CHECK_MODE >= 4 4786 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01) 4787 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01) 4788 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1)) 4796 static struct reflist *
4797 reflist_create(
VALUE obj)
4799 struct reflist *refs =
xmalloc(
sizeof(
struct reflist));
4802 refs->list[0] = obj;
4808 reflist_destruct(
struct reflist *refs)
4815 reflist_add(
struct reflist *refs,
VALUE obj)
4817 if (refs->pos == refs->size) {
4822 refs->list[refs->pos++] = obj;
4826 reflist_dump(
struct reflist *refs)
4829 for (i=0; i<refs->pos; i++) {
4830 VALUE obj = refs->list[i];
4831 if (IS_ROOTSIG(obj)) {
4832 fprintf(stderr,
"<root@%s>", GET_ROOTSIG(obj));
4835 fprintf(stderr,
"<%s>",
obj_info(obj));
4837 if (i+1 < refs->pos) fprintf(stderr,
", ");
4842 reflist_refered_from_machine_context(
struct reflist *refs)
4845 for (i=0; i<refs->pos; i++) {
4846 VALUE obj = refs->list[i];
4847 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj),
"machine_context") == 0)
return 1;
4862 const char *category;
4868 allrefs_add(
struct allrefs *data,
VALUE obj)
4870 struct reflist *refs;
4873 reflist_add(refs, data->root_obj);
4877 refs = reflist_create(data->root_obj);
4884 allrefs_i(
VALUE obj,
void *ptr)
4886 struct allrefs *data = (
struct allrefs *)ptr;
4888 if (allrefs_add(data, obj)) {
4894 allrefs_roots_i(
VALUE obj,
void *ptr)
4896 struct allrefs *data = (
struct allrefs *)ptr;
4898 data->root_obj = MAKE_ROOTSIG(data->category);
4900 if (allrefs_add(data, obj)) {
4908 struct allrefs data;
4909 struct mark_func_data_struct mfd;
4914 data.objspace = objspace;
4918 mfd.mark_func = allrefs_roots_i;
4934 return data.references;
4940 struct reflist *refs = (
struct reflist *)value;
4941 reflist_destruct(refs);
4946 objspace_allrefs_destruct(
struct st_table *refs)
4948 st_foreach(refs, objspace_allrefs_destruct_i, 0);
4952 #if RGENGC_CHECK_MODE >= 5 4957 struct reflist *refs = (
struct reflist *)v;
4958 fprintf(stderr,
"[allrefs_dump_i] %s <- ",
obj_info(obj));
4960 fprintf(stderr,
"\n");
4967 fprintf(stderr,
"[all refs] (size: %d)\n", (
int)objspace->
rgengc.allrefs_table->num_entries);
4976 struct reflist *refs = (
struct reflist *)v;
4981 fprintf(stderr,
"gc_check_after_marks_i: %s is not marked and not oldgen.\n",
obj_info(obj));
4982 fprintf(stderr,
"gc_check_after_marks_i: %p is referred from ", (
void *)obj);
4985 if (reflist_refered_from_machine_context(refs)) {
4986 fprintf(stderr,
" (marked from machine stack).\n");
4990 objspace->
rgengc.error_count++;
4991 fprintf(stderr,
"\n");
4998 gc_marks_check(
rb_objspace_t *objspace,
int (*checker_func)(
ANYARGS),
const char *checker_name)
5001 #if RGENGC_ESTIMATE_OLDMALLOC 5006 objspace->
rgengc.allrefs_table = objspace_allrefs(objspace);
5012 if (objspace->
rgengc.error_count > 0) {
5013 #if RGENGC_CHECK_MODE >= 5 5014 allrefs_dump(objspace);
5016 if (checker_name)
rb_bug(
"%s: GC has problem.", checker_name);
5019 objspace_allrefs_destruct(objspace->
rgengc.allrefs_table);
5020 objspace->
rgengc.allrefs_table = 0;
5024 #if RGENGC_ESTIMATE_OLDMALLOC 5056 fprintf(stderr,
"verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n",
obj_info(parent),
obj_info(child));
5069 fprintf(stderr,
"verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
5089 for (obj = (
VALUE)page_start; obj != (
VALUE)page_end; obj += stride) {
5134 unsigned int has_remembered_shady =
FALSE;
5135 unsigned int has_remembered_old =
FALSE;
5136 int rememberd_old_objects = 0;
5137 int free_objects = 0;
5138 int zombie_objects = 0;
5142 if (
RBASIC(obj) == 0) free_objects++;
5146 has_remembered_old =
TRUE;
5147 rememberd_old_objects++;
5157 fprintf(stderr,
"marking -> %s\n",
obj_info(obj));
5160 rb_bug(
"page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
5161 page, rememberd_old_objects, obj ?
obj_info(obj) :
"");
5165 rb_bug(
"page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
5172 rb_bug(
"page %p's free_slots should be %d, but %d\n", page, (
int)page->
free_slots, free_objects);
5176 rb_bug(
"page %p's final_slots should be %d, but %d\n", page, (
int)page->
final_slots, zombie_objects);
5179 return rememberd_old_objects;
5188 int rememberd_old_objects = 0;
5197 return rememberd_old_objects;
5203 int rememberd_old_objects = 0;
5206 return rememberd_old_objects;
5227 gc_report(5, objspace,
"gc_verify_internal_consistency: start\n");
5232 eo_args.
data = (
void *)&data;
5236 #if RGENGC_CHECK_MODE >= 5 5238 gc_marks_check(objspace,
NULL,
NULL);
5239 allrefs_dump(objspace);
5241 rb_bug(
"gc_verify_internal_consistency: found internal inconsistency.");
5251 fprintf(stderr,
"heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
5269 size_t list_count = 0;
5282 rb_bug(
"inconsistent finalizing object count:\n" 5285 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
5292 gc_report(5, objspace,
"gc_verify_internal_consistency: OK\n");
5309 gc_report(1, objspace,
"gc_marks_start: (%s)\n", full_mark ?
"full" :
"minor");
5314 #if GC_ENABLE_INCREMENTAL_MARK 5317 if (0) fprintf(stderr,
"objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
5342 #if GC_ENABLE_INCREMENTAL_MARK 5356 bits_t bits = mark_bits[j] & wbun_bits[j];
5400 #if GC_ENABLE_INCREMENTAL_MARK 5405 gc_report(1, objspace,
"gc_marks_finish: pooled pages are exists. retry.\n");
5420 #if RGENGC_CHECK_MODE >= 2 5422 rb_bug(
"gc_marks_finish (incremental): there are remembered old objects.");
5432 #if RGENGC_CHECK_MODE >= 2 5445 #if RGENGC_CHECK_MODE >= 4 5446 gc_marks_check(objspace, gc_check_after_marks_i,
"after_marks");
5453 size_t sweep_slots = total_slots - objspace->
marked_slots;
5458 #if RGENGC_CHECK_MODE 5465 if (sweep_slots > max_free_slots) {
5476 if (sweep_slots < min_free_slots) {
5477 if (!full_marking) {
5479 full_marking =
TRUE;
5484 gc_report(1, objspace,
"gc_marks_finish: next is full GC!!)\n");
5490 gc_report(1, objspace,
"gc_marks_finish: heap_set_increment!!\n");
5513 gc_report(1, objspace,
"gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
5517 if (sweep_slots < min_free_slots) {
5518 gc_report(1, objspace,
"gc_marks_finish: heap_set_increment!!\n");
5530 #if GC_ENABLE_INCREMENTAL_MARK 5542 if (0) fprintf(stderr,
"objspace->marked_slots: %d\n", (
int)objspace->
marked_slots);
5549 gc_report(1, objspace,
"gc_marks_rest\n");
5551 #if GC_ENABLE_INCREMENTAL_MARK 5569 #if GC_ENABLE_INCREMENTAL_MARK 5578 gc_enter(objspace,
"marks_continue");
5587 from =
"pooled-pages";
5591 from =
"incremented-pages";
5595 gc_report(2, objspace,
"gc_marks_continue: provide %d slots from %s.\n", slots, from);
5605 gc_exit(objspace,
"marks_continue");
5624 #if RGENGC_PROFILE > 0 5649 const char *status =
" ";
5665 va_start(args, fmt);
5669 fprintf(out,
"%s|", status);
5717 #if RGENGC_PROFILE > 0 5720 objspace->
profile.total_remembered_normal_object_count++;
5721 #if RGENGC_PROFILE >= 2 5740 #ifndef PROFILE_REMEMBERSET_MARK 5741 #define PROFILE_REMEMBERSET_MARK 0 5749 #if PROFILE_REMEMBERSET_MARK 5750 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
5752 gc_report(1, objspace,
"rgengc_rememberset_mark: start\n");
5762 #if PROFILE_REMEMBERSET_MARK 5768 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
5769 marking_bits[j] = 0;
5797 #if PROFILE_REMEMBERSET_MARK 5806 #if PROFILE_REMEMBERSET_MARK 5807 fprintf(stderr,
"%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
5809 gc_report(1, objspace,
"rgengc_rememberset_mark: finished\n");
5864 #if GC_ENABLE_INCREMENTAL_MARK 5906 #define gc_writebarrier_incremental(a, b, objspace) 5949 objspace->
profile.total_shade_operation_count++;
5950 #if RGENGC_PROFILE >= 2 5990 fprintf(stderr,
"%s\t%d\n", (
char *)key, (
int)val);
6005 if (rgengc_unprotect_logging_table == 0) {
6072 static ID ID_marked;
6074 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible;
6078 #define I(s) ID_##s = rb_intern(#s); 6119 #if GC_ENABLE_INCREMENTAL_MARK 6133 #if GC_ENABLE_INCREMENTAL_MARK 6149 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE 6150 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024 6185 if (tmp->
varptr == addr) {
6217 #define gc_stress_full_mark_after_malloc_p() \ 6218 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc))) 6278 #if RGENGC_ESTIMATE_OLDMALLOC 6290 if (0) fprintf(stderr,
"%d\t%d\t%u\t%u\t%d\n",
6315 #if GC_PROFILE_MORE_DETAIL 6321 #if GC_PROFILE_MORE_DETAIL 6325 return gc_start(objspace, full_mark, immediate_mark, immediate_sweep, reason);
6329 gc_start(
rb_objspace_t *objspace,
const int full_mark,
const int immediate_mark,
const unsigned int immediate_sweep,
int reason)
6331 int do_full_mark = full_mark;
6341 #if RGENGC_CHECK_MODE >= 2 6352 do_full_mark =
TRUE;
6361 do_full_mark =
TRUE;
6365 do_full_mark =
TRUE;
6376 #if GC_ENABLE_INCREMENTAL_MARK 6391 gc_report(1, objspace,
"gc_start(%d, %d, %d, reason: %d) => %d, %d, %d\n",
6392 full_mark, immediate_mark, immediate_sweep, reason,
6411 gc_exit(objspace,
"gc_start");
6421 if (marking || sweeping) {
6454 #if GC_ENABLE_INCREMENTAL_MARK 6472 static char buff[0x10];
6477 #if PRINT_ENTER_EXIT_TICK 6479 static tick_t last_exit_tick;
6480 static tick_t enter_tick;
6481 static int enter_count = 0;
6482 static char last_gc_status[0x10];
6487 if (direction == 0) {
6489 enter_tick = tick();
6493 tick_t exit_tick = tick();
6494 char current_gc_status[0x10];
6498 fprintf(stderr,
"%"PRItick
"\t%"PRItick
"\t%s\t[%s->%s|%c]\n",
6499 enter_tick - last_exit_tick,
6500 exit_tick - enter_tick,
6502 last_gc_status, current_gc_status,
6504 last_exit_tick = exit_tick;
6507 fprintf(stderr,
"%"PRItick
"\t%"PRItick
"\t%s\t[%s->%s|%c]\n",
6509 exit_tick - enter_tick,
6511 last_gc_status, current_gc_status,
6559 return garbage_collect(objspace, full_mark, immediate_mark, immediate_sweep, reason);
6573 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
6622 static ID keyword_ids[3];
6629 if (!keyword_ids[0]) {
6630 keyword_ids[0] =
rb_intern(
"full_mark");
6631 keyword_ids[1] =
rb_intern(
"immediate_mark");
6632 keyword_ids[2] =
rb_intern(
"immediate_sweep");
6637 if (kwvals[0] !=
Qundef) full_mark =
RTEST(kwvals[0]);
6638 if (kwvals[1] !=
Qundef) immediate_mark =
RTEST(kwvals[1]);
6639 if (kwvals[2] !=
Qundef) immediate_sweep =
RTEST(kwvals[2]);
6670 #if RGENGC_PROFILE >= 2 6675 gc_count_add_each_types(
VALUE hash,
const char *
name,
const size_t *types)
6679 for (i=0; i<
T_MASK; i++) {
6712 static VALUE sym_major_by =
Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
6713 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
6714 #if RGENGC_ESTIMATE_OLDMALLOC 6715 static VALUE sym_oldmalloc;
6717 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
6718 static VALUE sym_none, sym_marking, sym_sweeping;
6733 if (sym_major_by ==
Qnil) {
6734 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s)) 6746 #if RGENGC_ESTIMATE_OLDMALLOC 6760 #define SET(name, attr) \ 6761 if (key == sym_##name) \ 6763 else if (hash != Qnil) \ 6764 rb_hash_aset(hash, sym_##name, (attr)); 6771 #if RGENGC_ESTIMATE_OLDMALLOC 6775 SET(major_by, major_by);
6789 if (orig_flags == 0) {
6862 #if RGENGC_ESTIMATE_OLDMALLOC 6867 gc_stat_sym_total_generated_normal_object_count,
6868 gc_stat_sym_total_generated_shady_object_count,
6869 gc_stat_sym_total_shade_operation_count,
6870 gc_stat_sym_total_promoted_count,
6871 gc_stat_sym_total_remembered_normal_object_count,
6872 gc_stat_sym_total_remembered_shady_object_count,
6898 #if RGENGC_ESTIMATE_OLDMALLOC 6912 if (gc_stat_symbols[0] == 0) {
6913 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s)) 6916 S(heap_sorted_length);
6918 S(heap_available_slots);
6921 S(heap_final_slots);
6922 S(heap_marked_slots);
6925 S(total_allocated_pages);
6926 S(total_freed_pages);
6927 S(total_allocated_objects);
6928 S(total_freed_objects);
6929 S(malloc_increase_bytes);
6930 S(malloc_increase_bytes_limit);
6934 S(remembered_wb_unprotected_objects);
6935 S(remembered_wb_unprotected_objects_limit);
6937 S(old_objects_limit);
6938 #if RGENGC_ESTIMATE_OLDMALLOC 6939 S(oldmalloc_increase_bytes);
6940 S(oldmalloc_increase_bytes_limit);
6943 S(total_generated_normal_object_count);
6944 S(total_generated_shady_object_count);
6945 S(total_shade_operation_count);
6946 S(total_promoted_count);
6947 S(total_remembered_normal_object_count);
6948 S(total_remembered_shady_object_count);
6952 #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s)) 6953 S(gc_stat_heap_used);
6954 S(heap_eden_page_length);
6955 S(heap_tomb_page_length);
6963 S(remembered_shady_object);
6964 S(remembered_shady_object_limit);
6966 S(old_object_limit);
6968 S(total_allocated_object);
6969 S(total_freed_object);
6972 #if RGENGC_ESTIMATE_OLDMALLOC 6973 S(oldmalloc_increase);
6984 #define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] 6985 #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s] 7004 #if RGENGC_ESTIMATE_OLDMALLOC 7020 if (!
NIL_P(new_key)) {
7021 static int warned = 0;
7023 rb_warn(
"GC.stat keys were changed from Ruby 2.1. " 7025 "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
7062 static VALUE default_proc_for_compat = 0;
7063 if (default_proc_for_compat == 0) {
7077 #define SET(name, attr) \ 7078 if (key == gc_stat_symbols[gc_stat_sym_##name]) \ 7080 else if (hash != Qnil) \ 7081 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr)); 7110 #if RGENGC_ESTIMATE_OLDMALLOC 7116 SET(total_generated_normal_object_count, objspace->
profile.total_generated_normal_object_count);
7117 SET(total_generated_shady_object_count, objspace->
profile.total_generated_shady_object_count);
7118 SET(total_shade_operation_count, objspace->
profile.total_shade_operation_count);
7119 SET(total_promoted_count, objspace->
profile.total_promoted_count);
7120 SET(total_remembered_normal_object_count, objspace->
profile.total_remembered_normal_object_count);
7121 SET(total_remembered_shady_object_count, objspace->
profile.total_remembered_shady_object_count);
7135 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2 7137 gc_count_add_each_types(hash,
"generated_normal_object_count_types", objspace->
profile.generated_normal_object_count_types);
7138 gc_count_add_each_types(hash,
"generated_shady_object_count_types", objspace->
profile.generated_shady_object_count_types);
7139 gc_count_add_each_types(hash,
"shade_operation_count_types", objspace->
profile.shade_operation_count_types);
7140 gc_count_add_each_types(hash,
"promoted_types", objspace->
profile.promoted_types);
7141 gc_count_add_each_types(hash,
"remembered_normal_object_count_types", objspace->
profile.remembered_normal_object_count_types);
7142 gc_count_add_each_types(hash,
"remembered_shady_object_count_types", objspace->
profile.remembered_shady_object_count_types);
7325 char *ptr =
getenv(name);
7328 if (ptr !=
NULL && *ptr) {
7331 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG 7332 val = strtoll(ptr, &end, 0);
7334 val =
strtol(ptr, &end, 0);
7346 unit = 1024*1024*1024;
7350 while (*end && isspace((
unsigned char)*end)) end++;
7352 if (
RTEST(
ruby_verbose)) fprintf(stderr,
"invalid string for %s: %s\n", name, ptr);
7356 if (val < -(ssize_t)(
SIZE_MAX / 2 / unit) || (ssize_t)(
SIZE_MAX / 2 / unit) < val) {
7357 if (
RTEST(
ruby_verbose)) fprintf(stderr,
"%s=%s is ignored because it overflows\n", name, ptr);
7362 if (val > 0 && (
size_t)val > lower_bound) {
7364 fprintf(stderr,
"%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
7366 *default_value = (size_t)val;
7371 fprintf(stderr,
"%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
7372 name, val, *default_value, lower_bound);
7381 get_envparam_double(
const char *name,
double *default_value,
double lower_bound,
double upper_bound,
int accept_zero)
7383 char *ptr =
getenv(name);
7386 if (ptr !=
NULL && *ptr) {
7389 if (!*ptr || *end) {
7390 if (
RTEST(
ruby_verbose)) fprintf(stderr,
"invalid string for %s: %s\n", name, ptr);
7394 if (accept_zero && val == 0.0) {
7397 else if (val <= lower_bound) {
7399 fprintf(stderr,
"%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
7400 name, val, *default_value, lower_bound);
7403 else if (upper_bound != 0.0 &&
7404 val > upper_bound) {
7406 fprintf(stderr,
"%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
7407 name, val, *default_value, upper_bound);
7412 if (
RTEST(
ruby_verbose)) fprintf(stderr,
"%s=%f (default value: %f)\n", name, val, *default_value);
7413 *default_value =
val;
7427 if (min_pages >
heap_eden->total_pages) {
7477 if (safe_level > 0)
return;
7484 rb_warn(
"RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
7492 rb_warn(
"RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
7510 #if RGENGC_ESTIMATE_OLDMALLOC 7526 struct mark_func_data_struct mfd;
7527 mfd.mark_func =
func;
7553 struct mark_func_data_struct mfd;
7556 data.
data = passing_data;
7590 fprintf(stderr,
"[FATAL] %s\n", msg);
7615 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
7631 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
7648 #if defined __MINGW32__ 7649 res = __mingw_aligned_malloc(size, alignment);
7650 #elif defined _WIN32 7651 void *_aligned_malloc(
size_t,
size_t);
7652 res = _aligned_malloc(size, alignment);
7653 #elif defined(HAVE_POSIX_MEMALIGN) 7654 if (posix_memalign(&res, alignment, size) == 0) {
7660 #elif defined(HAVE_MEMALIGN) 7661 res = memalign(alignment, size);
7664 res =
malloc(alignment + size +
sizeof(
void*));
7665 aligned = (
char*)res + alignment +
sizeof(
void*);
7666 aligned -= ((
VALUE)aligned & (alignment - 1));
7667 ((
void**)aligned)[-1] = res;
7668 res = (
void*)aligned;
7671 #if defined(_DEBUG) || GC_DEBUG 7673 assert(((alignment - 1) & alignment) == 0);
7674 assert(alignment %
sizeof(
void*) == 0);
7682 #if defined __MINGW32__ 7683 __mingw_aligned_free(ptr);
7684 #elif defined _WIN32 7686 #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN) 7689 free(((
void**)ptr)[-1]);
7693 static inline size_t 7696 #ifdef HAVE_MALLOC_USABLE_SIZE 7697 return malloc_usable_size(ptr);
7712 if (sub == 0)
return;
7716 if (val < sub) sub =
val;
7732 if (new_size > old_size) {
7734 #if RGENGC_ESTIMATE_OLDMALLOC 7740 #if RGENGC_ESTIMATE_OLDMALLOC 7756 #if MALLOC_ALLOCATED_SIZE 7757 if (new_size >= old_size) {
7761 size_t dec_size = old_size - new_size;
7762 size_t allocated_size = objspace->
malloc_params.allocated_size;
7764 #if MALLOC_ALLOCATED_SIZE_CHECK 7765 if (allocated_size < dec_size) {
7766 rb_bug(
"objspace_malloc_increase: underflow malloc_params.allocated_size.");
7772 if (0) fprintf(stderr,
"increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
7777 (
int)new_size, (
int)old_size);
7786 if (allocations > 0) {
7789 #if MALLOC_ALLOCATED_SIZE_CHECK 7801 static inline size_t 7804 if (size == 0) size = 1;
7806 #if CALC_EXACT_MALLOC_SIZE 7807 size +=
sizeof(size_t);
7813 static inline void *
7816 #if CALC_EXACT_MALLOC_SIZE 7817 ((
size_t *)mem)[0] =
size;
7818 mem = (
size_t *)mem + 1;
7824 #define TRY_WITH_GC(alloc) do { \ 7825 objspace_malloc_gc_stress(objspace); \ 7827 (!garbage_collect_with_gvl(objspace, TRUE, TRUE, TRUE, GPR_FLAG_MALLOC) || \ 7851 if ((ssize_t)size < 0) {
7857 static inline size_t 7885 if (new_size == 0) {
7890 #if CALC_EXACT_MALLOC_SIZE 7891 new_size +=
sizeof(size_t);
7892 ptr = (
size_t *)ptr - 1;
7893 old_size = ((
size_t *)ptr)[0];
7900 #if CALC_EXACT_MALLOC_SIZE 7901 ((
size_t *)mem)[0] = new_size;
7902 mem = (
size_t *)mem + 1;
7913 #if CALC_EXACT_MALLOC_SIZE 7914 ptr = ((
size_t *)ptr) - 1;
7915 old_size = ((
size_t*)ptr)[0];
7971 #ifdef ruby_sized_xrealloc 7972 #undef ruby_sized_xrealloc 7986 #ifdef ruby_sized_xrealloc2 7987 #undef ruby_sized_xrealloc2 7992 size_t len = size * n;
7993 if (n != 0 && size != len / n) {
8005 #ifdef ruby_sized_xfree 8006 #undef ruby_sized_xfree 8029 #if CALC_EXACT_MALLOC_SIZE 8030 size +=
sizeof(size_t);
8033 #if CALC_EXACT_MALLOC_SIZE 8035 ((
size_t *)mem)[0] = 0;
8036 mem = (
size_t *)mem + 1;
8044 size_t *mem = (
size_t *)ptr;
8045 #if CALC_EXACT_MALLOC_SIZE 8070 if (len < 0 || (cnt = (
long)
roomof(len,
sizeof(
VALUE))) < 0) {
8083 RNODE(s)->u3.cnt = 0;
8088 #if MALLOC_ALLOCATED_SIZE 8099 gc_malloc_allocated_size(
VALUE self)
8114 gc_malloc_allocations(
VALUE self)
8127 else if (diff < 0) {
8142 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0 8144 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK 8159 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK 8186 *(
size_t *)arg += (ptr[0] + 1) *
sizeof(
VALUE);
8194 const struct weakmap *w = ptr;
8227 if (!existing)
return ST_STOP;
8229 for (i = j = 1, size = ptr[0]; i <=
size; ++i) {
8230 if (ptr[i] != wmap) {
8260 rids = (
VALUE *)data;
8262 for (i = 0; i <
size; ++i) {
8451 size = (ptr = optr = (
VALUE *)*val)[0];
8462 if (ptr == optr)
return ST_STOP;
8515 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG 8526 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100 8532 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID) 8534 static int try_clock_gettime = 1;
8536 if (try_clock_gettime &&
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
8540 try_clock_gettime = 0;
8547 struct rusage usage;
8549 if (
getrusage(RUSAGE_SELF, &usage) == 0) {
8550 time = usage.ru_utime;
8558 FILETIME creation_time, exit_time, kernel_time, user_time;
8563 if (GetProcessTimes(GetCurrentProcess(),
8564 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
8565 memcpy(&ui, &user_time,
sizeof(FILETIME));
8566 q = ui.QuadPart / 10
L;
8567 t = (
DWORD)(q % 1000000
L) * 1e-6;
8572 t += (double)(
DWORD)(q >> 16) * (1 << 16);
8573 t += (
DWORD)q & ~(~0 << 16);
8605 rb_bug(
"gc_profile malloc or realloc miss");
8612 #if MALLOC_ALLOCATED_SIZE 8615 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY 8618 struct rusage usage;
8619 if (
getrusage(RUSAGE_SELF, &usage) == 0) {
8620 record->maxrss = usage.ru_maxrss;
8621 record->minflt = usage.ru_minflt;
8622 record->majflt = usage.ru_majflt;
8635 #if GC_PROFILE_MORE_DETAIL 8636 record->prepare_time = objspace->
profile.prepare_time;
8665 #define RUBY_DTRACE_GC_HOOK(name) \ 8666 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0) 8671 #if GC_PROFILE_MORE_DETAIL 8682 #if GC_PROFILE_MORE_DETAIL 8715 record->
gc_time += sweep_time;
8721 #if GC_PROFILE_MORE_DETAIL 8722 record->gc_sweep_time += sweep_time;
8732 #if GC_PROFILE_MORE_DETAIL 8749 #if GC_PROFILE_MORE_DETAIL 8751 record->heap_live_objects = live;
8752 record->heap_free_objects = total - live;
8859 #if GC_PROFILE_MORE_DETAIL 8874 #if RGENGC_PROFILE > 0 8885 #if GC_PROFILE_MORE_DETAIL 8886 #define MAJOR_REASON_MAX 0x10 8889 gc_profile_dump_major_reason(
int flags,
char *buff)
8900 if (reason & GPR_FLAG_MAJOR_BY_##x) { \ 8901 buff[i++] = #x[0]; \ 8902 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \ 8908 #if RGENGC_ESTIMATE_OLDMALLOC 8922 #ifdef MAJOR_REASON_MAX 8923 char reason_str[MAJOR_REASON_MAX];
8931 append(out,
rb_str_new_cstr(
"Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
8933 for (i = 0; i <
count; i++) {
8940 #if GC_PROFILE_MORE_DETAIL 8943 "Prepare Time = Previously GC's rest sweep time\n" 8944 "Index Flags Allocate Inc. Allocate Limit" 8948 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj" 8950 " OldgenObj RemNormObj RemShadObj" 8953 " MaxRSS(KB) MinorFLT MajorFLT" 8957 for (i = 0; i <
count; i++) {
8973 gc_profile_dump_major_reason(record->
flags, reason_str),
8980 record->allocate_increase, record->allocate_limit,
8982 record->allocated_size,
8984 record->heap_use_pages,
8985 record->gc_mark_time*1000,
8986 record->gc_sweep_time*1000,
8987 record->prepare_time*1000,
8989 record->heap_live_objects,
8990 record->heap_free_objects,
8991 record->removing_objects,
8992 record->empty_objects
8995 record->old_objects,
8996 record->remembered_normal_objects,
8997 record->remembered_shady_objects
9001 record->maxrss / 1024,
9073 for (i = 0; i <
count; i++) {
9137 #define TYPE_NAME(t) case (t): return #t; 9196 rb_bug(
"method_type_name: unreachable (type: %d)", type);
9200 # define ARY_SHARED_P(ary) \ 9201 (assert(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \ 9202 FL_TEST((ary),ELTS_SHARED)!=0) 9203 # define ARY_EMBED_P(ary) \ 9204 (assert(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \ 9205 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0) 9211 snprintf(buff, buff_size,
"%s %s@%s:%d", buff,
9225 #define TF(c) ((c) != 0 ? "true" : "false") 9226 #define C(c, s) ((c) != 0 ? (s) : " ") 9231 snprintf(buff, buff_size,
"%p [%d%s%s%s%s] %s",
9239 snprintf(buff, buff_size,
"%p [%s] %s",
9248 else if (
RBASIC(obj)->klass == 0) {
9249 snprintf(buff, buff_size,
"%s (temporary internal)", buff);
9253 if (!
NIL_P(class_path)) {
9259 snprintf(buff, buff_size,
"%s @%s:%d", buff,
RANY(obj)->file,
RANY(obj)->line);
9264 snprintf(buff, buff_size,
"%s (%s)", buff,
9268 snprintf(buff, buff_size,
"%s [%s%s] len: %d", buff,
9279 if (!
NIL_P(class_path)) {
9292 snprintf(buff, buff_size,
"%s %s", buff, type_name);
9298 const char *imemo_name;
9300 #define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break; 9311 snprintf(buff, buff_size,
"%s %s", buff, imemo_name);
9316 snprintf(buff, buff_size,
"%s (called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)", buff,
9343 #define OBJ_INFO_BUFFERS_NUM 10 9344 #define OBJ_INFO_BUFFERS_SIZE 0x100 9345 static int obj_info_buffers_index = 0;
9346 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
9351 const int index = obj_info_buffers_index++;
9352 char *
const buff = &obj_info_buffers[index][0];
9354 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
9355 obj_info_buffers_index = 0;
9383 fprintf(stderr,
"rb_obj_info_dump: %s\n",
rb_raw_obj_info(buff, 0x100, obj));
9393 fprintf(stderr,
"created at: %s:%d\n",
RANY(obj)->file,
RANY(obj)->line);
9396 fprintf(stderr,
"pointer to heap?: true\n");
9399 fprintf(stderr,
"pointer to heap?: false\n");
9405 fprintf(stderr,
"age? : %d\n", RVALUE_AGE(obj));
9406 fprintf(stderr,
"old? : %s\n",
RVALUE_OLD_P(obj) ?
"true" :
"false");
9408 fprintf(stderr,
"remembered? : %s\n",
RVALUE_REMEMBERED(obj) ?
"true" :
"false");
9412 fprintf(stderr,
"lazy sweeping?: true\n");
9413 fprintf(stderr,
"swept?: %s\n",
is_swept_object(objspace, obj) ?
"done" :
"not yet");
9416 fprintf(stderr,
"lazy sweeping?: false\n");
9423 fprintf(stderr,
"WARNING: object %s(%p) is inadvertently collected\n", (
char *)name, (
void *)obj);
9428 rb_gcdebug_sentinel(
VALUE obj,
const char *name)
9435 #if GC_DEBUG_STRESS_TO_CLASS 9449 rb_gcdebug_remove_stress_to_class(
int argc,
VALUE *argv,
VALUE self)
9455 for (i = 0; i <
argc; ++i) {
9606 #if MALLOC_ALLOCATED_SIZE 9611 #if GC_DEBUG_STRESS_TO_CLASS 9620 #define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
rb_event_flag_t hook_events
#define RBASIC_CLEAR_CLASS(obj)
static void * objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
int rb_objspace_marked_object_p(VALUE obj)
static const char * type_name(int type, VALUE obj)
static void mark_hash(rb_objspace_t *objspace, st_table *tbl)
void rb_gc_finalize_deferred(void)
void rb_class_remove_from_super_subclasses(VALUE klass)
static int VM_ENV_ESCAPED_P(const VALUE *ep)
size_t heap_total_objects
static void gc_marks(rb_objspace_t *objspace, int full_mark)
void(* RUBY_DATA_FUNC)(void *)
VALUE rb_ary_last(int argc, const VALUE *argv, VALUE ary)
void rb_class_detach_subclasses(VALUE klass)
void rb_free_const_table(struct rb_id_table *tbl)
static enum gc_mode gc_mode_verify(enum gc_mode mode)
static void gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
struct rb_objspace::@114 rincgc
static int RVALUE_FLAGS_AGE(VALUE flags)
struct heap_page * pooled_pages
static void heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
void rb_gc_writebarrier(VALUE a, VALUE b)
static void gc_marks_start(rb_objspace_t *objspace, int full)
static void root_objects_from(VALUE obj, void *ptr)
static void RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
void rb_bug(const char *fmt,...)
struct heap_page::@115 flags
static void gc_grey(rb_objspace_t *objspace, VALUE ptr)
#define heap_pages_final_slots
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
static VALUE gc_profile_disable(void)
static void objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
static int set_zero(st_data_t key, st_data_t val, st_data_t arg)
#define RUBY_TYPED_FREE_IMMEDIATELY
static void gc_aging(rb_objspace_t *objspace, VALUE obj)
#define GC_PROFILE_MORE_DETAIL
double heap_free_slots_min_ratio
static VALUE gc_stress_set_m(VALUE self, VALUE flag)
VALUE rb_obj_id(VALUE obj)
size_t strlen(const char *)
#define has_sweeping_pages(heap)
void rb_objspace_free(rb_objspace_t *objspace)
static void gc_prof_set_malloc_info(rb_objspace_t *)
static enum rb_id_table_iterator_result free_const_entry_i(VALUE value, void *data)
#define RCLASS_CONST_TBL(c)
#define is_marking(objspace)
size_t uncollectible_wb_unprotected_objects_limit
static VALUE wmap_each(VALUE self)
#define RUBY_DEFAULT_FREE
void rb_gc_free_dsymbol(VALUE)
static size_t xmalloc2_size(const size_t count, const size_t elsize)
static void pop_mark_stack_chunk(mark_stack_t *stack)
static VALUE wmap_each_value(VALUE self)
double gc_sweep_start_time
static void run_final(rb_objspace_t *objspace, VALUE zombie)
static void gc_record(rb_objspace_t *objspace, int direction, const char *event)
static void gc_prof_timer_stop(rb_objspace_t *)
static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
#define RSTRUCT_CONST_PTR(st)
unsigned int UINT8 __attribute__((__mode__(__QI__)))
VALUE rb_yield_values(int n,...)
struct rb_thread_struct::@204 machine
static int max(int a, int b)
static VALUE id2ref(VALUE obj, VALUE objid)
static unsigned int hash(register const char *str, register unsigned int len)
static void rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
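A minimal sketch of typical usage from a C extension (the extension, module, and function names here are illustrative, not from this file):

#include "ruby.h"

static VALUE
my_version(VALUE self)
{
    return rb_str_new_cstr("1.0.0");
}

void
Init_myext(void)
{
    VALUE mod = rb_define_module("MyExt");
    /* Defines MyExt.version, a method on the module object itself. */
    rb_define_singleton_method(mod, "version", my_version, 0);
}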
void * ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
static int mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
#define GC_HEAP_GROWTH_FACTOR
static VALUE os_each_obj(int argc, VALUE *argv, VALUE os)
static int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
size_t ruby_stack_length(VALUE **p)
#define FLUSH_REGISTER_WINDOWS
static const char * obj_type_name(VALUE obj)
#define malloc_allocated_size
void rb_gc_free_node(VALUE obj)
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR
static VALUE default_proc_for_compat_func(VALUE hash, VALUE dmy, int argc, VALUE *argv)
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
static void gc_sweep_rest(rb_objspace_t *objspace)
void rb_class_remove_from_module_subclasses(VALUE klass)
static size_t objspace_free_slots(rb_objspace_t *objspace)
#define ATOMIC_EXCHANGE(var, val)
static void gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
static int wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
VALUE rb_obj_is_thread(VALUE obj)
static void wmap_mark(void *ptr)
#define rb_data_typed_object_alloc
unsigned int during_minor_gc
static VALUE gc_verify_internal_consistency(VALUE self)
size_t onig_memsize(const regex_t *reg)
static int RVALUE_REMEMBERED(VALUE obj)
size_t oldmalloc_limit_max
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
static void gc_finalize_deferred(void *dmy)
VALUE rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
void rb_iseq_free(const rb_iseq_t *iseq)
static size_t objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
#define GC_HEAP_FREE_SLOTS_MAX_RATIO
#define TypedData_Get_Struct(obj, type, data_type, sval)
#define RGENGC_FORCE_MAJOR_GC
static void * ruby_xmalloc0(size_t size)
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
static void gc_enter(rb_objspace_t *objspace, const char *event)
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)
static const rb_iseq_t * vm_proc_iseq(VALUE procval)
static void invalidate_mark_stack(mark_stack_t *stack, VALUE obj)
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
static int stack_check(int water_mark)
static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
static enum rb_id_table_iterator_result mark_const_entry_i(VALUE value, void *data)
void * ruby_xmalloc2(size_t n, size_t size)
#define heap_pages_sorted_length
void rb_define_private_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
void rb_iseq_mark(const rb_iseq_t *iseq)
size_t zombie_object_count
void ruby_mimfree(void *ptr)
void rb_gcdebug_print_obj_condition(VALUE obj)
#define GC_ENABLE_LAZY_SWEEP
static int wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
#define TH_JUMP_TAG(th, st)
RUBY_ALIAS_FUNCTION(rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree), rb_data_object_wrap,(klass, datap, dmark, dfree))
struct rb_data_type_struct::@131 function
VALUE rb_ary_push(VALUE ary, VALUE item)
#define SIZED_REALLOC_N(var, type, n, old_n)
static void gc_prof_mark_timer_start(rb_objspace_t *)
#define heap_pages_freeable_pages
SSL_METHOD *(* func)(void)
size_t oldmalloc_increase
#define ARY_SHARED_P(ary)
ONIG_EXTERN void onig_region_free(OnigRegion *region, int free_self)
VALUE rb_ary_tmp_new(long capa)
#define RGENGC_ESTIMATE_OLDMALLOC
void * ruby_xrealloc2(void *ptr, size_t n, size_t size)
struct rb_iseq_constant_body * body
static void RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
static int gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
static struct heap_page * heap_page_create(rb_objspace_t *objspace)
static VALUE count_objects(int argc, VALUE *argv, VALUE os)
#define RGENGC_CHECK_MODE
static VALUE run_single_final(VALUE final, VALUE objid)
void ruby_sized_xfree(void *x, size_t size)
void rb_objspace_reachable_objects_from_root(void(func)(const char *category, VALUE, void *), void *passing_data)
VALUE rb_funcall(VALUE, ID, int,...)
Calls a method.
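For instance, calling back into Ruby from C looks like this (the receiver variables `obj` and `ary` are assumptions of the sketch):

/* Equivalent of `obj.to_s`: rb_intern() turns the method name into an ID,
 * and the 0 says no arguments follow. */
VALUE str = rb_funcall(obj, rb_intern("to_s"), 0);

/* Equivalent of `ary.push(1, 2)`: argc of 2, then the two VALUEs. */
rb_funcall(ary, rb_intern("push"), 2, INT2FIX(1), INT2FIX(2));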
#define MARK_OBJECT_ARY_BUCKET_SIZE
#define STACK_UPPER(x, a, b)
#define gc_mode_set(objspace, mode)
static double elapsed_time_from(double time)
static void * objspace_xmalloc(rb_objspace_t *objspace, size_t size)
static void gc_prof_sweep_timer_start(rb_objspace_t *)
#define GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_MALLOC_LIMIT_MAX
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
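A short sketch with illustrative names; outer may be a class or a module:

/* Creates Outer::Inner as a subclass of Object. */
VALUE outer = rb_define_module("Outer");
VALUE inner = rb_define_class_under(outer, "Inner", rb_cObject);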
static int gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, const unsigned int immediate_sweep, int reason)
void rb_raise(VALUE exc, const char *fmt,...)
int rb_io_fptr_finalize(rb_io_t *)
static VALUE newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
static void ruby_memerror(void)
struct rb_objspace::@109 flags
#define heap_allocated_pages
static int RVALUE_WHITE_P(VALUE obj)
int rb_objspace_garbage_object_p(VALUE obj)
int ruby_get_stack_grow_direction(volatile VALUE *addr)
#define GC_PROFILE_DETAIL_MEMORY
void * rb_alloc_tmp_buffer(volatile VALUE *store, long len)
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
VALUE rb_obj_is_kind_of(VALUE, VALUE)
static void gc_rest(rb_objspace_t *objspace)
int ruby_thread_has_gvl_p(void)
ONIG_EXTERN void onig_free(OnigRegex)
#define ruby_gc_stress_mode
size_t rb_io_memsize(const rb_io_t *)
struct RVALUE::@104::@105 free
static int wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
#define ruby_gc_stressful
static int wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
static VALUE gc_profile_total_time(VALUE self)
static struct heap_page * heap_page_allocate(rb_objspace_t *objspace)
double oldobject_limit_factor
static int obj_free(rb_objspace_t *objspace, VALUE obj)
static int mark_key(st_data_t key, st_data_t value, st_data_t data)
static int heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
#define nd_set_type(n, t)
static size_t objspace_available_slots(rb_objspace_t *objspace)
void rb_objspace_each_objects(each_obj_callback *callback, void *data)
void rb_include_module(VALUE klass, VALUE module)
static const char * gc_current_status(rb_objspace_t *objspace)
#define GC_PROFILE_RECORD_DEFAULT_SIZE
void rb_gc_mark(VALUE ptr)
static int rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
size_t remembered_shady_count
VALUE rb_hash_lookup(VALUE hash, VALUE key)
int rb_objspace_markable_object_p(VALUE obj)
#define ATOMIC_PTR_EXCHANGE(var, val)
#define RUBY_INTERNAL_EVENT_GC_START
void rb_gc_register_address(VALUE *addr)
int st_update(st_table *table, st_data_t key, st_update_callback_func *func, st_data_t arg)
VALUE rb_io_write(VALUE, VALUE)
#define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
static void RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
static void check_generation_i(const VALUE child, void *ptr)
static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count)
size_t heap_used_at_gc_start
void rb_gc_force_recycle(VALUE obj)
VALUE writeconv_pre_ecopts
static VALUE wmap_each_key(VALUE self)
int char_offset_num_allocated
void rb_obj_info_dump(VALUE obj)
#define gc_event_hook(objspace, event, data)
static size_t obj_memsize_of(VALUE obj, int use_all_types)
#define ATOMIC_VALUE_EXCHANGE(var, val)
const char * rb_source_loc(int *pline)
int ruby_native_thread_p(void)
static int RVALUE_UNCOLLECTIBLE(VALUE obj)
VALUE rb_ary_cat(VALUE ary, const VALUE *argv, long len)
static void gc_reset_malloc_info(rb_objspace_t *objspace)
static size_t heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots)
VALUE rb_str_buf_append(VALUE, VALUE)
size_t total_allocated_objects_at_gc_start
static int heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
static size_t objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
#define is_incremental_marking(objspace)
bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT]
static int wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
int ruby_stack_grow_direction
int ruby_stack_check(void)
size_t oldmalloc_limit_min
struct heap_page_header header
static VALUE wmap_size(VALUE self)
const char * rb_obj_classname(VALUE)
struct rb_objspace::mark_func_data_struct * mark_func_data
static void push_mark_stack(mark_stack_t *, VALUE)
#define MARK_CHECKPOINT(category)
#define ATOMIC_SIZE_ADD(var, val)
#define ATOMIC_SIZE_CAS(var, oldval, val)
static void gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
static VALUE objspace_each_objects(VALUE arg)
static void negative_size_allocation_error(const char *)
static void heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
gc_profile_record * current_record
#define ruby_initial_gc_stress
#define RVALUE_MARKING_BITMAP(obj)
static void finalize_list(rb_objspace_t *objspace, VALUE zombie)
static VALUE define_final(int argc, VALUE *argv, VALUE os)
RUBY_SYMBOL_EXPORT_BEGIN
typedef unsigned long st_data_t
static int force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
#define heap_pages_deferred_final
unsigned int immediate_sweep
#define obj_id_to_ref(objid)
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj)
void rb_global_variable(VALUE *var)
static VALUE wmap_allocate(VALUE klass)
static void mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
void rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
void rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
VALUE data[STACK_CHUNK_SIZE]
unsigned int gc_stressful
void rb_gc_mark_values(long n, const VALUE *values)
void rb_exc_raise(VALUE mesg)
void rb_objspace_set_event_hook(const rb_event_flag_t event)
VALUE rb_define_finalizer(VALUE obj, VALUE block)
static void mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
size_t total_freed_objects
#define RBASIC_SET_CLASS_RAW(obj, cls)
static void check_color_i(const VALUE child, void *ptr)
size_t rb_obj_memsize_of(VALUE obj)
#define RB_TYPE_P(obj, type)
void * ruby_xcalloc(size_t n, size_t size)
#define GET_STACK_BOUNDS(start, end, appendix)
static void gc_prof_timer_start(rb_objspace_t *)
size_t uncollectible_wb_unprotected_objects
#define ATOMIC_SET(var, val)
#define will_be_incremental_marking(objspace)
#define MEMZERO(p, type, n)
VALUE rb_obj_method(VALUE, VALUE)
#define is_sweeping(objspace)
static VALUE check_rvalue_consistency(const VALUE obj)
size_t oldmalloc_increase_limit
void rb_gc_adjust_memory_usage(ssize_t diff)
#define RUBY_INTERNAL_EVENT_GC_ENTER
struct rb_objspace::@108 malloc_params
size_t total_allocated_objects
void rb_free_generic_ivar(VALUE)
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr))
static VALUE wmap_has_key(VALUE self, VALUE key)
static void * aligned_malloc(size_t, size_t)
static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
void * rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
void rb_ary_free(VALUE ary)
void rb_mark_end_proc(void)
static void gc_mark(rb_objspace_t *objspace, VALUE ptr)
#define GC_HEAP_GROWTH_MAX_SLOTS
VALUE rb_class_name(VALUE)
VALUE rb_undefine_finalizer(VALUE obj)
void rb_id_table_free(struct rb_id_table *tbl)
#define RGENGC_OLD_NEWOBJ_CHECK
#define RUBY_SAFE_LEVEL_MAX
void rb_vm_mark(void *ptr)
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
size_t rb_generic_ivar_memsize(VALUE)
static void * gc_with_gvl(void *ptr)
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace))
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
void rb_gc_copy_finalizer(VALUE dest, VALUE obj)
RUBY_EXTERN VALUE rb_cObject
static void finalize_deferred(rb_objspace_t *objspace)
static VALUE define_final0(VALUE obj, VALUE block)
static void * negative_size_allocation_error_with_gvl(void *ptr)
void rb_gc_unregister_address(VALUE *addr)
size_t st_memsize(const st_table *tab)
static int pop_mark_stack(mark_stack_t *, VALUE *)
static VALUE gc_stress_get(VALUE self)
void rb_free_tmp_buffer(volatile VALUE *store)
#define VALGRIND_MAKE_MEM_DEFINED(p, n)
#define GET_HEAP_MARKING_BITS(x)
static void RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
VALUE rb_str_cat2(VALUE, const char *)
RUBY_EXTERN VALUE rb_cBasicObject
static void gc_event_hook_body(rb_thread_t *th, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
struct gc_profile_record gc_profile_record
static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr)
static void RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
RUBY_EXTERN VALUE rb_mKernel
static VALUE compat_key(VALUE key)
static const char * obj_info(VALUE obj)
unsigned int before_sweep
static VALUE heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
static void push_mark_stack_chunk(mark_stack_t *stack)
#define nonspecial_obj_id(obj)
static struct heap_page * heap_page_resurrect(rb_objspace_t *objspace)
VALUE rb_gc_latest_gc_info(VALUE key)
static VALUE os_obj_of(VALUE of)
unsigned int dont_incremental
static size_t wmap_memsize(const void *ptr)
static int heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
struct rb_method_definition_struct *const def
static void gc_sweep_finish(rb_objspace_t *objspace)
void rb_free_method_entry(const rb_method_entry_t *me)
#define RUBY_DTRACE_GC_HOOK(name)
void rb_define_const(VALUE, const char *, VALUE)
VALUE rb_obj_is_mutex(VALUE obj)
#define MALLOC_ALLOCATED_SIZE
static void heap_add_freepage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
rb_atomic_t cnt[RUBY_NSIG]
struct rb_io_t::rb_io_enc_t encs
#define rb_data_object_alloc
static void gc_marks_step(rb_objspace_t *objspace, int slots)
void rb_vm_register_special_exception(enum ruby_special_exceptions sp, VALUE cls, const char *mesg)
VALUE writeconv_asciicompat
static int RVALUE_OLD_P_RAW(VALUE obj)
static void callback(ffi_cif *cif, void *resp, void **args, void *ctx)
static void objspace_malloc_gc_stress(rb_objspace_t *objspace)
static void gc_finalize_deferred_register(rb_objspace_t *objspace)
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)
static int is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
size_t onig_region_memsize(const OnigRegion *regs)
struct force_finalize_list * next
static int gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
static const char * method_type_name(rb_method_type_t type)
VALUE rb_gc_mark_node(NODE *obj)
#define range(low, item, hi)
void rb_gc_register_mark_object(VALUE obj)
#define ATOMIC_SIZE_EXCHANGE(var, val)
#define GC_MALLOC_LIMIT_GROWTH_FACTOR
size_t(* dsize)(const void *)
static VALUE gc_stat_compat_symbols[gc_stat_compat_sym_last]
#define is_lazy_sweeping(heap)
static void gc_marks_rest(rb_objspace_t *objspace)
#define RVALUE_MARK_BITMAP(obj)
static int RVALUE_BLACK_P(VALUE obj)
static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
static RVALUE * heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
size_t rb_objspace_data_type_memsize(VALUE obj)
#define is_full_marking(objspace)
NODE * rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
static int RVALUE_WB_UNPROTECTED(VALUE obj)
double heap_free_slots_goal_ratio
void rb_gc_mark_machine_stack(rb_thread_t *th)
static VALUE wmap_values(VALUE self)
static void run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
void * ruby_mimmalloc(size_t size)
static void gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end)
st_table * finalizer_table
static void heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
#define rb_thread_raised_clear(th)
#define RUBY_INTERNAL_EVENT_GC_END_MARK
static void mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
static int internal_object_p(VALUE obj)
static void gc_exit(rb_objspace_t *objspace, const char *event)
static void gc_prof_set_heap_info(rb_objspace_t *)
static const char * gc_mode_name(enum gc_mode mode)
void rb_define_module_function(VALUE module, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a module function for module.
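A sketch with illustrative names; a module function is registered both as a singleton method and as a private instance method:

static VALUE
util_double(VALUE mod, VALUE x)
{
    return INT2NUM(2 * NUM2INT(x));
}

void
Init_util(void)
{
    VALUE util = rb_define_module("Util");
    /* Callable as Util.double(3) and, after `include Util`, as double(3). */
    rb_define_module_function(util, "double", util_double, 1);
}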
static void gc_profile_dump_on(VALUE out, VALUE(*append)(VALUE, VALUE))
volatile VALUE * rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
#define RARRAY_CONST_PTR(a)
struct RRational rational
static void rgengc_unprotect_logging_exit_func(void)
union rb_method_definition_struct::@144 body
static double getrusage_time(void)
static VALUE gc_profile_report(int argc, VALUE *argv, VALUE self)
VALUE * ruby_initial_gc_stress_ptr
double malloc_limit_growth_factor
VALUE rb_obj_is_proc(VALUE)
void ruby_malloc_size_overflow(size_t count, size_t elsize)
static void gc_set_initial_pages(void)
static VALUE RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
static void gc_prof_sweep_timer_stop(rb_objspace_t *)
static int rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
VALUE rb_sprintf(const char *format,...)
int rb_objspace_internal_object_p(VALUE obj)
static VALUE incremental_enable(void)
#define GET_HEAP_MARK_BITS(x)
#define STACKFRAME_FOR_CALL_CFUNC
#define RICLASS_IS_ORIGIN
int rb_obj_respond_to(VALUE, ID, int)
#define RESTORE_FINALIZER()
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt,...), 3, 4)
static void gc_prof_mark_timer_stop(rb_objspace_t *)
static void aligned_free(void *)
unsigned int has_remembered_objects
static void gc_sweep(rb_objspace_t *objspace)
size_t rb_obj_gc_flags(VALUE obj, ID *flags, size_t max)
static void add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
#define MARKED_IN_BITMAP(bits, p)
#define MEMMOVE(p1, p2, type, n)
static void gc_stress_set(rb_objspace_t *objspace, VALUE flag)
static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
static VALUE wmap_aset(VALUE self, VALUE wmap, VALUE orig)
size_t rb_str_memsize(VALUE)
static int os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
static int rgengc_remember(rb_objspace_t *objspace, VALUE obj)
#define RUBY_INTERNAL_EVENT_GC_EXIT
#define rb_thread_raised_set(th, f)
unsigned char buf[MIME_BUF_SIZE]
static int wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
static void mark_tbl(rb_objspace_t *objspace, st_table *tbl)
void Init_stack(volatile VALUE *addr)
static st_table * rgengc_unprotect_logging_table
static ruby_gc_params_t gc_params
VALUE tied_io_for_writing
#define ATOMIC_SIZE_INC(var)
#define MALLOC_ALLOCATED_SIZE_CHECK
static void atomic_sub_nounderflow(size_t *var, size_t sub)
static int gc_mark_stacked_objects_all(rb_objspace_t *)
static void heap_pages_free_unused_pages(rb_objspace_t *objspace)
static int is_id_value(rb_objspace_t *objspace, VALUE ptr)
static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
static void heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
VALUE rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
struct rb_objspace::@112 profile
static VALUE gc_profile_enable_get(VALUE self)
static void rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, called_id_, klass_, data_)
static VALUE gc_start_internal(int argc, VALUE *argv, VALUE self)
static int mark_entry(st_data_t key, st_data_t value, st_data_t data)
#define gc_prof_record(objspace)
const char * rb_objspace_data_type_name(VALUE obj)
const VALUE defined_class
#define GET_HEAP_WB_UNPROTECTED_BITS(x)
VALUE rb_obj_hide(VALUE obj)
static void gc_current_status_fill(rb_objspace_t *objspace, char *buff)
double heap_free_slots_max_ratio
static int is_markable_object(rb_objspace_t *objspace, VALUE obj)
static void * ruby_memerror_body(void *dummy)
void rb_mark_tbl(st_table *tbl)
const struct rb_method_entry_struct *const original_me
static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
RUBY_FUNC_EXPORTED size_t rb_ary_memsize(VALUE ary)
static ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
VALUE rb_gc_disable(void)
VALUE rb_check_funcall(VALUE, ID, int, const VALUE *)
int clock_gettime(clockid_t, struct timespec *)
static int gc_verify_heap_pages_(rb_objspace_t *objspace, struct heap_page *page)
#define SET_MACHINE_STACK_END(p)
void ruby_init_stack(volatile VALUE *)
const char * rb_id2name(ID)
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
VALUE rb_str_new_cstr(const char *)
struct heap_page * free_next
int rb_sigaltstack_size(void)
static void make_zombie(rb_objspace_t *objspace, VALUE obj, void(*dfree)(void *), void *data)
void * ruby_xrealloc(void *ptr, size_t new_size)
static void gc_sweep_start(rb_objspace_t *objspace)
static int gc_marks_finish(rb_objspace_t *objspace)
void ruby_gc_set_params(int safe_level)
void rb_class_detach_module_subclasses(VALUE klass)
static void wmap_free(void *ptr)
static int is_mark_stack_empty(mark_stack_t *stack)
int rb_garbage_collect(void)
static enum rb_id_table_iterator_result mark_method_entry_i(VALUE me, void *data)
static VALUE gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
#define rb_objspace_of(vm)
long strtol(const char *nptr, char **endptr, int base)
static const rb_data_type_t weakmap_type
static void make_io_zombie(rb_objspace_t *objspace, VALUE obj)
register unsigned int len
VALUE rb_define_module_under(VALUE outer, const char *name)
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
static void heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
static void heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
void rb_set_safe_level_force(int)
static void gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
static size_t gc_stat_internal(VALUE hash_or_sym)
static int gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
incremental: 0 -> not incremental (mark all stacked objects at once); incremental: n -> mark at most `n' objects ...
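A simplified sketch of the pattern this declaration and its comment suggest (not the actual implementation, whose budget handling is more involved): pop objects off the mark stack and mark their children, stopping early once the incremental budget is spent. Assumes count > 0 when incremental is set.

static int
mark_stacked_objects_sketch(rb_objspace_t *objspace, int incremental, size_t count)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj;

    while (pop_mark_stack(mstack, &obj)) {
        gc_mark_children(objspace, obj);   /* push obj's references */
        if (incremental && --count == 0) {
            return FALSE;                  /* budget spent; objects remain */
        }
    }
    return TRUE;                           /* mark stack fully drained */
}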
void * ruby_xmalloc(size_t size)
static VALUE gc_profile_result(void)
size_t rb_node_memsize(VALUE obj)
static void RVALUE_AGE_RESET_RAW(VALUE obj)
static VALUE wmap_aref(VALUE self, VALUE wmap)
static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_mark, int immediate_sweep, int reason)
#define MARK_IN_BITMAP(bits, p)
static size_t objspace_live_slots(rb_objspace_t *objspace)
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO
static void gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
#define RVALUE_WB_UNPROTECTED_BITMAP(obj)
bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT]
static VALUE wmap_keys(VALUE self)
void * ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
#define GC_HEAP_FREE_SLOTS
static int rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
static void free_stack_chunks(mark_stack_t *)
static VALUE gc_profile_record_get(void)
#define PUSH_MARK_FUNC_DATA(v)
void rb_mark_generic_ivar(VALUE)
static int RVALUE_MARKED(VALUE obj)
VALUE rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
static int wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
static int ready_to_gc(rb_objspace_t *objspace)
#define RARRAY_AREF(a, i)
#define GC_ENABLE_INCREMENTAL_MARK
static int RVALUE_OLD_P(VALUE obj)
static int get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
static void gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)
static void mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
VALUE rb_block_proc(void)
static void RVALUE_AGE_RESET(VALUE obj)
void rb_objspace_reachable_objects_from(VALUE obj, void(func)(VALUE, void *), void *data)
#define RUBY_INTERNAL_EVENT_NEWOBJ
#define heap_allocatable_pages
#define RUBY_INTERNAL_EVENT_FREEOBJ
static void RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
static int wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
int getrusage(int who, struct rusage *usage)
VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
size_t rb_gc_stat(VALUE key)
void rb_mark_set(st_table *tbl)
#define gc_stress_full_mark_after_malloc_p()
RUBY_SYMBOL_EXPORT_BEGIN
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
static int garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_mark, int immediate_sweep, int reason)
static VALUE gc_count(VALUE self)
static int is_swept_object(rb_objspace_t *objspace, VALUE ptr)
const char * ruby_node_name(int node)
VALUE rb_newobj_of(VALUE klass, VALUE flags)
#define RCLASS_IV_INDEX_TBL(c)
static int gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
static void heap_pages_expand_sorted(rb_objspace_t *objspace)
VALUE rb_class_path_cached(VALUE)
struct gc_list * global_list
static void * objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
static void heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
static VALUE wmap_inspect(VALUE self)
static void heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
VALUE rb_obj_is_fiber(VALUE obj)
static stack_chunk_t * stack_chunk_alloc(void)
static int rb_special_const_p(VALUE obj)
VALUE rb_data_object_zalloc(VALUE, size_t, RUBY_DATA_FUNC, RUBY_DATA_FUNC)
#define RCLASS_CALLABLE_M_TBL(c)
gc_profile_record * records
VALUE rb_proc_new(VALUE(*)(ANYARGS), VALUE)
#define HEAP_PAGE_ALIGN_LOG
static VALUE gc_stat_compat_table
#define POP_MARK_FUNC_DATA()
static void check_children_i(const VALUE child, void *ptr)
static Bigint * diff(Bigint *a, Bigint *b)
static void rb_objspace_call_finalizer(rb_objspace_t *objspace)
static VALUE heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
VALUE rb_obj_rgengc_promoted_p(VALUE obj)
struct rb_encoding_entry * list
static VALUE gc_stat_symbols[gc_stat_sym_last]
static int wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
int each_obj_callback(void *, void *, size_t, void *)
struct rb_heap_struct rb_heap_t
int rb_singleton_class_internal_p(VALUE sklass)
rb_objspace_t * rb_objspace_alloc(void)
struct RArray::@128::@129 heap
static void gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
static int RVALUE_MARKING(VALUE obj)
#define VM_UNREACHABLE(func)
#define TypedData_Make_Struct(klass, type, data_type, sval)
const char * rb_obj_info(VALUE obj)
struct mark_stack mark_stack_t
#define rb_thread_raised_p(th, f)
void rb_gc_writebarrier_remember(VALUE obj)
#define RETURN_ENUMERATOR(obj, argc, argv)
static size_t mark_stack_size(mark_stack_t *stack)
unsigned int during_incremental_marking
static int invalidate_mark_stack_chunk(stack_chunk_t *chunk, int limit, VALUE obj)
#define heap_pages_sorted
static int gc_verify_heap_pages(rb_objspace_t *objspace)
size_t total_allocated_pages
struct rmatch_offset * char_offset
VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc)
volatile VALUE rb_gc_guarded_val
static void gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
static void init_mark_stack(mark_stack_t *stack)
void rb_gc_mark_maybe(VALUE obj)
static void shrink_stack_chunk_cache(mark_stack_t *stack)
#define GC_HEAP_INIT_SLOTS
static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt,...)
#define gc_event_hook_available_p(objspace)
static void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
static VALUE gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
void rb_gc_mark_global_tbl(void)
static int is_live_object(rb_objspace_t *objspace, VALUE ptr)
#define GC_OLDMALLOC_LIMIT_MIN
const char * rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
void(* func)(const char *category, VALUE, void *)
const rb_data_type_t * type
static int get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
#define gc_mode(objspace)
static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
#define RTYPEDDATA_DATA(v)
void rb_gc_writebarrier_unprotect(VALUE obj)
void(* mark_func)(VALUE v, void *data)
struct heap_page * using_page
#define rb_check_frozen(obj)
#define RVALUE_PAGE_MARKING(page, obj)
struct RTypedData typeddata
rb_id_table_iterator_result
RUBY_EXTERN VALUE rb_stdout
void rb_gc_call_finalizer_at_exit(void)
static void gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
VALUE rb_obj_freeze(VALUE)
const rb_iseq_t *const iseqptr
static void RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
#define SPECIAL_CONST_P(x)
struct rb_objspace rb_objspace_t
static int verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
VALUE rb_define_module(const char *name)
#define gc_prof_enabled(objspace)
bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT]
struct rb_objspace::@113 rgengc
void rb_mark_hash(st_table *tbl)
#define GC_MALLOC_LIMIT_MIN
each_obj_callback * callback
static VALUE undefine_final(VALUE os, VALUE obj)
VALUE rb_str_buf_new(long)
static VALUE gc_profile_clear(void)
void rb_gc_mark_encodings(void)
static void * objspace_xmalloc2(rb_objspace_t *objspace, size_t n, size_t size)
const struct rb_method_entry_struct *const orig_me
static void * objspace_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
static int wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size)
#define RTYPEDDATA_TYPE(v)
rb_method_refined_t refined
struct rb_classext_struct rb_classext_t
#define GC_OLDMALLOC_LIMIT_MAX
static VALUE wmap_finalize(VALUE self, VALUE objid)
static VALUE gc_stat(int argc, VALUE *argv, VALUE self)
static void mark_set(rb_objspace_t *objspace, st_table *tbl)
static VALUE newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected)
void rb_gc_verify_internal_consistency(void)
static int match(VALUE str, VALUE pat, VALUE hash, int(*cb)(VALUE, VALUE))
static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
VALUE rb_str_append(VALUE, VALUE)
#define CALC_EXACT_MALLOC_SIZE
struct heap_page ** sorted
static VALUE newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, rb_objspace_t *objspace, VALUE obj)
double oldmalloc_limit_growth_factor
void rb_ary_delete_same(VALUE ary, VALUE item)
struct stack_chunk stack_chunk_t
void rb_warn(const char *fmt,...)
static VALUE gc_profile_enable(void)
static void VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
static int gc_mark_set(rb_objspace_t *objspace, VALUE obj)
#define CLEAR_IN_BITMAP(bits, p)
#define BIGNUM_EMBED_FLAG
static void setup_gc_stat_symbols(void)
struct RString::@125::@126 heap
unsigned int has_uncollectible_shady_objects
static int rb_mul_size_overflow(size_t a, size_t b, size_t max, size_t *c)
bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT]
static void should_be_finalizable(VALUE obj)
static void should_be_callable(VALUE block)
struct heap_page * free_pages
static void gc_setup_mark_bits(struct heap_page *page)
rb_iseq_location_t location
static void mark_stack_locations(rb_objspace_t *objspace, rb_thread_t *th, const VALUE *stack_start, const VALUE *stack_end)
static void heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
#define TRY_WITH_GC(alloc)
VALUE rb_obj_class(VALUE)
struct stack_chunk * next
static void * objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
struct heap_page * sweep_pages
static struct heap_page * heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)