#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include <sys/types.h>

#undef rb_data_object_wrap

#ifndef HAVE_MALLOC_USABLE_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) malloc_size(a)

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
#  include RUBY_ALTERNATIVE_MALLOC_HEADER
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>

#ifdef HAVE_SYS_TIME_H
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>

#if defined _WIN32 || defined __CYGWIN__
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

#if defined(_MSC_VER) && defined(_WIN64)
#pragma intrinsic(_umul128)

#elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
    p = __builtin_mul_overflow(x, y, &z);
#elif defined(DSIZE_T)

#elif defined(_MSC_VER) && defined(_WIN64)
    unsigned __int64 dz = _umul128(x, y, &dp);
#elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
    p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)

    struct optional t = size_mul_overflow(x, y);

    struct optional t = size_mul_overflow(x, y);
    struct optional u = size_mul_overflow(z, w);

size_mul_or_raise(size_t x, size_t y, VALUE exc)

    struct optional t = size_mul_overflow(x, y);

    return size_mul_or_raise(x, y, exc);

size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)

    struct optional t = size_mul_add_overflow(x, y, z);

    return size_mul_add_or_raise(x, y, z, exc);

size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)

    struct optional t = size_mul_add_mul_overflow(x, y, z, w);
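
/*
 * Illustrative sketch (not part of the original source; compiled out with
 * "#if 0"): the size_mul_*_or_raise() helpers above all follow one pattern --
 * do the arithmetic, capture an overflow flag in a (value, flag) pair, and
 * raise `exc` instead of silently wrapping. A minimal standalone version of
 * the core check, assuming a GCC/Clang-style __builtin_mul_overflow:
 */
#if 0
#include <stddef.h>
#include <stdbool.h>

static bool
example_checked_mul(size_t x, size_t y, size_t *out)
{
    size_t z;
    if (__builtin_mul_overflow(x, y, &z)) {
        return false;  /* caller raises, e.g. rb_raise(exc, ...) */
    }
    *out = z;
    return true;
}
#endif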
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL

#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0

#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65

#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024)
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4

#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024)
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024)

#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0

#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
#define RGENGC_DEBUG -1
#define RGENGC_DEBUG 0

#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))

#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0

#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)

#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0

#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0

#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1

#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0

#define RGENGC_DEBUG 0
#ifdef RGENGC_CHECK_MODE
#undef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#define RGENGC_PROFILE 0
#define RGENGC_ESTIMATE_OLDMALLOC 0
#define RGENGC_FORCE_MAJOR_GC 0

#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS

#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#define MALLOC_ALLOCATED_SIZE 0
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0

#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0

#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
#if RGENGC_ESTIMATE_OLDMALLOC

#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;

    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;

    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY

#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;

#if RGENGC_PROFILE > 0
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;

#if defined(_MSC_VER) || defined(__CYGWIN__)
#pragma pack(push, 1)

#if defined(_MSC_VER) || defined(__CYGWIN__)

#define popcount_bits rb_popcount_intptr

#define STACK_CHUNK_SIZE 500
#if GC_ENABLE_INCREMENTAL_MARK

#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;

#if GC_ENABLE_INCREMENTAL_MARK

#if GC_PROFILE_MORE_DETAIL

#if RGENGC_PROFILE > 0
    size_t total_generated_normal_object_count;
    size_t total_generated_shady_object_count;
    size_t total_shade_operation_count;
    size_t total_promoted_count;
    size_t total_remembered_normal_object_count;
    size_t total_remembered_shady_object_count;

#if RGENGC_PROFILE >= 2
    size_t generated_normal_object_count_types[RUBY_T_MASK];
    size_t generated_shady_object_count_types[RUBY_T_MASK];
    size_t remembered_normal_object_count_types[RUBY_T_MASK];
    size_t remembered_shady_object_count_types[RUBY_T_MASK];

#if RGENGC_ESTIMATE_OLDMALLOC

#if RGENGC_CHECK_MODE >= 2

#if GC_ENABLE_INCREMENTAL_MARK

#if GC_DEBUG_STRESS_TO_CLASS
#define HEAP_PAGE_ALIGN_LOG 14
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))

#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)

#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH)
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))

#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))

#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
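
/*
 * Illustrative sketch (not part of the original source; compiled out): every
 * RVALUE lives inside a page body aligned to 2^HEAP_PAGE_ALIGN_LOG bytes, so
 * masking the low bits of an object pointer recovers its page header, and the
 * slot offset within the page indexes the per-page bitmaps defined above.
 */
#if 0
static void
example_mark_bit_roundtrip(VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);  /* pointer masking only */
    bits_t *bits = GET_HEAP_MARK_BITS(obj);

    MARK_IN_BITMAP(bits, obj);                    /* set the mark bit ... */
    GC_ASSERT(MARKED_IN_BITMAP(bits, obj));       /* ... observe it ...   */
    CLEAR_IN_BITMAP(bits, obj);                   /* ... and clear it     */
    (void)page;
}
#endif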
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)

#define ruby_initial_gc_stress gc_params.gc_stress

#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length objspace->heap_pages.sorted_length
#define heap_pages_lomem objspace->heap_pages.range[0]
#define heap_pages_himem objspace->heap_pages.range[1]
#define heap_allocatable_pages objspace->heap_pages.allocatable_pages
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden (&objspace->eden_heap)
#define heap_tomb (&objspace->tomb_heap)
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
#define global_list objspace->global_list
#define ruby_gc_stressful objspace->flags.gc_stressful
#define ruby_gc_stress_mode objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class objspace->stress_to_class
#define stress_to_class 0
gc_mode_verify(enum gc_mode mode)

#if RGENGC_CHECK_MODE > 0
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);

#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
#define is_full_marking(objspace) TRUE
#if GC_ENABLE_INCREMENTAL_MARK
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define is_incremental_marking(objspace) FALSE
#if GC_ENABLE_INCREMENTAL_MARK
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define will_be_incremental_marking(objspace) FALSE
#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))

#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
# error not supported
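
/*
 * Illustrative sketch (not part of the original source; compiled out): on
 * platforms where a C long is pointer-sized, a non-special object id is just
 * the object's address with the Fixnum tag bit set, so the round trip back
 * to a reference is a single XOR.
 */
#if 0
static void
example_obj_id_roundtrip(VALUE obj)
{
    VALUE objid = nonspecial_obj_id(obj);    /* obj | FIXNUM_FLAG */
    GC_ASSERT(obj_id_to_ref(objid) == obj);  /* objid ^ FIXNUM_FLAG */
}
#endif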
#define RANY(o) ((RVALUE*)(o))

#define RZOMBIE(o) ((struct RZombie *)(o))

#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
NORETURN(static void negative_size_allocation_error(const char *));

static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);

static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static void gc_marks_step(rb_objspace_t *objspace, int slots);

static void shrink_stack_chunk_cache(mark_stack_t *stack);

static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);

static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
static inline void gc_prof_set_malloc_info(rb_objspace_t *);
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
        *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)

#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)

#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body

static const char *obj_info(VALUE obj);

#define PUSH_MARK_FUNC_DATA(v) do { \
    struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
    objspace->mark_func_data = (v);

#define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
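
/*
 * Illustrative sketch (not part of the original source; compiled out):
 * PUSH_MARK_FUNC_DATA temporarily redirects marking into a callback, so
 * "visit everything reachable from obj" can be reused for introspection
 * instead of setting mark bits. The mark_func_data_struct field names
 * (mark_func/data) are assumed from the usage above.
 */
#if 0
static void
example_collect_reference(VALUE ref, void *data)
{
    fprintf(stderr, "reachable: %p\n", (void *)ref);
    (void)data;
}

static void
example_reachable_from(rb_objspace_t *objspace, VALUE obj)
{
    struct mark_func_data_struct mfd;
    mfd.mark_func = example_collect_reference;
    mfd.data = NULL;

    PUSH_MARK_FUNC_DATA(&mfd);
    gc_mark_children(objspace, obj);  /* callbacks fire instead of marking */
    POP_MARK_FUNC_DATA();
}
#endif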
#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static inline tick_t

    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));

#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t

    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);

#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t

    unsigned long long val = __builtin_ppc_get_timebase();

#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"

static inline tick_t

#define PRItick "llu"

static inline tick_t

#elif TICK_TYPE == 2
typedef double tick_t;
#define PRItick "4.9f"

static inline tick_t

    return getrusage_time();

#error "choose tick type"
#define MEASURE_LINE(expr) do { \
    volatile tick_t start_time = tick(); \
    volatile tick_t end_time; \
    end_time = tick(); \
    fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \

#define MEASURE_LINE(expr) expr
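
/*
 * Illustrative sketch (not part of the original source; compiled out):
 * MEASURE_LINE wraps a single statement, timestamps it with tick() on both
 * sides, and prints "0\t<ticks>\t<stringified expr>" to stderr; when
 * PRINT_MEASURE_LINE is off it degenerates to the bare expression. Usage:
 */
#if 0
static void
example_measured_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
{
    MEASURE_LINE(gc_sweep_step(objspace, heap));
}
#endif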
#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f)  FL_CHECK2("FL_TEST2",  x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f)   FL_CHECK2("FL_SET2",   x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))

#define RVALUE_MARK_BITMAP(obj)           MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj)            MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj)     MARKED_IN_BITMAP((page)->mark_bits, (obj))

#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)  MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj)        MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))

#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)  MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj)        MARKED_IN_BITMAP((page)->marking_bits, (obj))

#define RVALUE_OLD_AGE 3
#define RVALUE_AGE_SHIFT 5
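
/*
 * Illustrative sketch (not part of the original source; compiled out): an
 * object's age (0..RVALUE_OLD_AGE) is packed into two promoted-flag bits
 * starting at RVALUE_AGE_SHIFT, so extracting it is a shift-and-mask --
 * assuming that bit layout, RVALUE_OLD_AGE (3 == 0b11) doubles as the mask.
 * An object counts as "old" once the age saturates at RVALUE_OLD_AGE.
 */
#if 0
static int
example_age_from_flags(VALUE flags)
{
    return (int)((flags >> RVALUE_AGE_SHIFT) & RVALUE_OLD_AGE);
}
#endif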
RVALUE_FLAGS_AGE(VALUE flags)

check_rvalue_consistency_force(const VALUE obj, int terminate)
            fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);

        else if (!is_pointer_to_heap(objspace, (void *)obj)) {

                fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                        (void *)obj, (void *)page);

                fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);

            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));

            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));

            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));

        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);

        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));

        if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                    obj_info(obj), age);

            fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                    obj_info(obj), age);

            fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
#if RGENGC_CHECK_MODE == 0

check_rvalue_consistency(const VALUE obj)

check_rvalue_consistency(const VALUE obj)

    check_rvalue_consistency_force(obj, TRUE);

    void *poisoned = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);

        asan_poison_object(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
#if RGENGC_CHECK_MODE || GC_DEBUG

    check_rvalue_consistency(obj);

#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;

    int age = RVALUE_FLAGS_AGE(flags);

        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));

        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    RVALUE_DEMOTE_RAW(objspace, obj);

    if (RVALUE_MARKED(obj)) {

    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);

    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);

    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);

    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);

    return RVALUE_MARKED(obj) == FALSE;
static inline void *

    rb_bug("lazy sweeping underway when freeing object space");

heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)

    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n",
              (int)next_length, (int)size);

    heap_pages_expand_sorted_to(objspace, next_length);

heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)

    heap_pages_expand_sorted(objspace);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);

    asan_poison_object(obj);

    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);

    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

#if GC_ENABLE_INCREMENTAL_MARK

    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

static void rb_aligned_free(void *ptr);

        heap_unlink_page(objspace, heap_tomb, page);
        heap_page_free(objspace, page);
    if (page_body == 0) {

    page = calloc1(sizeof(struct heap_page));

        rb_aligned_free(page_body);

    end = start + limit;

        mid = (lo + hi) / 2;

    for (p = start; p != end; p++) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);

        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

        heap_unlink_page(objspace, heap_tomb, page);

    const char *method = "recycle";

    page = heap_page_resurrect(objspace);

        page = heap_page_allocate(objspace);
        method = "allocate";

    if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",

    struct heap_page *page = heap_page_create(objspace);
    heap_add_page(objspace, heap, page);
    heap_add_freepage(heap, page);
    heap_allocatable_pages_set(objspace, add);

    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, heap);

    if (goal_ratio == 0.0) {

        if (f < 1.0) f = 1.1;

        next_used = (size_t)(f * used);

                   " G(%1.2f), f(%1.2f),"
                   goal_ratio, f, used, next_used);

    if (next_used > max_used) next_used = max_used;

    return next_used - used;

heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)

    size_t next_used_limit = used + additional_pages;

    heap_allocatable_pages_set(objspace, next_used_limit - used);

    gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",

        heap_assign_page(objspace, heap);

        gc_sweep_continue(objspace, heap);

        gc_marks_continue(objspace, heap);

        heap_prepare(objspace, heap);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

    asan_unpoison_object((VALUE)p, true);

    asan_unpoison_object((VALUE)p, true);

    asan_unpoison_object((VALUE)p, true);

        p = heap_get_freeobj_from_next_freepage(objspace, heap);

    if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {

#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))

#define gc_event_hook(objspace, event, data) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
#if !__has_feature(memory_sanitizer)

#if RGENGC_CHECK_MODE

        if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));

        if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));

    if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));

        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2

        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
#if RGENGC_OLD_NEWOBJ_CHECK > 0

        if (--newobj_cnt == 0) {

                gc_mark_set(objspace, obj);
                RVALUE_AGE_SET_OLD(objspace, obj);

                check_rvalue_consistency(obj);

        rb_bug("object allocation during garbage collection phase");

#if GC_DEBUG_STRESS_TO_CLASS

        for (i = 0; i < cnt; ++i) {

    return wb_protected ?
#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)

#undef rb_data_object_alloc

#undef rb_data_typed_object_alloc

    if (ptr && type->function.dsize) {
        return type->function.dsize(ptr);
    register size_t hi, lo, mid;

        mid = (lo + hi) / 2;

        if (page->start <= p) {

free_const_entry_i(VALUE value, void *data)

        rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));

        rb_bug("obj_free() called for broken object");

        obj_free_object_id(objspace, obj);

#if RGENGC_CHECK_MODE
#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
    CHECK(RVALUE_WB_UNPROTECTED);
    CHECK(RVALUE_MARKED);
    CHECK(RVALUE_MARKING);
    CHECK(RVALUE_UNCOLLECTIBLE);
#if USE_DEBUG_COUNTER

        if (RANY(obj)->as.regexp.ptr) {

            int free_immediately = FALSE;
            void (*dfree)(void *);

                dfree = RANY(obj)->as.typeddata.type->function.dfree;

                if (0 && free_immediately == 0) {

                dfree = RANY(obj)->as.data.dfree;

            else if (free_immediately) {

                make_zombie(objspace, obj, dfree, data);

        if (RANY(obj)->as.match.rmatch) {
#if USE_DEBUG_COUNTER

        if (RANY(obj)->as.file.fptr) {
            make_io_zombie(objspace, obj);

        make_zombie(objspace, obj, 0, 0);

#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)

static const struct st_hash_type object_id_hash_type = {

#if RGENGC_ESTIMATE_OLDMALLOC
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);

        pstart = page->start;

        if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {

objspace_each_objects_protected(VALUE arg)

incremental_enable(VALUE _)

    if (prev_dont_incremental) {
    asan_unpoison_object(obj, false);
    bool used_p = p->as.basic.flags;

            if (!p->as.basic.klass) break;

            if (!p->as.basic.klass) break;

    if (ptr || !used_p) {
        asan_poison_object(obj);

    return internal_object_p(obj);

os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)

    for (; p != pend; p++) {

        if (!internal_object_p(v)) {

    return os_obj_of(of);
should_be_callable(VALUE block)

    should_be_finalizable(obj);

    should_be_callable(block);

    return define_final0(obj, block);

    table = (VALUE)data;

        for (i = 0; i < len; i++) {

    should_be_finalizable(obj);
    should_be_callable(block);
    return define_final0(obj, block);

    table = (VALUE)data;

#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    rb_set_errinfo(saved.errinfo))

    saved.cfp = ec->cfp;

    for (i = saved.finished;
         saved.finished = ++i) {

#undef RESTORE_FINALIZER
    run_finalizer(objspace, zombie, (VALUE)table);

        asan_unpoison_object(zombie, false);
        next_zombie = RZOMBIE(zombie)->next;

        run_final(objspace, zombie);

            obj_free_object_id(objspace, zombie);

        RZOMBIE(zombie)->basic.flags = 0;

        heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);

        zombie = next_zombie;

        finalize_list(objspace, zombie);

gc_finalize_deferred(void *dmy)

    finalize_deferred(objspace);

        rb_bug("gc_finalize_deferred_register: can't register finalizer.");
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);

        finalize_deferred(objspace);

            run_finalizer(objspace, curr->obj, curr->table);

    gc_enter(objspace, "rb_objspace_call_finalizer");

            void *poisoned = asan_poisoned_object_p((VALUE)p);
            asan_unpoison_object((VALUE)p, false);

                p->as.free.flags = 0;

                    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;

                else if (RANY(p)->as.data.dfree) {
                    make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);

                if (RANY(p)->as.file.fptr) {
                    make_io_zombie(objspace, (VALUE)p);

                asan_poison_object((VALUE)p);

    gc_exit(objspace, "rb_objspace_call_finalizer");
    if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;

        is_swept_object(objspace, ptr) ||

    if (!is_garbage_object(objspace, ptr)) {

    check_rvalue_consistency(obj);

    return is_markable_object(objspace, obj) && is_live_object(objspace, obj);

    return is_garbage_object(objspace, obj);

#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)

        if ((ptr % sizeof(RVALUE)) == (4 << 2)) {

    if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
        is_live_object(objspace, orig)) {

    return id2ref(objid);
#if SIZEOF_LONG == SIZEOF_VOIDP

        return get_heap_object_id(obj);

    return rb_find_object_id(obj, nonspecial_obj_id_);

    return rb_find_object_id(obj, cached_object_id);

obj_memsize_of(VALUE obj, int use_all_types)

        rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",

    return obj_memsize_of(obj, TRUE);

type_sym(size_t type)

#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
    for (; p < pend; p++) {
        void *poisoned = asan_poisoned_object_p((VALUE)p);
        asan_unpoison_object((VALUE)p, false);

            asan_poison_object((VALUE)p);

gc_setup_mark_bits(struct heap_page *page)

    int empty_slots = 0, freed_slots = 0, final_slots = 0;
    RVALUE *p, *pend, *offset;

    gc_report(2, objspace, "page_sweep: start.\n");

            asan_unpoison_object((VALUE)p, false);

                    gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
#if USE_RGENGC && RGENGC_CHECK_MODE

                    if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
                    if (rgengc_remembered_sweep(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);

                    if (obj_free(objspace, (VALUE)p)) {

                        heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
                        gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));

                    asan_poison_object((VALUE)p);

    gc_setup_mark_bits(sweep_page);

#if GC_PROFILE_MORE_DETAIL

        record->removing_objects += final_slots + freed_slots;
        record->empty_objects += empty_slots;

    if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",

    sweep_page->free_slots = freed_slots + empty_slots;

        gc_finalize_deferred_register(objspace);

    gc_report(2, objspace, "page_sweep: end.\n");

    return freed_slots + empty_slots;
        heap_set_increment(objspace, 1);
        if (!heap_increment(objspace, heap)) {

gc_mode_name(enum gc_mode mode)

      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);

#if RGENGC_CHECK_MODE
    switch (prev_mode) {

    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));

#if GC_ENABLE_INCREMENTAL_MARK

        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4

    gc_sweep_start_heap(objspace, heap_eden);

    gc_report(1, objspace, "gc_sweep_finish\n");

    gc_prof_set_heap_info(objspace);
    heap_pages_free_unused_pages(objspace);

    if (heap_allocatable_pages < heap_tomb->total_pages) {
        heap_allocatable_pages_set(objspace, heap_tomb->total_pages);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);

    int unlink_limit = 3;
#if GC_ENABLE_INCREMENTAL_MARK

    gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);

    gc_report(2, objspace, "gc_sweep_step\n");
#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_start(objspace);

        int free_slots = gc_page_sweep(objspace, heap, sweep_page);

            heap_unlink_page(objspace, heap, sweep_page);
            heap_add_page(objspace, heap_tomb, sweep_page);

#if GC_ENABLE_INCREMENTAL_MARK

            if (heap_add_poolpage(objspace, heap, sweep_page)) {

                heap_add_freepage(heap, sweep_page);

            heap_add_freepage(heap, sweep_page);

        gc_sweep_finish(objspace);

#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_stop(objspace);

        gc_sweep_step(objspace, heap);

    gc_enter(objspace, "sweep_continue");

            gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");

    gc_sweep_step(objspace, heap);
    gc_exit(objspace, "sweep_continue");

    gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);

    if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_start(objspace);

        gc_sweep_start(objspace);
        gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_stop(objspace);

        gc_sweep_start(objspace);

    gc_heap_prepare_minimum_pages(objspace, heap_eden);
stack_chunk_alloc(void)

        chunk = chunk->next;

    stack->cache = chunk;

    chunk = stack->cache;

    next = stack->cache;

        next = stack_chunk_alloc();

    stack->chunk = next;

    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;

    while (chunk != NULL) {

        push_mark_stack_chunk(stack);

    if (is_mark_stack_empty(stack)) {

    if (stack->index == 1) {

        pop_mark_stack_chunk(stack);

#if GC_ENABLE_INCREMENTAL_MARK

    for (i=0; i<limit; i++) {

    int limit = stack->index;

        if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
        chunk = chunk->next;
        limit = stack->limit;

    rb_bug("invalid_mark_stack: unreachable");

    for (i=0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
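
/*
 * Illustrative sketch (not part of the original source; compiled out): the
 * mark stack is a linked list of fixed-size chunks (STACK_CHUNK_SIZE slots
 * each); a push that fills the current chunk links in a fresh one, and pops
 * fall back to the previous chunk when the current one drains. A simplified
 * push, assuming the chunk carries a data[] array and a ->next link:
 */
#if 0
static void
example_push_mark_stack(mark_stack_t *stack, VALUE obj)
{
    if (stack->index == stack->limit) {   /* current chunk is full */
        push_mark_stack_chunk(stack);     /* link in a new chunk */
    }
    stack->chunk->data[stack->index++] = obj;
}
#endif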
#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)

#define STACK_START (ec->machine.stack_start)
#define STACK_END (ec->machine.stack_end)
#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))

#ifdef __EMSCRIPTEN__
#undef STACK_GROW_DIRECTION
#define STACK_GROW_DIRECTION 1

#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                       : (size_t)(STACK_END - STACK_START + 1))

#if !STACK_GROW_DIRECTION

#define PREVENT_STACK_OVERFLOW 1
#ifndef PREVENT_STACK_OVERFLOW
#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
# define PREVENT_STACK_OVERFLOW 1
# define PREVENT_STACK_OVERFLOW 0

#if PREVENT_STACK_OVERFLOW

    return length > maximum_length;

#define stack_check(ec, water_mark) FALSE

#define STACKFRAME_FOR_CALL_CFUNC 2048
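
/*
 * Illustrative sketch (not part of the original source; compiled out): when
 * STACK_GROW_DIRECTION is not fixed at compile time (the !STACK_GROW_DIRECTION
 * branch above), the growth direction can be probed at run time by comparing
 * the address of a local in a callee frame against one from its caller.
 */
#if 0
static int
example_stack_grow_direction(volatile VALUE *addr_in_caller)
{
    volatile VALUE here;
    return (&here < addr_in_caller) ? -1 : +1;  /* -1: grows downward (typical) */
}
#endif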
        gc_mark_maybe(objspace, v);

    if (end <= start) return;

    mark_locations_array(objspace, start, n);

    for (i=0; i<n; i++) {
        gc_mark(objspace, values[i]);

    for (i=0; i<n; i++) {
        gc_mark_and_pin(objspace, values[i]);

    for (i=0; i<n; i++) {

        gc_mark_and_pin(objspace, values[i]);

    gc_mark_and_pin_stack_values(objspace, n, values);

    gc_mark(objspace, (VALUE)value);

    gc_mark_and_pin(objspace, (VALUE)value);

    gc_mark_and_pin(objspace, (VALUE)key);

    gc_mark(objspace, (VALUE)value);

    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark_and_pin(objspace, (VALUE)value);

    gc_mark_and_pin(objspace, (VALUE)key);
    gc_mark(objspace, (VALUE)value);
        gc_mark(objspace, RHASH(hash)->ifnone);

    switch (def->type) {

mark_method_entry_i(VALUE me, void *data)

    gc_mark(objspace, me);

mark_const_entry_i(VALUE value, void *data)

    gc_mark(objspace, ce->value);
    gc_mark(objspace, ce->file);

#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))

                                 const VALUE *stack_start, const VALUE *stack_end);
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;

    memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));

    mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));

    mark_stack_locations(objspace, ec, stack_start, stack_end);

    VALUE *stack_start, *stack_end;

    mark_stack_locations(objspace, ec, stack_start, stack_end);

                     const VALUE *stack_start, const VALUE *stack_end)

    gc_mark_locations(objspace, stack_start, stack_end);

#if defined(__mc68000__)
    gc_mark_locations(objspace,
                      (VALUE*)((char*)stack_start + 2),
                      (VALUE*)((char*)stack_end - 2));
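
/*
 * Illustrative sketch (not part of the original source; compiled out):
 * conservative marking of the machine context walks every pointer-sized
 * word between the stack bounds and hands each one to gc_mark_maybe(),
 * which ignores anything that is not a plausible heap object.
 */
#if 0
static void
example_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end)
{
    const VALUE *p;
    for (p = start; p < end; p++) {
        gc_mark_maybe(objspace, *p);  /* no-op unless *p points into the heap */
    }
}
#endif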
    if (is_pointer_to_heap(objspace, (void *)obj)) {

        asan_unpoison_object(obj, false);

        gc_mark_and_pin(objspace, obj);

            asan_poison_object(obj);

    if (RVALUE_MARKED(obj)) return 0;

#if RGENGC_PROFILE > 0
        objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2

        if (RVALUE_WB_UNPROTECTED(obj)) {
            if (gc_remember_unprotected(objspace, obj)) {
                gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));

            if (!RVALUE_OLD_P(obj)) {
                if (RVALUE_MARKED(obj)) {

                    gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_OLD(objspace, obj);

                    if (!RVALUE_MARKING(obj)) {
                        gc_grey(objspace, obj);

                        rgengc_remember(objspace, obj);

                    gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
                    RVALUE_AGE_SET_CANDIDATE(objspace, obj);
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));

#if GC_ENABLE_INCREMENTAL_MARK

    check_rvalue_consistency(obj);

    if (!RVALUE_OLD_P(obj)) {
        gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
        RVALUE_AGE_INC(objspace, obj);

        RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);

    check_rvalue_consistency(obj);

    rgengc_check_relation(objspace, obj);
    if (!gc_mark_set(objspace, obj)) return;

    gc_aging(objspace, obj);
    gc_grey(objspace, obj);

    if (!is_markable_object(objspace, obj)) return;
    gc_pin(objspace, obj);
    gc_mark_ptr(objspace, obj);

    if (!is_markable_object(objspace, obj)) return;
    gc_mark_ptr(objspace, obj);
    if (RVALUE_OLD_P(obj)) {

            gc_mark_values(objspace, (long)env->env_size, env->env);

            gc_mark(objspace, (VALUE)env->iseq);

        gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
        gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);

        gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.others);

        gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);

        gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);

        gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
        gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);

        mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
#if VM_CHECK_MODE > 0

    gc_mark_set_parent(objspace, obj);

        rb_bug("rb_gc_mark() called for broken object");

        gc_mark_imemo(objspace, obj);

    gc_mark(objspace, any->as.basic.klass);

            gc_mark(objspace, root);

            for (i=0; i < len; i++) {
                gc_mark(objspace, ptr[i]);

        mark_hash(objspace, obj);

            if (mark_func) (*mark_func)(ptr);

            for (i = 0; i < len; i++) {
                gc_mark(objspace, ptr[i]);

                gc_mark(objspace, ptr[i]);

        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
#if GC_ENABLE_INCREMENTAL_MARK
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;

    while (pop_mark_stack(mstack, &obj)) {

            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));

        gc_mark_children(objspace, obj);

#if GC_ENABLE_INCREMENTAL_MARK

                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");

            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {

    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);

    return gc_mark_stacked_objects(objspace, TRUE, count);

    return gc_mark_stacked_objects(objspace, FALSE, 0);
#if PRINT_ROOT_TICKS
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];

show_mark_ticks(void)

    for (i=0; i<MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];

            fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)

    rb_vm_t *vm = rb_ec_vm_ptr(ec);

#if PRINT_ROOT_TICKS
    tick_t start_tick = tick();

    const char *prev_category = 0;

    if (mark_ticks_categories[0] == 0) {

    if (categoryp) *categoryp = "xxx";

#if PRINT_ROOT_TICKS
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
    prev_category = category; \
    start_tick = tick(); \

#define MARK_CHECKPOINT_PRINT_TICK(category)

#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \

    if (vm->self) gc_mark(objspace, vm->self);

    mark_current_machine_context(objspace, ec);

        gc_mark_maybe(objspace, *list->varptr);

#undef MARK_CHECKPOINT
#if RGENGC_CHECK_MODE >= 4

#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj)   ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj)  ((const char *)((VALUE)(obj) >> 1))

static struct reflist *

    struct reflist *refs = xmalloc(sizeof(struct reflist));

    refs->list[0] = obj;

reflist_destruct(struct reflist *refs)

reflist_add(struct reflist *refs, VALUE obj)

    if (refs->pos == refs->size) {

    refs->list[refs->pos++] = obj;

reflist_dump(struct reflist *refs)

    for (i=0; i<refs->pos; i++) {

        if (IS_ROOTSIG(obj)) {

reflist_referred_from_machine_context(struct reflist *refs)

    for (i=0; i<refs->pos; i++) {

        if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
    const char *category;

allrefs_add(struct allrefs *data, VALUE obj)

    struct reflist *refs;

        reflist_add(refs, data->root_obj);

        refs = reflist_create(data->root_obj);

    struct allrefs *data = (struct allrefs *)ptr;

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);

    struct allrefs *data = (struct allrefs *)ptr;

    data->root_obj = MAKE_ROOTSIG(data->category);

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);

    struct allrefs data;
    struct mark_func_data_struct mfd;

    data.objspace = objspace;

    init_mark_stack(&data.mark_stack);

    mfd.mark_func = allrefs_roots_i;

    gc_mark_roots(objspace, &data.category);

    while (pop_mark_stack(&data.mark_stack, &obj)) {

    free_stack_chunks(&data.mark_stack);

    return data.references;

    struct reflist *refs = (struct reflist *)value;
    reflist_destruct(refs);

objspace_allrefs_destruct(struct st_table *refs)

    st_foreach(refs, objspace_allrefs_destruct_i, 0);
#if RGENGC_CHECK_MODE >= 5

    struct reflist *refs = (struct reflist *)v;

    fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);

    struct reflist *refs = (struct reflist *)v;

        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);

        if (reflist_referred_from_machine_context(refs)) {

            objspace->rgengc.error_count++;

#if RGENGC_ESTIMATE_OLDMALLOC

    objspace->rgengc.allrefs_table = objspace_allrefs(objspace);

    if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
        allrefs_dump(objspace);

        if (checker_name) rb_bug("%s: GC has a problem.", checker_name);

    objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
    objspace->rgengc.allrefs_table = 0;

#if RGENGC_ESTIMATE_OLDMALLOC
check_generation_i(const VALUE child, void *ptr)

    if (!RVALUE_OLD_P(child)) {
        if (!RVALUE_REMEMBERED(parent) &&
            !RVALUE_REMEMBERED(child) &&
            !RVALUE_UNCOLLECTIBLE(child)) {

            fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));

check_color_i(const VALUE child, void *ptr)

    if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
        fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
                obj_info(parent), obj_info(child));

check_children_i(const VALUE child, void *ptr)

    if (check_rvalue_consistency_force(child, FALSE) != 0) {
        fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
                obj_info(child), obj_info(data->parent));
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)

        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);

            if (RVALUE_BLACK_P(obj)) {

            asan_poison_object(obj);

    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;

        void *poisoned = asan_poisoned_object_p(val);
        asan_unpoison_object(val, false);

        if (RBASIC(val) == 0) free_objects++;

            has_remembered_shady = TRUE;

            has_remembered_old = TRUE;
            remembered_old_objects++;

            asan_poison_object(val);

        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");

        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? obj_info(obj) : "");

        rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);

        rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);

    return remembered_old_objects;
    int remembered_old_objects = 0;

        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

            asan_unpoison_object((VALUE)p, false);

                fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info((VALUE)p));

            asan_poison_object((VALUE)prev);

        remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);

    return remembered_old_objects;

    int remembered_old_objects = 0;
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
    return remembered_old_objects;
gc_verify_internal_consistency_m(VALUE dummy)

    objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);

#if RGENGC_CHECK_MODE >= 5

        rb_bug("gc_verify_internal_consistency: found internal inconsistency.");

            fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",

            size_t list_count = 0;

            rb_bug("inconsistent finalizing object count:\n"
                   "  heap_pages_deferred_final list has %"PRIuSIZE" items.",

gc_verify_transient_heap_internal_consistency(VALUE dmy)

#if GC_ENABLE_INCREMENTAL_MARK

    if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",

#if GC_ENABLE_INCREMENTAL_MARK

            gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));

            gc_mark_children(objspace, (VALUE)p);

    gc_mark_stacked_objects_all(objspace);

heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)

        heap_add_freepage(heap, page);
#if GC_ENABLE_INCREMENTAL_MARK

            heap_move_pooled_pages_to_free_pages(heap_eden);
            gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");

            rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));

        gc_mark_roots(objspace, 0);

            gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));

#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {
            rb_bug("gc_marks_finish (incremental): there are remembered old objects.");

        gc_marks_wb_unprotected_objects(objspace);

#if RGENGC_CHECK_MODE >= 2
        gc_verify_internal_consistency(objspace);

#if RGENGC_CHECK_MODE >= 4
    gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");

        if (sweep_slots > max_free_slots) {

        if (sweep_slots < min_free_slots) {

            if (!full_marking) {

                full_marking = TRUE;

                gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");

                gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
                heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
                heap_increment(objspace, heap);

    gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",

    if (sweep_slots < min_free_slots) {
        gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
        heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slot, total_slot));
        heap_increment(objspace, heap);
#if GC_ENABLE_INCREMENTAL_MARK

    if (gc_mark_stacked_objects_incremental(objspace, slots)) {
        if (gc_marks_finish(objspace)) {

    gc_report(1, objspace, "gc_marks_rest\n");

#if GC_ENABLE_INCREMENTAL_MARK

            while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
        } while (gc_marks_finish(objspace) == FALSE);

    gc_mark_stacked_objects_all(objspace);
    gc_marks_finish(objspace);

#if GC_ENABLE_INCREMENTAL_MARK

    gc_enter(objspace, "marks_continue");

            struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);

                from = "pooled-pages";

            else if (heap_increment(objspace, heap)) {

                from = "incremented-pages";

            gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);

            gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
            gc_marks_rest(objspace);

    gc_exit(objspace, "marks_continue");

    gc_prof_mark_timer_start(objspace);

        gc_marks_start(objspace, full_mark);

            gc_marks_rest(objspace);

#if RGENGC_PROFILE > 0

    gc_marks_start(objspace, TRUE);
    gc_marks_rest(objspace);

    gc_prof_mark_timer_stop(objspace);

    const char *status = " ";
    return RVALUE_REMEMBERED(obj);

    gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
              rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");

    check_rvalue_consistency(obj);

        if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));

#if RGENGC_PROFILE > 0
    if (!rgengc_remembered(objspace, obj)) {
        if (RVALUE_WB_UNPROTECTED(obj) == 0) {
            objspace->profile.total_remembered_normal_object_count++;
#if RGENGC_PROFILE >= 2

    return rgengc_remembersetbits_set(objspace, obj);

    int result = rgengc_remembersetbits_get(objspace, obj);
    check_rvalue_consistency(obj);

    gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
    return rgengc_remembered_sweep(objspace, obj);
#ifndef PROFILE_REMEMBERSET_MARK
#define PROFILE_REMEMBERSET_MARK 0

#if PROFILE_REMEMBERSET_MARK
    int has_old = 0, has_shady = 0, has_both = 0, skip = 0;

    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");

#if PROFILE_REMEMBERSET_MARK

                    gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));

                    gc_mark_children(objspace, obj);

#if PROFILE_REMEMBERSET_MARK

#if PROFILE_REMEMBERSET_MARK
    fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);

    gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
    if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
    if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
    if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));

    if (!rgengc_remembered(objspace, a)) {
        rgengc_remember(objspace, a);
        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));

    if (RVALUE_WB_UNPROTECTED(b)) {
        gc_remember_unprotected(objspace, b);

        RVALUE_AGE_SET_OLD(objspace, b);
        rgengc_remember(objspace, b);

    gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));

    check_rvalue_consistency(a);
    check_rvalue_consistency(b);
#if GC_ENABLE_INCREMENTAL_MARK

    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;

    gc_aging(objspace, obj);
    gc_grey(objspace, obj);

    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));

    if (RVALUE_BLACK_P(a)) {
        if (RVALUE_WHITE_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
                gc_mark_from(objspace, b, a);

        else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(b)) {
                gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
                RVALUE_AGE_SET_OLD(objspace, b);

                if (RVALUE_BLACK_P(b)) {
                    gc_grey(objspace, b);

                gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
                gc_remember_unprotected(objspace, b);

#define gc_writebarrier_incremental(a, b, objspace)

    if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {

        gc_writebarrier_generational(a, b, objspace);

        gc_writebarrier_incremental(a, b, objspace);
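
/*
 * Illustrative sketch (not part of the original source; compiled out): user
 * code does not call the write-barrier helpers above directly -- C
 * extensions store references through RB_OBJ_WRITE(), which performs the
 * assignment and then invokes rb_gc_writebarrier(old, young), so an old
 * object that gains a young child gets remembered. `example_holder` is a
 * hypothetical wrapped struct.
 */
#if 0
struct example_holder { VALUE child; };

static void
example_store_child(VALUE self, struct example_holder *h, VALUE child)
{
    RB_OBJ_WRITE(self, &h->child, child);  /* assignment + write barrier */
}
#endif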
    if (RVALUE_WB_UNPROTECTED(obj)) {

        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
                  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");

        if (RVALUE_OLD_P(obj)) {
            gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
            RVALUE_DEMOTE(objspace, obj);
            gc_mark_set(objspace, obj);
            gc_remember_unprotected(objspace, obj);

            objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2

            RVALUE_AGE_RESET(obj);

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));

        if (RVALUE_BLACK_P(obj)) {
            gc_grey(objspace, obj);

        if (RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, obj);
static st_table *rgengc_unprotect_logging_table;

rgengc_unprotect_logging_exit_func(void)

    st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);

    if (rgengc_unprotect_logging_table == 0) {

        atexit(rgengc_unprotect_logging_exit_func);

    if (RVALUE_WB_UNPROTECTED(obj) == 0) {

        snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);

    if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {

        if (!RVALUE_OLD_P(dest)) {

            RVALUE_AGE_RESET_RAW(dest);

            RVALUE_DEMOTE(objspace, dest);

    check_rvalue_consistency(dest);
static ID ID_marked;
static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;

#define I(s) ID_##s = rb_intern(#s);

        if (RVALUE_WB_UNPROTECTED(obj) == 0 && n < max) flags[n++] = ID_wb_protected;
        if (RVALUE_OLD_P(obj) && n < max)               flags[n++] = ID_old;
        if (RVALUE_UNCOLLECTIBLE(obj) && n < max)       flags[n++] = ID_uncollectible;
    int is_old = RVALUE_OLD_P(obj);

    gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));

    if (RVALUE_MARKED(obj)) {
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_ENABLE_INCREMENTAL_MARK
#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
#define MARK_OBJECT_ARY_BUCKET_SIZE 1024

    if (tmp->varptr == addr) {

#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))

    if (!heap_increment(objspace, heap)) {
        heap_set_increment(objspace, 1);
        heap_increment(objspace, heap);
    }
    gc_prof_set_malloc_info(objspace);
#if RGENGC_ESTIMATE_OLDMALLOC

#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time();
#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;

    return gc_start(objspace, reason);
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);

    gc_enter(objspace, "gc_start");

    do_full_mark = TRUE;
    do_full_mark = TRUE;
    do_full_mark = TRUE;
#if GC_ENABLE_INCREMENTAL_MARK

    gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
#if USE_DEBUG_COUNTER
#if RGENGC_ESTIMATE_OLDMALLOC

    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace);

    gc_prof_timer_start(objspace);
    gc_marks(objspace, do_full_mark);
    gc_prof_timer_stop(objspace);

    gc_exit(objspace, "gc_start");
    if (marking || sweeping) {
        gc_enter(objspace, "gc_rest");

        gc_marks_rest(objspace);
        gc_sweep_rest(objspace);

        gc_exit(objspace, "gc_rest");
    }
#if GC_ENABLE_INCREMENTAL_MARK

    static char buff[0x10];
    gc_current_status_fill(objspace, buff);

#if PRINT_ENTER_EXIT_TICK

static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    if (direction == 0) { /* enter */
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);

        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                last_gc_status, current_gc_status,
        last_exit_tick = exit_tick;

        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                exit_tick - enter_tick,
                last_gc_status, current_gc_status,
    }
}

gc_record(rb_objspace_t *objspace, int direction, const char *event)
    gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
    gc_record(objspace, 0, event);

    gc_record(objspace, 1, event);
    gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
gc_with_gvl(void *ptr)

    return !RVALUE_PINNED(obj);

    wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
    uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
    marking = RVALUE_MARKING((VALUE)src);
    if (wb_unprotected) {
    if (uncollectible) {

    src->as.moved.destination = (VALUE)dest;
    src->as.moved.next = moved_list;
    if (free->slot == free->page->start + free->page->total_slots - 1) {
        free->page = page_list[free->index];
    }

    size_t total_pages = heap_eden->total_pages;
    page = page_list[0];

    free->objspace = objspace;

    page = page_list[total_pages - 1];
    scan->index = total_pages - 1;
compare_pinned(const void *left, const void *right, void *dummy)
{
    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

compare_free_slots(const void *left, const void *right, void *dummy)
{
    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;
    size_t total_pages = heap_eden->total_pages;

    page_list[i++] = page;
    page_list = allocate_page_list(objspace, comparator);

    init_cursors(objspace, &free_cursor, &scan_cursor, page_list);

    while (not_met(&free_cursor, &scan_cursor)) {
        void *free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
        asan_unpoison_object((VALUE)free_cursor.slot, false);

        while (BUILTIN_TYPE(free_cursor.slot) != T_NONE && not_met(&free_cursor, &scan_cursor)) {
            if (free_slot_poison) {
                asan_poison_object((VALUE)free_cursor.slot);
            }
            advance_cursor(&free_cursor, page_list);

            free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
            asan_unpoison_object((VALUE)free_cursor.slot, false);
        }

        void *scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
        asan_unpoison_object((VALUE)scan_cursor.slot, false);

        while (!gc_is_moveable_obj(objspace, (VALUE)scan_cursor.slot) && not_met(&free_cursor, &scan_cursor)) {
            if (scan_slot_poison) {
                asan_poison_object((VALUE)scan_cursor.slot);
            }
            retreat_cursor(&scan_cursor, page_list);

            scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
            asan_unpoison_object((VALUE)scan_cursor.slot, false);
        }

        if (not_met(&free_cursor, &scan_cursor)) {
            moved_list = gc_move(objspace, (VALUE)scan_cursor.slot, (VALUE)free_cursor.slot, moved_list);

            advance_cursor(&free_cursor, page_list);
            retreat_cursor(&scan_cursor, page_list);
        }
    }
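/*
 * Classic two-finger compaction (Edwards): the free cursor sweeps
 * forward over T_NONE slots while the scan cursor sweeps backward over
 * movable objects, and each movable object is copied into the lowest
 * free slot until the cursors meet.  gc_move() leaves a T_MOVED
 * forwarding cell at the old address and threads it onto moved_list so
 * the reference-update pass can rewrite pointers afterwards.  The
 * asan_* calls only manage AddressSanitizer poisoning around each peek.
 */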
    for (i = 0; i < len; i++) {

    for (i = 0; i < len; i++) {

    if (gc_object_moved_p(objspace, (VALUE)*key)) {

    if (gc_object_moved_p(objspace, (VALUE)*value)) {

    if (gc_object_moved_p(objspace, (VALUE)key)) {

    if (gc_object_moved_p(objspace, (VALUE)value)) {

    if (gc_object_moved_p(objspace, (VALUE)*value)) {

    if (gc_object_moved_p(objspace, (VALUE)value)) {
    gc_update_table_refs(objspace, ptr);

    switch (def->type) {

    for (i=0; i<n; i++) {

    gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);

    gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
check_id_table_move(ID id, VALUE value, void *data)
{
    if (gc_object_moved_p(objspace, (VALUE)value)) {

    void *poisoned = asan_poisoned_object_p(value);
    asan_unpoison_object(value, false);

    destination = value;

    asan_poison_object(value);

    destination = value;
update_id_table(ID *key, VALUE *value, void *data, int existing)
{
    if (gc_object_moved_p(objspace, (VALUE)*value)) {

update_const_table(VALUE value, void *data)
{
    if (gc_object_moved_p(objspace, ce->value)) {

    if (gc_object_moved_p(objspace, ce->file)) {

    entry = entry->next;

    update_subclass_entries(objspace, ext->subclasses);
    gc_report(4, objspace, "update-refs: %p ->", (void *)obj);

    gc_ref_update_imemo(objspace, obj);

    gc_ref_update_array(objspace, obj);

    gc_ref_update_hash(objspace, obj);

    if (compact_func) (*compact_func)(ptr);

    gc_ref_update_object(objspace, obj);

    if (any->as.match.str) {

    for (i = 0; i < len; i++) {

    gc_report(4, objspace, "update-refs: %p <-", (void *)obj);
gc_ref_update(void *vstart, void *vend, size_t stride, void *data)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_poisoned_object_p(v);
        asan_unpoison_object(v, false);

        heap_page_add_freeobj(objspace, page, v);

        if (RVALUE_WB_UNPROTECTED(v)) {

        gc_update_object_references(objspace, v);

        asan_poison_object(v);
    }
#define global_symbols ruby_global_symbols

    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    objspace_each_objects_without_setup(objspace, gc_ref_update, objspace);
static void gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier);

gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
{
    gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);

    return gc_compact_stats(objspace);
root_obj_check_moved_i(const char *category, VALUE obj, void *data)

reachable_object_check_moved_i(VALUE ref, void *data)
{
    rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
}

heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
{
    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_poisoned_object_p(v);
        asan_unpoison_object(v, false);

        asan_poison_object(v);
    }

    objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
    objspace_each_objects(objspace, heap_check_moved_i, NULL);
gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
{
    if (0) fprintf(stderr, "gc_compact_after_gc: %d,%d,%d\n", use_toward_empty, use_double_pages, use_verifier);

    gc_verify_internal_consistency(objspace);

    if (use_double_pages) {

    VALUE moved_list_head;

    if (use_toward_empty) {
        moved_list_head = gc_compact_heap(objspace, compare_free_slots);
    }
    else {
        moved_list_head = gc_compact_heap(objspace, compare_pinned);
    }

    gc_update_references(objspace);

    gc_check_references_for_moved(objspace);

    while (moved_list_head) {
        next_moved = RMOVED(moved_list_head)->next;

        RMOVED(moved_list_head)->flags = 0;
        RMOVED(moved_list_head)->destination = 0;
        RMOVED(moved_list_head)->next = 0;

        heap_page_add_freeobj(objspace, page, moved_list_head);

        heap_unlink_page(objspace, heap_eden, page);
        heap_add_page(objspace, heap_tomb, page);

        moved_list_head = next_moved;
    }

    gc_verify_internal_consistency(objspace);
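/*
 * Compaction pipeline in order: optionally double the heap
 * (use_double_pages), move objects with gc_compact_heap() using the
 * chosen page comparator, rewrite every reference with
 * gc_update_references(), optionally verify that nothing still points
 * at a T_MOVED cell, then convert each forwarding cell back into a free
 * slot and retire emptied pages from eden to the tomb heap.
 */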
    int use_toward_empty = FALSE;
    int use_double_pages = FALSE;

    static ID keyword_ids[2];

    if (!keyword_ids[0]) {
        keyword_ids[1] = rb_intern("double_heap");
    }

    use_toward_empty = TRUE;
    use_double_pages = TRUE;

    gc_compact(objspace, use_toward_empty, use_double_pages, TRUE);
    return gc_compact_stats(objspace);
    garbage_collect(objspace, reason);

#if RGENGC_PROFILE >= 2

gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    const char *type = type_name(i, 0);
gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
{
    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;

    if (sym_major_by == Qnil) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
#if RGENGC_ESTIMATE_OLDMALLOC

#define SET(name, attr) \
    if (key == sym_##name) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));

#if RGENGC_ESTIMATE_OLDMALLOC
    SET(major_by, major_by);

    if (orig_flags == 0) {

    return gc_info_decode(objspace, key, 0);

    return gc_info_decode(objspace, arg, 0);
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,
#if RGENGC_ESTIMATE_OLDMALLOC
static VALUE gc_stat_compat_table;

setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(heap_sorted_length);
        S(heap_available_slots);
        S(heap_final_slots);
        S(heap_marked_slots);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);
        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);
        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);
#define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(gc_stat_heap_used);
        S(heap_eden_page_length);
        S(heap_tomb_page_length);
        S(remembered_shady_object);
        S(remembered_shady_object_limit);
        S(old_object_limit);
        S(total_allocated_object);
        S(total_freed_object);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase);
#define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
#define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
#if RGENGC_ESTIMATE_OLDMALLOC

    if (!NIL_P(new_key)) {
        static int warned = 0;
        rb_warn("GC.stat keys were changed from Ruby 2.1. "
                "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
    }

    if ((new_key = compat_key(key)) != Qnil) {
gc_stat_internal(VALUE hash_or_sym)
{
    setup_gc_stat_symbols();

    static VALUE default_proc_for_compat = 0;
    if (default_proc_for_compat == 0) {
        default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
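/*
 * gc_stat_internal() backs both GC.stat (a Hash argument fills in every
 * key) and GC.stat(:key) (a Symbol argument makes the SET chain return
 * the single matching counter).  The default proc installed above maps
 * pre-2.2 key names to their modern equivalents via compat_key() so old
 * callers keep working, after a one-time rb_warn().
 */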
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);

    if ((new_key = compat_key(key)) != Qnil) {
#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
    gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
    gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
    gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
    gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
    gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
    gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
    size_t value = gc_stat_internal(arg);

    gc_stat_internal(arg);

    size_t value = gc_stat_internal(key);

    gc_stat_internal(key);

    gc_stress_set(objspace, flag);
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG

    unit = 1024*1024*1024;

    while (*end && isspace((unsigned char)*end)) end++;

    if (val > 0 && (size_t)val > lower_bound) {
        *default_value = (size_t)val;
    }
            name, val, *default_value, lower_bound);
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    if (!*ptr || *end) {

    if (accept_zero && val == 0.0) {
    }
    else if (val <= lower_bound) {
        fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                name, val, *default_value, lower_bound);
    }
    else if (upper_bound != 0.0 &&
             val > upper_bound) {
        fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                name, val, *default_value, upper_bound);
    }

    *default_value = val;
gc_set_initial_pages(void)
{
    if (min_pages > heap_eden->total_pages) {
    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
    }
    else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
        rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
    }

    if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
        gc_set_initial_pages();
    }
    else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
        rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
        gc_set_initial_pages();
    }

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);

    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
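/*
 * These environment knobs override the GC_* compile-time defaults near
 * the top of this file; a value that fails its bound check is reported
 * on stderr and the default is kept.  Typical use is one-off tuning
 * from the shell, e.g.
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.2 ruby app.rb
 * (illustrative values, not recommendations).
 */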
#if RGENGC_ESTIMATE_OLDMALLOC

    if (is_markable_object(objspace, obj)) {
        struct mark_func_data_struct mfd;
        mfd.mark_func = func;

        gc_mark_children(objspace, obj);
    }

    objspace_reachable_objects_from_root(objspace, func, passing_data);

    struct mark_func_data_struct mfd;

    data.data = passing_data;

    mfd.mark_func = root_objects_from;

    gc_mark_roots(objspace, &data.category);
gc_vraise(void *ptr)

negative_size_allocation_error(const char *msg)

ruby_memerror_body(void *dummy)

    if (during_gc) gc_exit(objspace, "rb_memerror");
#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
    res = malloc(alignment + size + sizeof(void*));
    aligned = (char*)res + alignment + sizeof(void*);
    aligned -= ((VALUE)aligned & (alignment - 1));
    ((void**)aligned)[-1] = res;
    res = (void*)aligned;

    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);
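/*
 * The plain-malloc fallback above over-allocates by alignment +
 * sizeof(void*), rounds the candidate address down to an alignment
 * boundary, and stashes the pointer malloc() actually returned in the
 * word just below the aligned block so rb_aligned_free() can recover it.
 * Worked example with alignment = 0x4000 and malloc() returning 0x10010:
 * candidate = 0x10010 + 0x4000 + 8 = 0x14018, masked down to 0x14000,
 * and 0x10010 is stored at 0x13ff8.  The two GC_ASSERTs state the
 * preconditions: alignment is a power of two and a multiple of the
 * pointer size.
 */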
rb_aligned_free(void *ptr)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)

static inline size_t

#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    if (val < sub) sub = val;
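/*
 * Saturating decrement for the shared malloc counters: the body (elided
 * in this listing) is a compare-and-swap retry loop that rereads *var,
 * clamps `sub` so the subtraction can never wrap below zero, and loops
 * until the CAS succeeds, since these counters are updated concurrently
 * without a lock.
 */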
    garbage_collect_with_gvl(objspace, reason);

    if (new_size > old_size) {
#if RGENGC_ESTIMATE_OLDMALLOC
#if RGENGC_ESTIMATE_OLDMALLOC

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {
    }
    else {
        size_t dec_size = old_size - new_size;
        size_t allocated_size = objspace->malloc_params.allocated_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
        }
        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
    }

    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
                   (int)new_size, (int)old_size);

    if (allocations > 0) {
        atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
    }
#if MALLOC_ALLOCATED_SIZE_CHECK
#if USE_GC_MALLOC_OBJ_INFO_DETAILS

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;

static inline size_t

#if CALC_EXACT_MALLOC_SIZE

static inline void *

    size = objspace_malloc_size(objspace, mem, size);

#if CALC_EXACT_MALLOC_SIZE
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    info->file = ruby_malloc_info_file;
    info->line = info->file ? ruby_malloc_info_line : 0;

#define TRY_WITH_GC(alloc) do { \
        objspace_malloc_gc_stress(objspace); \
        (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
                                   GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
                                   GPR_FLAG_MALLOC) || \

    size = objspace_malloc_prepare(objspace, size);
    return objspace_malloc_fixup(objspace, mem, size);
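/*
 * TRY_WITH_GC is the out-of-memory retry protocol used by the xmalloc
 * family: try the allocation; on failure force a full, immediate GC
 * (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP,
 * tagged GPR_FLAG_MALLOC) and try once more; only if the GC cannot run
 * or the retry also fails does it raise NoMemoryError.  The retry and
 * ruby_memerror() lines are elided in this listing.
 */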
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)

objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    if (!ptr) return objspace_xmalloc0(objspace, new_size);

    if (new_size == 0) {
        if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {

    objspace_xfree(objspace, ptr, old_size);

#if CALC_EXACT_MALLOC_SIZE
    old_size = info->size;

    old_size = objspace_malloc_size(objspace, ptr, old_size);

    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    info->size = new_size;
#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS

#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
static st_table *malloc_info_file_table;

    const char *file = (void *)key;
    const size_t *data = (void *)val;

    fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);
    for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
        if (i == MALLOC_INFO_GEN_SIZE-1) {
            fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
        }
        else {
            fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
        }
    }

    for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {

    if (malloc_info_file_table) {
        st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
    }
#if CALC_EXACT_MALLOC_SIZE
    old_size = info->size;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;

    malloc_info_gen_cnt[gen_index]++;
    malloc_info_gen_size[gen_index] += info->size;

    for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
        size_t s = 16 << i;
        if (info->size <= s) {
            malloc_info_size[i]++;
        }
    }
    malloc_info_size[i]++;

    if (malloc_info_file_table == NULL) {

    data = malloc(xmalloc2_size(2, sizeof(size_t)));
    if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
    data[0] = data[1] = 0;

    data[1] += info->size;

    if (0) fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n",
                   (int)info->size, gen, info->file, (int)info->line);
    old_size = objspace_malloc_size(objspace, ptr, old_size);

ruby_xmalloc0(size_t size)

    negative_size_allocation_error("too large allocation size");

    return ruby_xmalloc0(size);

    size = objspace_malloc_prepare(objspace, size);
    return objspace_malloc_fixup(objspace, mem, size);

#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc

    negative_size_allocation_error("too large allocation size");

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2

    size_t len = xmalloc2_size(n, size);

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree

    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);

    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);

    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);

    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
#if CALC_EXACT_MALLOC_SIZE
#if CALC_EXACT_MALLOC_SIZE
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
#if CALC_EXACT_MALLOC_SIZE

    imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);

#if MALLOC_ALLOCATED_SIZE

gc_malloc_allocated_size(VALUE self)

gc_malloc_allocations(VALUE self)

    else if (diff < 0) {
#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0

#if WMAP_DELETE_DEAD_OBJECT_IN_MARK

wmap_compact(void *ptr)

wmap_mark(void *ptr)
#if WMAP_DELETE_DEAD_OBJECT_IN_MARK

wmap_free(void *ptr)

wmap_memsize(const void *ptr)
    if (!is_id_value(objspace, obj)) return FALSE;
    if (!is_live_object(objspace, obj)) return FALSE;

    if (!existing) return ST_STOP;

    if (ptr[i] != wmap) {

    rb_bug("wmap_finalize: objid is not found.");

    rids = (VALUE *)data;

wmap_inspect(VALUE self)

    if (wmap_live_p(objspace, obj)) {

wmap_each(VALUE self)

    if (wmap_live_p(objspace, obj)) {

wmap_each_key(VALUE self)

    if (wmap_live_p(objspace, obj)) {

wmap_each_value(VALUE self)

wmap_keys(VALUE self)

wmap_values(VALUE self)

    ptr = ruby_xmalloc0(2 * sizeof(VALUE));

    define_final0(orig, w->final);

    define_final0(wmap, w->final);

    if (!wmap_live_p(objspace, obj)) return Qnil;

wmap_size(VALUE self)
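/*
 * ObjectSpace::WeakMap bookkeeping: entries live in two st_tables
 * (object -> key list and key -> object), wmap_finalize() scrubs both
 * tables when a referenced object dies, and every reader filters
 * through wmap_live_p() so a collected or mid-finalization object never
 * escapes as a stale reference.
 */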
#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG

#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

getrusage_time(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    static int try_clock_gettime = 1;

    try_clock_gettime = 0;

    struct rusage usage;

    if (getrusage(RUSAGE_SELF, &usage) == 0) {
        time = usage.ru_utime;
        return time.tv_sec + time.tv_usec * 1e-6;
    }

    FILETIME creation_time, exit_time, kernel_time, user_time;

    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
        memcpy(&ui, &user_time, sizeof(FILETIME));
        q = ui.QuadPart / 10L;
        t = (DWORD)(q % 1000000L) * 1e-6;
        t += (DWORD)q & ~(~0 << 16);
    }
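/*
 * getrusage_time() prefers per-process CPU time over wall time, trying
 * sources in order: clock_gettime(CLOCK_PROCESS_CPUTIME_ID) where
 * available (disabled after a first failure via try_clock_gettime),
 * then getrusage() user time, then GetProcessTimes() on Windows, whose
 * FILETIME is converted from 100ns ticks to seconds.
 */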
gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)

    rb_bug("gc_profile malloc or realloc miss");

#if MALLOC_ALLOCATED_SIZE
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY

    struct rusage usage;
    if (getrusage(RUSAGE_SELF, &usage) == 0) {
        record->maxrss = usage.ru_maxrss;
        record->minflt = usage.ru_minflt;
        record->majflt = usage.ru_majflt;
    }

#if GC_PROFILE_MORE_DETAIL
    record->prepare_time = objspace->profile.prepare_time;

elapsed_time_from(double time)
{
    double now = getrusage_time();
#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

#if GC_PROFILE_MORE_DETAIL
#if GC_PROFILE_MORE_DETAIL
    record->gc_mark_time = elapsed_time_from(record->gc_mark_time);

    record->gc_time += sweep_time;

#if GC_PROFILE_MORE_DETAIL
    record->gc_sweep_time += sweep_time;
#if GC_PROFILE_MORE_DETAIL
#if GC_PROFILE_MORE_DETAIL
    record->heap_live_objects = live;
    record->heap_free_objects = total - live;
gc_profile_clear(VALUE _)

gc_profile_record_get(VALUE _)

#if GC_PROFILE_MORE_DETAIL
#if RGENGC_PROFILE > 0

#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

gc_profile_dump_major_reason(int flags, char *buff)
{
    if (reason & GPR_FLAG_MAJOR_BY_##x) { \
        buff[i++] = #x[0]; \
        if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
    }
#if RGENGC_ESTIMATE_OLDMALLOC

#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
    append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));

#if GC_PROFILE_MORE_DETAIL
            "Prepare Time = Previously GC's rest sweep time\n"
            "Index Flags Allocate Inc. Allocate Limit"
            " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
            " OldgenObj RemNormObj RemShadObj"
            " MaxRSS(KB) MinorFLT MajorFLT"

            gc_profile_dump_major_reason(record->flags, reason_str),
            record->allocate_increase, record->allocate_limit,
            record->allocated_size,
            record->heap_use_pages,
            record->gc_mark_time*1000,
            record->gc_sweep_time*1000,
            record->prepare_time*1000,
            record->heap_live_objects,
            record->heap_free_objects,
            record->removing_objects,
            record->empty_objects
            record->old_objects,
            record->remembered_normal_objects,
            record->remembered_shady_objects
            record->maxrss / 1024,
gc_profile_result(VALUE _)

gc_profile_total_time(VALUE self)

gc_profile_enable_get(VALUE self)

gc_profile_enable(VALUE _)

gc_profile_disable(VALUE _)

static const char *

#define TYPE_NAME(t) case (t): return #t;

static const char *

    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
# define ARY_SHARED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary),ELTS_SHARED)!=0)
# define ARY_EMBED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
{
    snprintf(buff, buff_size, " %s@%s:%d",

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end

#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")

    const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

                obj_type_name(obj)));

                obj_type_name(obj)));

                obj_type_name(obj)));
    if (internal_object_p(obj)) {

    if (!NIL_P(class_path)) {

    if (!NIL_P(class_path)) {

    if (!NIL_P(class_path)) {

        (block = vm_proc_block(obj)) != NULL &&
        (iseq = vm_block_iseq(block)) != NULL) {

    const char *imemo_name = "\0";

#define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;

    APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

static const char *

    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        obj_info_buffers_index = 0;
    }
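/*
 * obj_info() formats into a small ring of static buffers
 * (OBJ_INFO_BUFFERS_NUM entries, wrapping via the index reset above) so
 * several obj_info() results can appear in a single
 * gc_report()/rb_bug() format call without later calls clobbering
 * earlier ones.
 */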
static const char *

    return obj_type_name(obj);

    return obj_info(obj);
    if (is_pointer_to_heap(objspace, (void *)obj)) {

    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");

    fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");

    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
#if GC_DEBUG_STRESS_TO_CLASS

#include "gc.rbinc"

VALUE rb_mObjSpace;
VALUE rb_mProfiler;
VALUE gc_constants;

#if MALLOC_ALLOCATED_SIZE
#if GC_DEBUG_STRESS_TO_CLASS

#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
#ifdef ruby_xmalloc
#undef ruby_xmalloc
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#ifdef ruby_xcalloc
#undef ruby_xcalloc
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
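/*
 * Trailing out-of-line definitions of ruby_xmalloc() and friends; the
 * macro forms are #undef'ed above so real linkable functions are
 * emitted.  With USE_GC_MALLOC_OBJ_INFO_DETAILS each one records a
 * position into ruby_malloc_info_file/ruby_malloc_info_line before
 * delegating, which feeds the per-call-site allocation statistics
 * dumped by mmalloc_info_file_i() earlier in this file.
 */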