14 #define rb_data_object_alloc rb_data_object_alloc
15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc
37 #include <sys/types.h>
43 #undef rb_data_object_wrap
45 #ifndef HAVE_MALLOC_USABLE_SIZE
47 # define HAVE_MALLOC_USABLE_SIZE
48 # define malloc_usable_size(a) _msize(a)
49 # elif defined HAVE_MALLOC_SIZE
50 # define HAVE_MALLOC_USABLE_SIZE
51 # define malloc_usable_size(a) malloc_size(a)
54 #ifdef HAVE_MALLOC_USABLE_SIZE
55 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
56 # include RUBY_ALTERNATIVE_MALLOC_HEADER
59 # elif defined(HAVE_MALLOC_NP_H)
60 # include <malloc_np.h>
61 # elif defined(HAVE_MALLOC_MALLOC_H)
62 # include <malloc/malloc.h>
66 #ifdef HAVE_SYS_TIME_H
70 #ifdef HAVE_SYS_RESOURCE_H
71 #include <sys/resource.h>
74 #if defined _WIN32 || defined __CYGWIN__
76 #elif defined(HAVE_POSIX_MEMALIGN)
77 #elif defined(HAVE_MEMALIGN)
81 #define rb_setjmp(env) RUBY_SETJMP(env)
82 #define rb_jmp_buf rb_jmpbuf_t
84 #if defined(_MSC_VER) && defined(_WIN64)
86 #pragma intrinsic(_umul128)
102 #elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
103 p = __builtin_mul_overflow(x, y, &z);
105 #elif defined(DSIZE_T)
112 #elif defined(_MSC_VER) && defined(_WIN64)
114 unsigned __int64 dz = _umul128(x, y, &dp);
134 #elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
135 p = __builtin_add_overflow(x, y, &z);
137 #elif defined(DSIZE_T)
155 struct optional t = size_mul_overflow(x, y);
163 struct optional t = size_mul_overflow(x, y);
164 struct optional u = size_mul_overflow(z, w);
172 size_mul_or_raise(size_t x, size_t y, VALUE exc)
174 struct optional t = size_mul_overflow(x, y);
194 return size_mul_or_raise(x, y, exc);
198 size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
200 struct optional t = size_mul_add_overflow(x, y, z);
221 return size_mul_add_or_raise(x, y, z, exc);
225 size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
227 struct optional t = size_mul_add_mul_overflow(x, y, z, w);
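/*
 * [annotation, not in gc.c] A minimal usage sketch for the
 * size_mul_*_or_raise helpers above: compute count * sizeof(VALUE) +
 * header bytes for an allocation, raising `exc` instead of silently
 * wrapping on overflow. `example_alloc_values` and its header struct
 * are hypothetical names, not part of this file.
 */
struct example_header { size_t len; };

static void *
example_alloc_values(size_t count)
{
    size_t bytes = size_mul_add_or_raise(count, sizeof(VALUE),
                                         sizeof(struct example_header),
                                         rb_eArgError);
    return ruby_xmalloc(bytes);
}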
246 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
258 #ifndef GC_HEAP_INIT_SLOTS
259 #define GC_HEAP_INIT_SLOTS 10000
261 #ifndef GC_HEAP_FREE_SLOTS
262 #define GC_HEAP_FREE_SLOTS 4096
264 #ifndef GC_HEAP_GROWTH_FACTOR
265 #define GC_HEAP_GROWTH_FACTOR 1.8
267 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
268 #define GC_HEAP_GROWTH_MAX_SLOTS 0
270 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
271 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
274 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
275 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
277 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
278 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
280 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
281 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
284 #ifndef GC_MALLOC_LIMIT_MIN
285 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024)
287 #ifndef GC_MALLOC_LIMIT_MAX
288 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024)
290 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
291 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
294 #ifndef GC_OLDMALLOC_LIMIT_MIN
295 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024)
297 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
298 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
300 #ifndef GC_OLDMALLOC_LIMIT_MAX
301 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024)
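/*
 * [annotation, not in gc.c] All of the defaults above are guarded by
 * #ifndef, so they can be overridden at build time, e.g.
 * -DGC_HEAP_GROWTH_FACTOR=2.0; at runtime the matching RUBY_GC_*
 * environment variables (RUBY_GC_HEAP_INIT_SLOTS,
 * RUBY_GC_HEAP_GROWTH_FACTOR, RUBY_GC_MALLOC_LIMIT, ...) adjust the
 * same tuning parameters when the process starts.
 */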
304 #ifndef PRINT_MEASURE_LINE
305 #define PRINT_MEASURE_LINE 0
307 #ifndef PRINT_ENTER_EXIT_TICK
308 #define PRINT_ENTER_EXIT_TICK 0
310 #ifndef PRINT_ROOT_TICKS
311 #define PRINT_ROOT_TICKS 0
314 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
378 #define RGENGC_DEBUG -1
380 #define RGENGC_DEBUG 0
383 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
384 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
386 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
398 #ifndef RGENGC_CHECK_MODE
399 #define RGENGC_CHECK_MODE 0
403 #define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
411 #ifndef RGENGC_OLD_NEWOBJ_CHECK
412 #define RGENGC_OLD_NEWOBJ_CHECK 0
420 #ifndef RGENGC_PROFILE
421 #define RGENGC_PROFILE 0
430 #ifndef RGENGC_ESTIMATE_OLDMALLOC
431 #define RGENGC_ESTIMATE_OLDMALLOC 1
437 #ifndef RGENGC_FORCE_MAJOR_GC
438 #define RGENGC_FORCE_MAJOR_GC 0
446 #define RGENGC_DEBUG 0
447 #ifdef RGENGC_CHECK_MODE
448 #undef RGENGC_CHECK_MODE
450 #define RGENGC_CHECK_MODE 0
451 #define RGENGC_PROFILE 0
452 #define RGENGC_ESTIMATE_OLDMALLOC 0
453 #define RGENGC_FORCE_MAJOR_GC 0
457 #ifndef GC_PROFILE_MORE_DETAIL
458 #define GC_PROFILE_MORE_DETAIL 0
460 #ifndef GC_PROFILE_DETAIL_MEMORY
461 #define GC_PROFILE_DETAIL_MEMORY 0
463 #ifndef GC_ENABLE_INCREMENTAL_MARK
464 #define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
466 #ifndef GC_ENABLE_LAZY_SWEEP
467 #define GC_ENABLE_LAZY_SWEEP 1
469 #ifndef CALC_EXACT_MALLOC_SIZE
470 #define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
472 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
473 #ifndef MALLOC_ALLOCATED_SIZE
474 #define MALLOC_ALLOCATED_SIZE 0
477 #define MALLOC_ALLOCATED_SIZE 0
479 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
480 #define MALLOC_ALLOCATED_SIZE_CHECK 0
483 #ifndef GC_DEBUG_STRESS_TO_CLASS
484 #define GC_DEBUG_STRESS_TO_CLASS 0
487 #ifndef RGENGC_OBJ_INFO
488 #define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
498 #if RGENGC_ESTIMATE_OLDMALLOC
531 #if GC_PROFILE_MORE_DETAIL
533 double gc_sweep_time;
535 size_t heap_use_pages;
536 size_t heap_live_objects;
537 size_t heap_free_objects;
539 size_t allocate_increase;
540 size_t allocate_limit;
543 size_t removing_objects;
544 size_t empty_objects;
545 #if GC_PROFILE_DETAIL_MEMORY
551 #if MALLOC_ALLOCATED_SIZE
552 size_t allocated_size;
555 #if RGENGC_PROFILE > 0
557 size_t remembered_normal_objects;
558 size_t remembered_shady_objects;
562 #if defined(_MSC_VER) || defined(__CYGWIN__)
563 #pragma pack(push, 1)
614 #if defined(_MSC_VER) || defined(__CYGWIN__)
623 #define popcount_bits rb_popcount_intptr
640 #define STACK_CHUNK_SIZE 500
663 #if GC_ENABLE_INCREMENTAL_MARK
680 #if MALLOC_ALLOCATED_SIZE
681 size_t allocated_size;
698 #if GC_ENABLE_INCREMENTAL_MARK
745 #if GC_PROFILE_MORE_DETAIL
754 #if RGENGC_PROFILE > 0
755 size_t total_generated_normal_object_count;
756 size_t total_generated_shady_object_count;
757 size_t total_shade_operation_count;
758 size_t total_promoted_count;
759 size_t total_remembered_normal_object_count;
760 size_t total_remembered_shady_object_count;
762 #if RGENGC_PROFILE >= 2
763 size_t generated_normal_object_count_types[RUBY_T_MASK];
764 size_t generated_shady_object_count_types[RUBY_T_MASK];
767 size_t remembered_normal_object_count_types[RUBY_T_MASK];
768 size_t remembered_shady_object_count_types[RUBY_T_MASK];
798 #if RGENGC_ESTIMATE_OLDMALLOC
803 #if RGENGC_CHECK_MODE >= 2
814 #if GC_ENABLE_INCREMENTAL_MARK
825 #if GC_DEBUG_STRESS_TO_CLASS
832 #define HEAP_PAGE_ALIGN_LOG 14
833 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
876 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
877 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
878 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
880 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
881 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH)
882 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
883 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
886 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
887 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
888 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
891 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
892 #define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
894 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
895 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
896 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
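/*
 * [annotation, not in gc.c] Worked example of the bitmap arithmetic
 * above, assuming 64-bit bits_t and sizeof(RVALUE) == 40: an object at
 * page offset 0x3e80 is slot NUM_IN_PAGE(p) = 0x3e80/40 = 400, so its
 * mark bit lives in word BITMAP_INDEX(p) = 400/64 = 6 at bit
 * BITMAP_OFFSET(p) = 400%64 = 16.
 */
static int
example_is_marked(VALUE obj)
{
    bits_t *bits = GET_HEAP_MARK_BITS(obj);   /* bitmap of obj's page */
    return MARKED_IN_BITMAP(bits, obj) != 0;  /* test a single bit */
}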
900 #define rb_objspace (*rb_objspace_of(GET_VM()))
901 #define rb_objspace_of(vm) ((vm)->objspace)
903 #define ruby_initial_gc_stress gc_params.gc_stress
907 #define malloc_limit objspace->malloc_params.limit
908 #define malloc_increase objspace->malloc_params.increase
909 #define malloc_allocated_size objspace->malloc_params.allocated_size
910 #define heap_pages_sorted objspace->heap_pages.sorted
911 #define heap_allocated_pages objspace->heap_pages.allocated_pages
912 #define heap_pages_sorted_length objspace->heap_pages.sorted_length
913 #define heap_pages_lomem objspace->heap_pages.range[0]
914 #define heap_pages_himem objspace->heap_pages.range[1]
915 #define heap_allocatable_pages objspace->heap_pages.allocatable_pages
916 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
917 #define heap_pages_final_slots objspace->heap_pages.final_slots
918 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
919 #define heap_eden (&objspace->eden_heap)
920 #define heap_tomb (&objspace->tomb_heap)
921 #define dont_gc objspace->flags.dont_gc
922 #define during_gc objspace->flags.during_gc
923 #define finalizing objspace->atomic_flags.finalizing
924 #define finalizer_table objspace->finalizer_table
925 #define global_list objspace->global_list
926 #define ruby_gc_stressful objspace->flags.gc_stressful
927 #define ruby_gc_stress_mode objspace->gc_stress_mode
928 #if GC_DEBUG_STRESS_TO_CLASS
929 #define stress_to_class objspace->stress_to_class
931 #define stress_to_class 0
935 gc_mode_verify(enum gc_mode mode)
937 #if RGENGC_CHECK_MODE > 0
944 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
950 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
951 #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
953 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
954 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
956 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
958 #define is_full_marking(objspace) TRUE
960 #if GC_ENABLE_INCREMENTAL_MARK
961 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
963 #define is_incremental_marking(objspace) FALSE
965 #if GC_ENABLE_INCREMENTAL_MARK
966 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
968 #define will_be_incremental_marking(objspace) FALSE
970 #define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
971 #define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
973 #if SIZEOF_LONG == SIZEOF_VOIDP
974 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
975 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
976 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
977 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
978 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
979 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
981 # error not supported
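/*
 * [annotation, not in gc.c] Round-trip sketch for the pointer-sized
 * case above: ids are heap pointers with the Fixnum tag bit set, which
 * is safe because aligned RVALUE slots always have a zero low bit.
 */
static void
example_obj_id_roundtrip(VALUE obj)
{
    VALUE oid = nonspecial_obj_id(obj);   /* obj | FIXNUM_FLAG */
    VALUE ref = obj_id_to_ref(oid);       /* oid ^ FIXNUM_FLAG */
    GC_ASSERT(ref == obj);
}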
984 #define RANY(o) ((RVALUE*)(o))
993 #define RZOMBIE(o) ((struct RZombie *)(o))
995 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
997 #if RUBY_MARK_FREE_DEBUG
998 int ruby_gc_debug_indent = 0;
1013 NORETURN(static void negative_size_allocation_error(const char *));
1023 static inline void gc_enter(rb_objspace_t *objspace, const char *event);
1024 static inline void gc_exit(rb_objspace_t *objspace, const char *event);
1026 static void gc_marks(rb_objspace_t *objspace, int full_mark);
1027 static void gc_marks_start(rb_objspace_t *objspace, int full);
1030 static void gc_marks_step(rb_objspace_t *objspace, int slots);
1057 static void shrink_stack_chunk_cache(mark_stack_t *stack);
1059 static size_t obj_memsize_of(VALUE obj, int use_all_types);
1060 static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1067 static double getrusage_time(void);
1068 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
1071 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1072 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1073 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1074 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1075 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1078 #define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1079 if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1080 *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1084 #define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
1086 #define gc_prof_record(objspace) (objspace)->profile.current_record
1087 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1089 #ifdef HAVE_VA_ARGS_MACRO
1090 # define gc_report(level, objspace, ...) \
1091 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1093 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1096 static const char *obj_info(VALUE obj);
1098 #define PUSH_MARK_FUNC_DATA(v) do { \
1099 struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
1100 objspace->mark_func_data = (v);
1102 #define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
1122 #if defined(__GNUC__) && defined(__i386__)
1123 typedef unsigned long long tick_t;
1124 #define PRItick "llu"
1125 static inline tick_t
1128 unsigned long long int x;
1129 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1133 #elif defined(__GNUC__) && defined(__x86_64__)
1134 typedef unsigned long long tick_t;
1135 #define PRItick "llu"
1137 static __inline__ tick_t
1140 unsigned long hi, lo;
1141 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1142 return ((unsigned long long)lo)|(((unsigned long long)hi)<<32);
1145 #elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1146 typedef unsigned long long tick_t;
1147 #define PRItick "llu"
1149 static __inline__ tick_t
1152 unsigned long long val = __builtin_ppc_get_timebase();
1156 #elif defined(_WIN32) && defined(_MSC_VER)
1158 typedef unsigned __int64 tick_t;
1159 #define PRItick "llu"
1161 static inline tick_t
1169 #define PRItick "llu"
1171 static inline tick_t
1178 #elif TICK_TYPE == 2
1179 typedef double tick_t;
1180 #define PRItick "4.9f"
1182 static inline tick_t
1185 return getrusage_time();
1188 #error "choose tick type"
1191 #define MEASURE_LINE(expr) do { \
1192 volatile tick_t start_time = tick(); \
1193 volatile tick_t end_time; \
1195 end_time = tick(); \
1196 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1200 #define MEASURE_LINE(expr) expr
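/*
 * [annotation, not in gc.c] Usage sketch: with PRINT_MEASURE_LINE set
 * to 1, wrapping a statement reports its tick() cost on stderr; with
 * the default 0 the macro is the bare expression and costs nothing:
 *
 *   MEASURE_LINE(gc_sweep_step(objspace, heap));
 */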
1203 #define FL_CHECK2(name, x, pred) \
1204 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1205 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1206 #define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1207 #define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1208 #define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
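/*
 * [annotation, not in gc.c] FL_CHECK2 shields the raw flag accessors
 * from special constants (Fixnum, Symbol, nil, ...), which have no
 * RBasic header; under RGENGC_CHECK_MODE a misuse aborts via rb_bug
 * instead of scribbling on memory:
 *
 *   FL_SET2(obj, FL_FINALIZE);          heap object: sets the bit
 *   FL_SET2(INT2FIX(1), FL_FINALIZE);   check mode: rb_bug(...)
 */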
1210 #define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1211 #define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1212 #define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1215 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1216 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1217 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1219 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1220 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1221 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1223 #define RVALUE_OLD_AGE 3
1224 #define RVALUE_AGE_SHIFT 5
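/*
 * [annotation, not in gc.c] Sketch of the age decode done by
 * RVALUE_FLAGS_AGE below, assuming the age is a two-bit field (the
 * promoted flag bits) starting at RVALUE_AGE_SHIFT, so values run
 * 0..RVALUE_OLD_AGE:
 */
static inline int
example_flags_age(VALUE flags)
{
    return (int)((flags >> RVALUE_AGE_SHIFT) & 0x3);  /* 0, 1, 2 or 3 */
}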
1233 RVALUE_FLAGS_AGE(VALUE flags)
1241 check_rvalue_consistency_force(const VALUE obj, int terminate)
1247 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1250 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1256 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1257 (void *)obj, (void *)page);
1262 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1275 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1279 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1283 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1293 if (age > 0 && wb_unprotected_bit) {
1294 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1298 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1299 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1304 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1305 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1306 obj_info(obj), age);
1310 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1311 obj_info(obj), age);
1325 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1331 if (err > 0 && terminate) {
1332 rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1338 #if RGENGC_CHECK_MODE == 0
1340 check_rvalue_consistency(const VALUE obj)
1346 check_rvalue_consistency(const VALUE obj)
1348 check_rvalue_consistency_force(obj, TRUE);
1360 void *poisoned = asan_poisoned_object_p(obj);
1361 asan_unpoison_object(obj, false);
1367 asan_poison_object(obj);
1376 check_rvalue_consistency(obj);
1383 check_rvalue_consistency(obj);
1391 check_rvalue_consistency(obj);
1398 check_rvalue_consistency(obj);
1405 check_rvalue_consistency(obj);
1412 check_rvalue_consistency(obj);
1426 check_rvalue_consistency(obj);
1427 return RVALUE_OLD_P_RAW(obj);
1430 #if RGENGC_CHECK_MODE || GC_DEBUG
1434 check_rvalue_consistency(obj);
1446 #if RGENGC_PROFILE >= 2
1447 objspace->profile.total_promoted_count++;
1472 int age = RVALUE_FLAGS_AGE(flags);
1475 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1482 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1484 check_rvalue_consistency(obj);
1491 check_rvalue_consistency(obj);
1495 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1497 check_rvalue_consistency(obj);
1504 check_rvalue_consistency(obj);
1509 check_rvalue_consistency(obj);
1522 check_rvalue_consistency(obj);
1529 RVALUE_DEMOTE_RAW(objspace, obj);
1531 if (RVALUE_MARKED(obj)) {
1535 check_rvalue_consistency(obj);
1547 check_rvalue_consistency(obj);
1550 RVALUE_AGE_RESET_RAW(obj);
1551 check_rvalue_consistency(obj);
1557 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1564 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1571 return RVALUE_MARKED(obj) == FALSE;
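/*
 * [annotation, not in gc.c] The three predicates above are the
 * tri-color abstraction read out of the mark and marking bitmaps:
 * white = not marked (unreached), grey = marked and still queued
 * (children unscanned), black = marked with children scanned.
 * Incremental marking relies on never letting a black object point at
 * a white one; the write barriers later in this file re-grey or
 * remember objects to keep that invariant.
 */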
1580 static inline void *
1605 rb_bug("lazy sweeping underway when freeing object space");
1640 heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1645 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
1675 heap_pages_expand_sorted_to(objspace, next_length);
1683 heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
1686 heap_pages_expand_sorted(objspace);
1694 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1706 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1709 asan_poison_object(obj);
1711 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1717 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1726 #if GC_ENABLE_INCREMENTAL_MARK
1730 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1755 static void rb_aligned_free(void *ptr);
1776 heap_unlink_page(objspace, heap_tomb, page);
1777 heap_page_free(objspace, page);
1801 if (page_body == 0) {
1806 page = calloc1(sizeof(struct heap_page));
1808 rb_aligned_free(page_body);
1819 end = start + limit;
1827 mid = (lo + hi) / 2;
1866 for (p = start; p != end; p++) {
1867 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
1868 heap_page_add_freeobj(objspace, page, (VALUE)p);
1882 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1884 heap_unlink_page(objspace, heap_tomb, page);
1897 const char *method = "recycle";
1901 page = heap_page_resurrect(objspace);
1904 page = heap_page_allocate(objspace);
1905 method = "allocate";
1907 if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
1924 struct heap_page *page = heap_page_create(objspace);
1925 heap_add_page(objspace, heap, page);
1926 heap_add_freepage(heap, page);
1934 heap_allocatable_pages_set(objspace, add);
1936 for (i = 0; i < add; i++) {
1937 heap_assign_page(objspace, heap);
1950 if (goal_ratio == 0.0) {
1960 if (f < 1.0) f = 1.1;
1962 next_used = (size_t)(f * used);
1967 " G(%1.2f), f(%1.2f),"
1970 goal_ratio, f, used, next_used);
1976 if (next_used > max_used) next_used = max_used;
1979 return next_used - used;
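/*
 * [annotation, not in gc.c] Worked example of the sizing logic above
 * with the defaults: used = 1000 pages and almost no free slots pushes
 * f toward GC_HEAP_GROWTH_FACTOR (1.8), giving next_used = 1800 and a
 * return value of 800 extra pages; a large free ratio pulls f toward
 * 1.0 and the heap stops growing (next_used is also capped by
 * max_used when GC_HEAP_GROWTH_MAX_SLOTS is non-zero).
 */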
1983 heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
1986 size_t next_used_limit = used + additional_pages;
1990 heap_allocatable_pages_set(objspace, next_used_limit - used);
1999 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
2005 heap_assign_page(objspace, heap);
2017 gc_sweep_continue(objspace, heap);
2020 gc_marks_continue(objspace, heap);
2037 heap_prepare(objspace, heap);
2044 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2049 asan_unpoison_object((VALUE)p, true);
2060 asan_unpoison_object((VALUE)p, true);
2071 asan_unpoison_object((VALUE)p, true);
2076 p = heap_get_freeobj_from_next_freepage(objspace, heap);
2093 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2101 #define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2102 #define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2104 #define gc_event_hook(objspace, event, data) do { \
2105 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2106 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2113 #if !__has_feature(memory_sanitizer)
2134 #if RGENGC_CHECK_MODE
2141 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2144 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2146 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2157 objspace->profile.total_generated_normal_object_count++;
2158 #if RGENGC_PROFILE >= 2
2163 objspace->profile.total_generated_shady_object_count++;
2164 #if RGENGC_PROFILE >= 2
2179 #if RGENGC_OLD_NEWOBJ_CHECK > 0
2186 if (--newobj_cnt == 0) {
2189 gc_mark_set(objspace, obj);
2190 RVALUE_AGE_SET_OLD(objspace, obj);
2197 check_rvalue_consistency(obj);
2210 rb_bug("object allocation during garbage collection phase");
2250 #if GC_DEBUG_STRESS_TO_CLASS
2253 for (i = 0; i < cnt; ++i) {
2267 return wb_protected ?
2301 #define UNEXPECTED_NODE(func) \
2302 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2303 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2322 rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2384 #undef rb_data_object_alloc
2405 #undef rb_data_typed_object_alloc
2424 if (ptr && type->function.dsize) {
2425 return type->function.dsize(ptr);
2448 register size_t hi, lo, mid;
2462 mid = (lo + hi) / 2;
2464 if (page->start <= p) {
2485 free_const_entry_i(VALUE value, void *data)
2530 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
2546 rb_bug("obj_free() called for broken object");
2556 obj_free_object_id(objspace, obj);
2562 #if RGENGC_CHECK_MODE
2563 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
2564 CHECK(RVALUE_WB_UNPROTECTED);
2565 CHECK(RVALUE_MARKED);
2566 CHECK(RVALUE_MARKING);
2567 CHECK(RVALUE_UNCOLLECTIBLE);
2624 #if USE_DEBUG_COUNTER
2682 if (RANY(obj)->as.regexp.ptr) {
2689 int free_immediately = FALSE;
2690 void (*dfree)(void *);
2695 dfree = RANY(obj)->as.typeddata.type->function.dfree;
2696 if (0 && free_immediately == 0) {
2702 dfree = RANY(obj)->as.data.dfree;
2710 else if (free_immediately) {
2715 make_zombie(objspace, obj, dfree, data);
2726 if (RANY(obj)->as.match.rmatch) {
2728 #if USE_DEBUG_COUNTER
2748 if (RANY(obj)->as.file.fptr) {
2749 make_io_zombie(objspace, obj);
2874 make_zombie(objspace, obj, 0, 0);
2883 #define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
2884 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
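/*
 * [annotation, not in gc.c] Example with 40-byte RVALUEs: these
 * evaluate to OBJ_ID_INCREMENT == 20 and OBJ_ID_INITIAL == 40, so the
 * monotonically assigned object ids run 40, 60, 80, ...; the exact
 * spacing tracks sizeof(RVALUE) on the build.
 */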
2905 static const struct st_hash_type object_id_hash_type = {
2919 #if RGENGC_ESTIMATE_OLDMALLOC
2941 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
2964 pstart = page->start;
2967 if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
2974 objspace_each_objects_protected(VALUE arg)
2982 incremental_enable(VALUE _)
3040 if (prev_dont_incremental) {
3065 asan_unpoison_object(obj, false);
3066 bool used_p = p->as.basic.flags;
3080 if (!p->as.basic.klass) break;
3086 if (!p->as.basic.klass) break;
3090 if (ptr || !used_p) {
3091 asan_poison_object(obj);
3099 return internal_object_p(obj);
3103 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3108 for (; p != pend; p++) {
3110 if (!internal_object_p(v)) {
3175 return os_obj_of(of);
3204 should_be_callable(VALUE block)
3239 should_be_finalizable(obj);
3244 should_be_callable(block);
3247 return define_final0(obj, block);
3263 table = (VALUE)data;
3270 for (i = 0; i < len; i++) {
3291 should_be_finalizable(obj);
3292 should_be_callable(block);
3293 return define_final0(obj, block);
3305 table = (VALUE)data;
3330 #define RESTORE_FINALIZER() (\
3331 ec->cfp = saved.cfp, \
3332 rb_set_errinfo(saved.errinfo))
3336 saved.cfp = ec->cfp;
3344 for (i = saved.finished;
3346 saved.finished = ++i) {
3350 #undef RESTORE_FINALIZER
3364 run_finalizer(objspace, zombie, (VALUE)table);
3374 asan_unpoison_object(zombie, false);
3375 next_zombie = RZOMBIE(zombie)->next;
3378 run_final(objspace, zombie);
3382 obj_free_object_id(objspace, zombie);
3385 RZOMBIE(zombie)->basic.flags = 0;
3389 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
3393 zombie = next_zombie;
3403 finalize_list(objspace, zombie);
3408 gc_finalize_deferred(void *dmy)
3412 finalize_deferred(objspace);
3420 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
3448 #if RGENGC_CHECK_MODE >= 2
3449 gc_verify_internal_consistency(objspace);
3456 finalize_deferred(objspace);
3470 run_finalizer(objspace, curr->obj, curr->table);
3481 gc_enter(objspace, "rb_objspace_call_finalizer");
3487 void *poisoned = asan_poisoned_object_p((VALUE)p);
3488 asan_unpoison_object((VALUE)p, false);
3495 p->as.free.flags = 0;
3497 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
3502 else if (RANY(p)->as.data.dfree) {
3503 make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
3507 if (RANY(p)->as.file.fptr) {
3508 make_io_zombie(objspace, (VALUE)p);
3514 asan_poison_object((VALUE)p);
3520 gc_exit(objspace, "rb_objspace_call_finalizer");
3535 if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
3564 is_swept_object(objspace, ptr) ||
3583 if (!is_garbage_object(objspace, ptr)) {
3595 check_rvalue_consistency(obj);
3603 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
3610 return is_garbage_object(objspace, obj);
3641 #if SIZEOF_LONG == SIZEOF_VOIDP
3642 #define NUM2PTR(x) NUM2ULONG(x)
3643 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3644 #define NUM2PTR(x) NUM2ULL(x)
3660 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
3669 if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
3670 is_live_object(objspace, orig)) {
3684 return id2ref(objid);
3694 #if SIZEOF_LONG == SIZEOF_VOIDP
3704 return get_heap_object_id(obj);
3741 return rb_find_object_id(obj, nonspecial_obj_id_);
3803 return rb_find_object_id(obj, cached_object_id);
3809 obj_memsize_of(VALUE obj, int use_all_types)
3928 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
3938 return obj_memsize_of(obj, TRUE);
3951 type_sym(size_t type)
3954 #define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
4047 for (; p < pend; p++) {
4048 void *poisoned = asan_poisoned_object_p((VALUE)p);
4049 asan_unpoison_object((VALUE)p, false);
4058 asan_poison_object((VALUE)p);
4107 gc_setup_mark_bits(struct heap_page *page)
4122 int empty_slots = 0, freed_slots = 0, final_slots = 0;
4123 RVALUE *p, *pend, *offset;
4126 gc_report(2, objspace, "page_sweep: start.\n");
4143 asan_unpoison_object((VALUE)p, false);
4147 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
4148 #if USE_RGENGC && RGENGC_CHECK_MODE
4150 if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
4151 if (rgengc_remembered_sweep(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);
4154 if (obj_free(objspace, (VALUE)p)) {
4159 heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
4160 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
4162 asan_poison_object((VALUE)p);
4182 gc_setup_mark_bits(sweep_page);
4184 #if GC_PROFILE_MORE_DETAIL
4187 record->removing_objects += final_slots + freed_slots;
4188 record->empty_objects += empty_slots;
4191 if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
4196 sweep_page->free_slots = freed_slots + empty_slots;
4204 gc_finalize_deferred_register(objspace);
4208 gc_report(2, objspace, "page_sweep: end.\n");
4210 return freed_slots + empty_slots;
4219 heap_set_increment(objspace, 1);
4220 if (!heap_increment(objspace, heap)) {
4227 gc_mode_name(enum gc_mode mode)
4233 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
4240 #if RGENGC_CHECK_MODE
4242 switch (prev_mode) {
4248 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
4257 #if GC_ENABLE_INCREMENTAL_MARK
4263 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
4276 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
4283 gc_sweep_start_heap(objspace, heap_eden);
4289 gc_report(1, objspace, "gc_sweep_finish\n");
4291 gc_prof_set_heap_info(objspace);
4292 heap_pages_free_unused_pages(objspace);
4295 if (heap_allocatable_pages < heap_tomb->total_pages) {
4296 heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
4302 #if RGENGC_CHECK_MODE >= 2
4303 gc_verify_internal_consistency(objspace);
4311 int unlink_limit = 3;
4312 #if GC_ENABLE_INCREMENTAL_MARK
4315 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
4317 gc_report(2, objspace, "gc_sweep_step\n");
4322 #if GC_ENABLE_LAZY_SWEEP
4323 gc_prof_sweep_timer_start(objspace);
4327 int free_slots = gc_page_sweep(objspace, heap, sweep_page);
4336 heap_unlink_page(objspace, heap, sweep_page);
4337 heap_add_page(objspace, heap_tomb, sweep_page);
4340 #if GC_ENABLE_INCREMENTAL_MARK
4342 if (heap_add_poolpage(objspace, heap, sweep_page)) {
4347 heap_add_freepage(heap, sweep_page);
4351 heap_add_freepage(heap, sweep_page);
4361 gc_sweep_finish(objspace);
4364 #if GC_ENABLE_LAZY_SWEEP
4365 gc_prof_sweep_timer_stop(objspace);
4377 gc_sweep_step(objspace, heap);
4387 gc_enter(objspace, "sweep_continue");
4390 gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
4393 gc_sweep_step(objspace, heap);
4394 gc_exit(objspace, "sweep_continue");
4402 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4404 if (immediate_sweep) {
4405 #if !GC_ENABLE_LAZY_SWEEP
4406 gc_prof_sweep_timer_start(objspace);
4408 gc_sweep_start(objspace);
4409 gc_sweep_rest(objspace);
4410 #if !GC_ENABLE_LAZY_SWEEP
4411 gc_prof_sweep_timer_stop(objspace);
4416 gc_sweep_start(objspace);
4424 gc_heap_prepare_minimum_pages(objspace, heap_eden);
4430 stack_chunk_alloc(void)
4455 chunk = chunk->next;
4464 stack->cache = chunk;
4474 chunk = stack->cache;
4490 next = stack->cache;
4497 next = stack_chunk_alloc();
4500 stack->chunk = next;
4511 add_stack_chunk_cache(stack, stack->chunk);
4512 stack->chunk = prev;
4522 while (chunk != NULL) {
4533 push_mark_stack_chunk(stack);
4541 if (is_mark_stack_empty(stack)) {
4544 if (stack->index == 1) {
4546 pop_mark_stack_chunk(stack);
4554 #if GC_ENABLE_INCREMENTAL_MARK
4559 for (i=0; i<limit; i++) {
4572 int limit = stack->index;
4575 if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
4576 chunk = chunk->next;
4577 limit = stack->limit;
4579 rb_bug("invalid_mark_stack: unreachable");
4592 for (i=0; i < 4; i++) {
4593 add_stack_chunk_cache(stack, stack_chunk_alloc());
4600 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
4602 #define STACK_START (ec->machine.stack_start)
4603 #define STACK_END (ec->machine.stack_end)
4604 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
4606 #ifdef __EMSCRIPTEN__
4607 #undef STACK_GROW_DIRECTION
4608 #define STACK_GROW_DIRECTION 1
4611 #if STACK_GROW_DIRECTION < 0
4612 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
4613 #elif STACK_GROW_DIRECTION > 0
4614 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
4616 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
4617 : (size_t)(STACK_END - STACK_START + 1))
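/*
 * [annotation, not in gc.c] STACK_LENGTH is pointer subtraction on
 * VALUE*, so it counts slots, not bytes: on a downward-growing stack
 * whose start and end addresses are 0x8000 bytes apart, a 64-bit
 * build scans 0x1000 VALUE slots of machine stack conservatively
 * during marking.
 */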
4619 #if !STACK_GROW_DIRECTION
4641 #define PREVENT_STACK_OVERFLOW 1
4642 #ifndef PREVENT_STACK_OVERFLOW
4643 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
4644 # define PREVENT_STACK_OVERFLOW 1
4646 # define PREVENT_STACK_OVERFLOW 0
4649 #if PREVENT_STACK_OVERFLOW
4658 return length > maximum_length;
4661 #define stack_check(ec, water_mark) FALSE
4664 #define STACKFRAME_FOR_CALL_CFUNC 2048
4685 gc_mark_maybe(objspace, v);
4695 if (end <= start) return;
4697 mark_locations_array(objspace, start, n);
4711 for (i=0; i<n; i++) {
4712 gc_mark(objspace, values[i]);
4722 for (i=0; i<n; i++) {
4723 gc_mark_and_pin(objspace, values[i]);
4732 for (i=0; i<n; i++) {
4735 gc_mark_and_pin(objspace, values[i]);
4744 gc_mark_and_pin_stack_values(objspace, n, values);
4751 gc_mark(objspace, (VALUE)value);
4759 gc_mark_and_pin(objspace, (VALUE)value);
4781 gc_mark_and_pin(objspace, (VALUE)key);
4811 gc_mark(objspace, (VALUE)value);
4820 gc_mark_and_pin(objspace, (VALUE)key);
4821 gc_mark_and_pin(objspace, (VALUE)value);
4830 gc_mark_and_pin(objspace, (VALUE)key);
4831 gc_mark(objspace, (VALUE)value);
4853 gc_mark(objspace, RHASH(hash)->ifnone);
4878 switch (def->type) {
4910 mark_method_entry_i(VALUE me, void *data)
4914 gc_mark(objspace, me);
4927 mark_const_entry_i(VALUE value, void *data)
4932 gc_mark(objspace, ce->value);
4933 gc_mark(objspace, ce->file);
4944 #if STACK_GROW_DIRECTION < 0
4945 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
4946 #elif STACK_GROW_DIRECTION > 0
4947 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
4949 #define GET_STACK_BOUNDS(start, end, appendix) \
4950 ((STACK_END < STACK_START) ? \
4951 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
4955 const VALUE *stack_start, const VALUE *stack_end);
4963 } save_regs_gc_mark;
4964 VALUE *stack_start, *stack_end;
4967 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
4977 mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
4979 mark_stack_locations(objspace, ec, stack_start, stack_end);
4986 VALUE *stack_start, *stack_end;
4989 mark_stack_locations(objspace, ec, stack_start, stack_end);
4994 const VALUE *stack_start, const VALUE *stack_end)
4997 gc_mark_locations(objspace, stack_start, stack_end);
4999 #if defined(__mc68000__)
5000 gc_mark_locations(objspace,
5001 (VALUE*)((char*)stack_start + 2),
5002 (VALUE*)((char*)stack_end - 2));
5023 if (is_pointer_to_heap(objspace, (void *)obj)) {
5025 asan_unpoison_object(obj, false);
5034 gc_mark_and_pin(objspace, obj);
5040 asan_poison_object(obj);
5054 if (RVALUE_MARKED(obj)) return 0;
5071 #if RGENGC_PROFILE > 0
5072 objspace->profile.total_remembered_shady_object_count++;
5073 #if RGENGC_PROFILE >= 2
5092 if (RVALUE_WB_UNPROTECTED(obj)) {
5093 if (gc_remember_unprotected(objspace, obj)) {
5094 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5098 if (!RVALUE_OLD_P(obj)) {
5099 if (RVALUE_MARKED(obj)) {
5101 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5102 RVALUE_AGE_SET_OLD(objspace, obj);
5104 if (!RVALUE_MARKING(obj)) {
5105 gc_grey(objspace, obj);
5109 rgengc_remember(objspace, obj);
5113 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5114 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
5127 #if RGENGC_CHECK_MODE
5128 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
5129 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
5132 #if GC_ENABLE_INCREMENTAL_MARK
5148 check_rvalue_consistency(obj);
5151 if (!RVALUE_OLD_P(obj)) {
5152 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
5153 RVALUE_AGE_INC(objspace, obj);
5157 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
5160 check_rvalue_consistency(obj);
5172 rgengc_check_relation(objspace, obj);
5173 if (!gc_mark_set(objspace, obj)) return;
5175 gc_aging(objspace, obj);
5176 gc_grey(objspace, obj);
5195 if (!is_markable_object(objspace, obj)) return;
5196 gc_pin(objspace, obj);
5197 gc_mark_ptr(objspace, obj);
5203 if (!is_markable_object(objspace, obj)) return;
5204 gc_mark_ptr(objspace, obj);
5233 if (RVALUE_OLD_P(obj)) {
5250 gc_mark_values(objspace, (long)env->env_size, env->env);
5253 gc_mark(objspace, (VALUE)env->iseq);
5257 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
5258 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
5259 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
5262 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
5263 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
5264 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
5265 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
5268 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
5271 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
5274 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
5275 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
5276 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
5279 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
5298 #if VM_CHECK_MODE > 0
5309 gc_mark_set_parent(objspace, obj);
5325 rb_bug("rb_gc_mark() called for broken object");
5333 gc_mark_imemo(objspace, obj);
5337 gc_mark(objspace, any->as.basic.klass);
5365 gc_mark(objspace, root);
5370 for (i=0; i < len; i++) {
5371 gc_mark(objspace, ptr[i]);
5384 mark_hash(objspace, obj);
5400 if (mark_func) (*mark_func)(ptr);
5411 for (i = 0; i < len; i++) {
5412 gc_mark(objspace, ptr[i]);
5462 gc_mark(objspace, ptr[i]);
5479 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
5481 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
5494 #if GC_ENABLE_INCREMENTAL_MARK
5495 size_t marked_slots_at_the_beginning = objspace->marked_slots;
5496 size_t popped_count = 0;
5499 while (pop_mark_stack(mstack, &obj)) {
5503 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
5505 gc_mark_children(objspace, obj);
5507 #if GC_ENABLE_INCREMENTAL_MARK
5510 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
5515 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
5527 if (is_mark_stack_empty(mstack)) {
5528 shrink_stack_chunk_cache(mstack);
5539 return gc_mark_stacked_objects(objspace, TRUE, count);
5545 return gc_mark_stacked_objects(objspace, FALSE, 0);
5548 #if PRINT_ROOT_TICKS
5549 #define MAX_TICKS 0x100
5550 static tick_t mark_ticks[MAX_TICKS];
5551 static const char *mark_ticks_categories[MAX_TICKS];
5554 show_mark_ticks(void)
5558 for (i=0; i<MAX_TICKS; i++) {
5559 const char *category = mark_ticks_categories[i];
5561 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
5572 gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
5576 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5578 #if PRINT_ROOT_TICKS
5579 tick_t start_tick = tick();
5581 const char *prev_category = 0;
5583 if (mark_ticks_categories[0] == 0) {
5588 if (categoryp) *categoryp = "xxx";
5594 #if PRINT_ROOT_TICKS
5595 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \
5596 if (prev_category) { \
5597 tick_t t = tick(); \
5598 mark_ticks[tick_count] = t - start_tick; \
5599 mark_ticks_categories[tick_count] = prev_category; \
5602 prev_category = category; \
5603 start_tick = tick(); \
5606 #define MARK_CHECKPOINT_PRINT_TICK(category)
5609 #define MARK_CHECKPOINT(category) do { \
5610 if (categoryp) *categoryp = category; \
5611 MARK_CHECKPOINT_PRINT_TICK(category); \
5617 if (vm->self) gc_mark(objspace, vm->self);
5623 mark_current_machine_context(objspace, ec);
5628 gc_mark_maybe(objspace, *list->varptr);
5644 #undef MARK_CHECKPOINT
5647 #if RGENGC_CHECK_MODE >= 4
5649 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
5650 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
5651 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
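/*
 * [annotation, not in gc.c] Root-category strings share the reflist
 * with real object references by tagging: shift the pointer left one
 * bit and set bit 0, which no aligned VALUE pointer has. Round trip:
 *
 *   VALUE sig = MAKE_ROOTSIG("machine_context");
 *   IS_ROOTSIG(sig)   -> nonzero
 *   GET_ROOTSIG(sig)  -> "machine_context"
 */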
5659 static struct reflist *
5662 struct reflist *refs = xmalloc(sizeof(struct reflist));
5665 refs->list[0] = obj;
5671 reflist_destruct(struct reflist *refs)
5678 reflist_add(struct reflist *refs, VALUE obj)
5680 if (refs->pos == refs->size) {
5685 refs->list[refs->pos++] = obj;
5689 reflist_dump(struct reflist *refs)
5692 for (i=0; i<refs->pos; i++) {
5694 if (IS_ROOTSIG(obj)) {
5705 reflist_referred_from_machine_context(struct reflist *refs)
5708 for (i=0; i<refs->pos; i++) {
5710 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
5725 const char *category;
5731 allrefs_add(struct allrefs *data, VALUE obj)
5733 struct reflist *refs;
5736 reflist_add(refs, data->root_obj);
5740 refs = reflist_create(data->root_obj);
5749 struct allrefs *data = (struct allrefs *)ptr;
5751 if (allrefs_add(data, obj)) {
5752 push_mark_stack(&data->mark_stack, obj);
5759 struct allrefs *data = (struct allrefs *)ptr;
5761 data->root_obj = MAKE_ROOTSIG(data->category);
5763 if (allrefs_add(data, obj)) {
5764 push_mark_stack(&data->mark_stack, obj);
5771 struct allrefs data;
5772 struct mark_func_data_struct mfd;
5777 data.objspace = objspace;
5779 init_mark_stack(&data.mark_stack);
5781 mfd.mark_func = allrefs_roots_i;
5787 gc_mark_roots(objspace, &data.category);
5791 while (pop_mark_stack(&data.mark_stack, &obj)) {
5794 free_stack_chunks(&data.mark_stack);
5797 return data.references;
5803 struct reflist *refs = (struct reflist *)value;
5804 reflist_destruct(refs);
5809 objspace_allrefs_destruct(struct st_table *refs)
5811 st_foreach(refs, objspace_allrefs_destruct_i, 0);
5815 #if RGENGC_CHECK_MODE >= 5
5820 struct reflist *refs = (struct reflist *)v;
5830 fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
5839 struct reflist *refs = (struct reflist *)v;
5844 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
5845 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
5848 if (reflist_referred_from_machine_context(refs)) {
5853 objspace->rgengc.error_count++;
5864 #if RGENGC_ESTIMATE_OLDMALLOC
5869 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
5875 if (objspace->rgengc.error_count > 0) {
5876 #if RGENGC_CHECK_MODE >= 5
5877 allrefs_dump(objspace);
5879 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
5882 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
5883 objspace->rgengc.allrefs_table = 0;
5887 #if RGENGC_ESTIMATE_OLDMALLOC
5908 check_generation_i(const VALUE child, void *ptr)
5915 if (!RVALUE_OLD_P(child)) {
5916 if (!RVALUE_REMEMBERED(parent) &&
5917 !RVALUE_REMEMBERED(child) &&
5918 !RVALUE_UNCOLLECTIBLE(child)) {
5919 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
5926 check_color_i(const VALUE child, void *ptr)
5931 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
5932 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
5933 obj_info(parent), obj_info(child));
5940 check_children_i(const VALUE child, void *ptr)
5943 if (check_rvalue_consistency_force(child, FALSE) != 0) {
5944 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
5945 obj_info(child), obj_info(data->parent));
5953 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
5960 void *poisoned = asan_poisoned_object_p(obj);
5961 asan_unpoison_object(obj, false);
5987 if (RVALUE_BLACK_P(obj)) {
6003 asan_poison_object(obj);
6015 unsigned int has_remembered_shady = FALSE;
6016 unsigned int has_remembered_old = FALSE;
6017 int remembered_old_objects = 0;
6018 int free_objects = 0;
6019 int zombie_objects = 0;
6023 void *poisoned = asan_poisoned_object_p(val);
6024 asan_unpoison_object(val, false);
6026 if (RBASIC(val) == 0) free_objects++;
6029 has_remembered_shady = TRUE;
6032 has_remembered_old = TRUE;
6033 remembered_old_objects++;
6038 asan_poison_object(val);
6051 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
6052 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
6056 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
6057 (void *)page, obj ? obj_info(obj) : "");
6063 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
6067 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
6070 return remembered_old_objects;
6079 int remembered_old_objects = 0;
6083 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
6087 asan_unpoison_object((VALUE)p, false);
6089 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info((VALUE)p));
6092 asan_poison_object((VALUE)prev);
6097 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
6101 return remembered_old_objects;
6107 int remembered_old_objects = 0;
6108 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
6109 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
6110 return remembered_old_objects;
6124 gc_verify_internal_consistency_m(VALUE dummy)
6141 objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
6144 #if RGENGC_CHECK_MODE >= 5
6149 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
6159 fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
6177 size_t list_count = 0;
6190 rb_bug("inconsistent finalizing object count:\n"
6193 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
6210 gc_verify_transient_heap_internal_consistency(VALUE dmy)
6227 #if GC_ENABLE_INCREMENTAL_MARK
6230 if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
6255 #if GC_ENABLE_INCREMENTAL_MARK
6276 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
6279 gc_mark_children(objspace, (VALUE)p);
6288 gc_mark_stacked_objects_all(objspace);
6292 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
6298 heap_add_freepage(heap, page);
6308 #if GC_ENABLE_INCREMENTAL_MARK
6312 heap_move_pooled_pages_to_free_pages(heap_eden);
6313 gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");
6318 rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
6321 gc_mark_roots(objspace, 0);
6324 gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
6328 #if RGENGC_CHECK_MODE >= 2
6329 if (gc_verify_heap_pages(objspace) != 0) {
6330 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
6336 gc_marks_wb_unprotected_objects(objspace);
6340 #if RGENGC_CHECK_MODE >= 2
6341 gc_verify_internal_consistency(objspace);
6353 #if RGENGC_CHECK_MODE >= 4
6354 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
6371 if (sweep_slots > max_free_slots) {
6382 if (sweep_slots < min_free_slots) {
6383 if (!full_marking) {
6385 full_marking = TRUE;
6390 gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
6396 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6397 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
6398 heap_increment(objspace, heap);
6419 gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
6423 if (sweep_slots < min_free_slots) {
6424 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6425 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
6426 heap_increment(objspace, heap);
6441 #if GC_ENABLE_INCREMENTAL_MARK
6444 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
6445 if (gc_marks_finish(objspace)) {
6457 gc_report(1, objspace, "gc_marks_rest\n");
6459 #if GC_ENABLE_INCREMENTAL_MARK
6465 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
6466 } while (gc_marks_finish(objspace) == FALSE);
6469 gc_mark_stacked_objects_all(objspace);
6470 gc_marks_finish(objspace);
6481 #if GC_ENABLE_INCREMENTAL_MARK
6483 gc_enter(objspace, "marks_continue");
6492 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
6495 from = "pooled-pages";
6497 else if (heap_increment(objspace, heap)) {
6499 from = "incremented-pages";
6503 gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
6507 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
6508 gc_marks_rest(objspace);
6513 gc_exit(objspace, "marks_continue");
6520 gc_prof_mark_timer_start(objspace);
6527 gc_marks_start(objspace, full_mark);
6529 gc_marks_rest(objspace);
6532 #if RGENGC_PROFILE > 0
6540 gc_marks_start(objspace, TRUE);
6541 gc_marks_rest(objspace);
6545 gc_prof_mark_timer_stop(objspace);
6557 const char *status = " ";
6589 return RVALUE_REMEMBERED(obj);
6616 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
6617 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
6619 check_rvalue_consistency(obj);
6622 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
6625 #if RGENGC_PROFILE > 0
6626 if (!rgengc_remembered(objspace, obj)) {
6627 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6628 objspace->profile.total_remembered_normal_object_count++;
6629 #if RGENGC_PROFILE >= 2
6636 return rgengc_remembersetbits_set(objspace, obj);
6642 int result = rgengc_remembersetbits_get(objspace, obj);
6643 check_rvalue_consistency(obj);
6650 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
6651 return rgengc_remembered_sweep(objspace, obj);
6654 #ifndef PROFILE_REMEMBERSET_MARK
6655 #define PROFILE_REMEMBERSET_MARK 0
6663 #if PROFILE_REMEMBERSET_MARK
6664 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
6666 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
6676 #if PROFILE_REMEMBERSET_MARK
6696 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
6700 gc_mark_children(objspace, obj);
6708 #if PROFILE_REMEMBERSET_MARK
6715 #if PROFILE_REMEMBERSET_MARK
6716 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
6718 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
6744 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
6745 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
6746 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
6751 if (!rgengc_remembered(objspace, a)) {
6752 rgengc_remember(objspace, a);
6753 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
6758 if (RVALUE_WB_UNPROTECTED(b)) {
6759 gc_remember_unprotected(objspace, b);
6762 RVALUE_AGE_SET_OLD(objspace, b);
6763 rgengc_remember(objspace, b);
6766 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
6769 check_rvalue_consistency(a);
6770 check_rvalue_consistency(b);
6773 #if GC_ENABLE_INCREMENTAL_MARK
6777 gc_mark_set_parent(objspace, parent);
6778 rgengc_check_relation(objspace, obj);
6779 if (gc_mark_set(objspace, obj) == FALSE) return;
6780 gc_aging(objspace, obj);
6781 gc_grey(objspace, obj);
6789 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
6791 if (RVALUE_BLACK_P(a)) {
6792 if (RVALUE_WHITE_P(b)) {
6793 if (!RVALUE_WB_UNPROTECTED(a)) {
6794 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
6795 gc_mark_from(objspace, b, a);
6798 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
6799 if (!RVALUE_WB_UNPROTECTED(b)) {
6800 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
6801 RVALUE_AGE_SET_OLD(objspace, b);
6803 if (RVALUE_BLACK_P(b)) {
6804 gc_grey(objspace, b);
6808 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
6809 gc_remember_unprotected(objspace, b);
6815 #define gc_writebarrier_incremental(a, b, objspace)
6827 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
6831 gc_writebarrier_generational(a, b, objspace);
6835 gc_writebarrier_incremental(a, b, objspace);
6842 if (RVALUE_WB_UNPROTECTED(obj)) {
6848 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
6849 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
6851 if (RVALUE_OLD_P(obj)) {
6852 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
6853 RVALUE_DEMOTE(objspace, obj);
6854 gc_mark_set(objspace, obj);
6855 gc_remember_unprotected(objspace, obj);
6858 objspace->profile.total_shade_operation_count++;
6859 #if RGENGC_PROFILE >= 2
6865 RVALUE_AGE_RESET(obj);
6881 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
6884 if (RVALUE_BLACK_P(obj)) {
6885 gc_grey(objspace, obj);
6889 if (RVALUE_OLD_P(obj)) {
6890 rgengc_remember(objspace, obj);
6895 static st_table *rgengc_unprotect_logging_table;
6905 rgengc_unprotect_logging_exit_func(void)
6907 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
6915 if (rgengc_unprotect_logging_table == 0) {
6917 atexit(rgengc_unprotect_logging_exit_func);
6920 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6925 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
6945 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
6946 if (!RVALUE_OLD_P(dest)) {
6948 RVALUE_AGE_RESET_RAW(dest);
6951 RVALUE_DEMOTE(objspace, dest);
6955 check_rvalue_consistency(dest);
6981 static ID ID_marked;
6983 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
6987 #define I(s) ID_##s = rb_intern(#s);
7000 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
7001 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
7002 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
int is_old = RVALUE_OLD_P(obj);
gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
if (RVALUE_MARKED(obj)) {
#if GC_ENABLE_INCREMENTAL_MARK
#if GC_ENABLE_INCREMENTAL_MARK
#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
if (tmp->varptr == addr) {
#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
if (!heap_increment(objspace, heap)) {
    heap_set_increment(objspace, 1);
    heap_increment(objspace, heap);
gc_prof_set_malloc_info(objspace);
#if RGENGC_ESTIMATE_OLDMALLOC
#if GC_PROFILE_MORE_DETAIL
objspace->profile.prepare_time = getrusage_time();
#if GC_PROFILE_MORE_DETAIL
objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
return gc_start(objspace, reason);
#if RGENGC_CHECK_MODE >= 2
gc_verify_internal_consistency(objspace);
gc_enter(objspace, "gc_start");
do_full_mark = TRUE;
do_full_mark = TRUE;
do_full_mark = TRUE;
#if GC_ENABLE_INCREMENTAL_MARK
gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
#if USE_DEBUG_COUNTER
#if RGENGC_ESTIMATE_OLDMALLOC
gc_prof_setup_new_record(objspace, reason);
gc_reset_malloc_info(objspace);
gc_prof_timer_start(objspace);
gc_marks(objspace, do_full_mark);
gc_prof_timer_stop(objspace);
gc_exit(objspace, "gc_start");
if (marking || sweeping) {
    gc_enter(objspace, "gc_rest");
    gc_marks_rest(objspace);
    gc_sweep_rest(objspace);
    gc_exit(objspace, "gc_rest");
#if GC_ENABLE_INCREMENTAL_MARK
static char buff[0x10];
gc_current_status_fill(objspace, buff);
#if PRINT_ENTER_EXIT_TICK
static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];
gc_record(rb_objspace_t *objspace, int direction, const char *event)
if (direction == 0) {
    enter_tick = tick();
    gc_current_status_fill(objspace, last_gc_status);
tick_t exit_tick = tick();
char current_gc_status[0x10];
gc_current_status_fill(objspace, current_gc_status);
fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
        enter_tick - last_exit_tick,
        exit_tick - enter_tick,
        last_gc_status, current_gc_status,
last_exit_tick = exit_tick;
fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
        exit_tick - enter_tick,
        last_gc_status, current_gc_status,
gc_record(rb_objspace_t *objspace, int direction, const char *event)
gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
gc_record(objspace, 0, event);
gc_record(objspace, 1, event);
gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
gc_with_gvl(void *ptr)
return !RVALUE_PINNED(obj);
wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
marking = RVALUE_MARKING((VALUE)src);
if (wb_unprotected) {
if (uncollectible) {
src->as.moved.destination = (VALUE)dest;
src->as.moved.next = moved_list;
if (free->slot == free->page->start + free->page->total_slots - 1) {
    free->page = page_list[free->index];
size_t total_pages = heap_eden->total_pages;
page = page_list[0];
free->objspace = objspace;
page = page_list[total_pages - 1];
scan->index = total_pages - 1;
compare_pinned(const void *left, const void *right, void *dummy)
left_page = *(struct heap_page * const *)left;
right_page = *(struct heap_page * const *)right;
compare_free_slots(const void *left, const void *right, void *dummy)
left_page = *(struct heap_page * const *)left;
right_page = *(struct heap_page * const *)right;
size_t total_pages = heap_eden->total_pages;
page_list[i++] = page;
page_list = allocate_page_list(objspace, comparator);
init_cursors(objspace, &free_cursor, &scan_cursor, page_list);
while (not_met(&free_cursor, &scan_cursor)) {
    void *free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
    asan_unpoison_object((VALUE)free_cursor.slot, false);
    while (BUILTIN_TYPE(free_cursor.slot) != T_NONE && not_met(&free_cursor, &scan_cursor)) {
        if (free_slot_poison) {
            asan_poison_object((VALUE)free_cursor.slot);
        advance_cursor(&free_cursor, page_list);
        free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
        asan_unpoison_object((VALUE)free_cursor.slot, false);
    void *scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
    asan_unpoison_object((VALUE)scan_cursor.slot, false);
    while (!gc_is_moveable_obj(objspace, (VALUE)scan_cursor.slot) && not_met(&free_cursor, &scan_cursor)) {
        if (scan_slot_poison) {
            asan_poison_object((VALUE)scan_cursor.slot);
        retreat_cursor(&scan_cursor, page_list);
        scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
        asan_unpoison_object((VALUE)scan_cursor.slot, false);
    if (not_met(&free_cursor, &scan_cursor)) {
        moved_list = gc_move(objspace, (VALUE)scan_cursor.slot, (VALUE)free_cursor.slot, moved_list);
        advance_cursor(&free_cursor, page_list);
        retreat_cursor(&scan_cursor, page_list);
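/*
 * The loop above is Ruby 2.7's "two fingers" compaction: a free cursor
 * sweeps forward looking for empty slots while a scan cursor sweeps
 * backward looking for movable objects, and each movable object is copied
 * into the lowest free slot until the cursors meet (pinned objects are
 * simply skipped by both cursors).  A minimal sketch of the same idea over
 * a flat array; all `sketch_` names are hypothetical:
 */
#include <stddef.h>

enum sketch_kind { SLOT_EMPTY, SLOT_PINNED, SLOT_MOVABLE };

struct sketch_slot { enum sketch_kind kind; int payload; };

static void
sketch_two_finger_compact(struct sketch_slot *heap, size_t len)
{
    size_t free_i = 0, scan_i = len ? len - 1 : 0;

    while (free_i < scan_i) {
        while (free_i < scan_i && heap[free_i].kind != SLOT_EMPTY)
            free_i++;                        /* advance_cursor() */
        while (free_i < scan_i && heap[scan_i].kind != SLOT_MOVABLE)
            scan_i--;                        /* retreat_cursor() */
        if (free_i < scan_i) {
            heap[free_i] = heap[scan_i];     /* gc_move() */
            heap[scan_i].kind = SLOT_EMPTY;  /* real code leaves a T_MOVED stub */
            free_i++;
            scan_i--;
        }
    }
}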
for (i = 0; i < len; i++) {
for (i = 0; i < len; i++) {
if (gc_object_moved_p(objspace, (VALUE)*key)) {
if (gc_object_moved_p(objspace, (VALUE)*value)) {
if (gc_object_moved_p(objspace, (VALUE)key)) {
if (gc_object_moved_p(objspace, (VALUE)value)) {
if (gc_object_moved_p(objspace, (VALUE)*value)) {
if (gc_object_moved_p(objspace, (VALUE)value)) {
gc_update_table_refs(objspace, ptr);
switch (def->type) {
for (i=0; i<n; i++) {
gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
check_id_table_move(ID id, VALUE value, void *data)
if (gc_object_moved_p(objspace, (VALUE)value)) {
void *poisoned = asan_poisoned_object_p(value);
asan_unpoison_object(value, false);
destination = value;
asan_poison_object(value);
destination = value;
update_id_table(ID *key, VALUE *value, void *data, int existing)
if (gc_object_moved_p(objspace, (VALUE)*value)) {
update_const_table(VALUE value, void *data)
if (gc_object_moved_p(objspace, ce->value)) {
if (gc_object_moved_p(objspace, ce->file)) {
entry = entry->next;
update_subclass_entries(objspace, ext->subclasses);
gc_report(4, objspace, "update-refs: %p ->", (void *)obj);
gc_ref_update_imemo(objspace, obj);
gc_ref_update_array(objspace, obj);
gc_ref_update_hash(objspace, obj);
if (compact_func) (*compact_func)(ptr);
gc_ref_update_object(objspace, obj);
if (any->as.match.str) {
for (i = 0; i < len; i++) {
gc_report(4, objspace, "update-refs: %p <-", (void *)obj);
gc_ref_update(void *vstart, void *vend, size_t stride, void *data)
asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);
for (; v != (VALUE)vend; v += stride) {
    void *poisoned = asan_poisoned_object_p(v);
    asan_unpoison_object(v, false);
    heap_page_add_freeobj(objspace, page, v);
    if (RVALUE_WB_UNPROTECTED(v)) {
    gc_update_object_references(objspace, v);
    asan_poison_object(v);
#define global_symbols ruby_global_symbols
rb_vm_t *vm = rb_ec_vm_ptr(ec);
objspace_each_objects_without_setup(objspace, gc_ref_update, objspace);
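/*
 * After compaction moves an object, the old slot becomes a T_MOVED stub
 * whose as.moved.destination records the new address; the gc_ref_update
 * pass above walks the whole heap rewriting every reference through that
 * forwarding pointer.  A minimal sketch of one slot update (all `sketch_`
 * names hypothetical):
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_cell {
    bool moved;                      /* has this cell become a forwarding stub? */
    struct sketch_cell *destination; /* valid only when moved is true */
};

/* Mirrors the gc_object_moved_p()/rb_gc_location() pattern: chase the
 * forwarding pointer once and rewrite the reference in place. */
static void
sketch_update_ref(struct sketch_cell **ref)
{
    if (*ref != NULL && (*ref)->moved)
        *ref = (*ref)->destination;
}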
static void gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier);
gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);
return gc_compact_stats(objspace);
root_obj_check_moved_i(const char *category, VALUE obj, void *data)
reachable_object_check_moved_i(VALUE ref, void *data)
rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
for (; v != (VALUE)vend; v += stride) {
    void *poisoned = asan_poisoned_object_p(v);
    asan_unpoison_object(v, false);
    asan_poison_object(v);
objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
objspace_each_objects(objspace, heap_check_moved_i, NULL);
gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
if (0) fprintf(stderr, "gc_compact_after_gc: %d,%d,%d\n", use_toward_empty, use_double_pages, use_verifier);
gc_verify_internal_consistency(objspace);
if (use_double_pages) {
VALUE moved_list_head;
if (use_toward_empty) {
    moved_list_head = gc_compact_heap(objspace, compare_free_slots);
    moved_list_head = gc_compact_heap(objspace, compare_pinned);
gc_update_references(objspace);
gc_check_references_for_moved(objspace);
while (moved_list_head) {
    next_moved = RMOVED(moved_list_head)->next;
    RMOVED(moved_list_head)->flags = 0;
    RMOVED(moved_list_head)->destination = 0;
    RMOVED(moved_list_head)->next = 0;
    heap_page_add_freeobj(objspace, page, moved_list_head);
    heap_unlink_page(objspace, heap_eden, page);
    heap_add_page(objspace, heap_tomb, page);
    moved_list_head = next_moved;
gc_verify_internal_consistency(objspace);
int use_toward_empty = FALSE;
int use_double_pages = FALSE;
static ID keyword_ids[2];
if (!keyword_ids[0]) {
    keyword_ids[1] = rb_intern("double_heap");
use_toward_empty = TRUE;
use_double_pages = TRUE;
gc_compact(objspace, use_toward_empty, use_double_pages, TRUE);
return gc_compact_stats(objspace);
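/*
 * The keyword parsing above ("double_heap", plus a second key selecting
 * compaction toward empty pages) appears to back
 * GC.verify_compaction_references in Ruby 2.7, which stresses compaction by
 * moving as many objects as possible and then checking that no stale
 * references remain:
 *
 *     GC.verify_compaction_references(toward: :empty, double_heap: true)
 */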
garbage_collect(objspace, reason);
#if RGENGC_PROFILE >= 2
gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
const char *type = type_name(i, 0);
gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
static VALUE sym_oldmalloc;
static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
static VALUE sym_none, sym_marking, sym_sweeping;
if (sym_major_by == Qnil) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
#if RGENGC_ESTIMATE_OLDMALLOC
#define SET(name, attr) \
    if (key == sym_##name) \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));
#if RGENGC_ESTIMATE_OLDMALLOC
SET(major_by, major_by);
if (orig_flags == 0) {
return gc_info_decode(objspace, key, 0);
return gc_info_decode(objspace, arg, 0);
#if RGENGC_ESTIMATE_OLDMALLOC
gc_stat_sym_total_generated_normal_object_count,
gc_stat_sym_total_generated_shady_object_count,
gc_stat_sym_total_shade_operation_count,
gc_stat_sym_total_promoted_count,
gc_stat_sym_total_remembered_normal_object_count,
gc_stat_sym_total_remembered_shady_object_count,
#if RGENGC_ESTIMATE_OLDMALLOC
static VALUE gc_stat_compat_table;
setup_gc_stat_symbols(void)
if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
    S(heap_sorted_length);
    S(heap_available_slots);
    S(heap_final_slots);
    S(heap_marked_slots);
    S(total_allocated_pages);
    S(total_freed_pages);
    S(total_allocated_objects);
    S(total_freed_objects);
    S(malloc_increase_bytes);
    S(malloc_increase_bytes_limit);
    S(remembered_wb_unprotected_objects);
    S(remembered_wb_unprotected_objects_limit);
    S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
    S(oldmalloc_increase_bytes);
    S(oldmalloc_increase_bytes_limit);
    S(total_generated_normal_object_count);
    S(total_generated_shady_object_count);
    S(total_shade_operation_count);
    S(total_promoted_count);
    S(total_remembered_normal_object_count);
    S(total_remembered_shady_object_count);
#define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
    S(gc_stat_heap_used);
    S(heap_eden_page_length);
    S(heap_tomb_page_length);
    S(remembered_shady_object);
    S(remembered_shady_object_limit);
    S(old_object_limit);
    S(total_allocated_object);
    S(total_freed_object);
#if RGENGC_ESTIMATE_OLDMALLOC
    S(oldmalloc_increase);
#define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
#define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
#if RGENGC_ESTIMATE_OLDMALLOC
if (!NIL_P(new_key)) {
    static int warned = 0;
    rb_warn("GC.stat keys were changed from Ruby 2.1. "
            "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
if ((new_key = compat_key(key)) != Qnil) {
gc_stat_internal(VALUE hash_or_sym)
setup_gc_stat_symbols();
static VALUE default_proc_for_compat = 0;
if (default_proc_for_compat == 0) {
    default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
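/*
 * Reading this SET() macro: gc_stat_internal() serves two call shapes.
 * When the caller passed a Symbol, the first branch returns that single
 * counter immediately; when the caller passed a Hash, each SET() line
 * stores its counter under the matching key.  So GC.stat(:heap_live_slots)
 * and GC.stat(hash)[:heap_live_slots] are produced by the same table of
 * SET() lines below.
 */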
SET(heap_available_slots, objspace_available_slots(objspace));
SET(heap_live_slots, objspace_live_slots(objspace));
SET(heap_free_slots, objspace_free_slots(objspace));
#if RGENGC_ESTIMATE_OLDMALLOC
SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
SET(total_promoted_count, objspace->profile.total_promoted_count);
SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
if ((new_key = compat_key(key)) != Qnil) {
#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
size_t value = gc_stat_internal(arg);
gc_stat_internal(arg);
size_t value = gc_stat_internal(key);
gc_stat_internal(key);
gc_stress_set(objspace, flag);
return gc_disable_no_rest(objspace);
return gc_disable_no_rest(objspace);
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
unit = 1024*1024*1024;
while (*end && isspace((unsigned char)*end)) end++;
if (val > 0 && (size_t)val > lower_bound) {
    *default_value = (size_t)val;
        name, val, *default_value, lower_bound);
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
if (!*ptr || *end) {
if (accept_zero && val == 0.0) {
else if (val <= lower_bound) {
    fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
            name, val, *default_value, lower_bound);
else if (upper_bound != 0.0 && val > upper_bound) {
    fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
            name, val, *default_value, upper_bound);
*default_value = val;
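/*
 * The two helpers above define the parsing contract for GC tuning
 * variables: a value is accepted only when the whole string parses and it
 * clears the bounds; otherwise the compiled-in default is kept.  A
 * simplified, self-contained sketch of the double-valued variant
 * (hypothetical name, no warning output):
 */
#include <stdlib.h>

static double
sketch_env_double(const char *name, double dflt, double lower, double upper)
{
    const char *s = getenv(name);
    char *end;
    double val;

    if (s == NULL || *s == '\0') return dflt;
    val = strtod(s, &end);
    if (*end != '\0') return dflt;                /* trailing junk: reject */
    if (val <= lower) return dflt;                /* must exceed the lower bound */
    if (upper != 0.0 && val > upper) return dflt; /* 0.0 means "unbounded" */
    return val;
}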
gc_set_initial_pages(void)
if (min_pages > heap_eden->total_pages) {
if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
    rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
    gc_set_initial_pages();
else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
    rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
    gc_set_initial_pages();
get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
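/*
 * These parameters are read from the process environment before the VM
 * boots, e.g.:
 *
 *     RUBY_GC_HEAP_INIT_SLOTS=600000 \
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.2 \
 *     RUBY_GC_MALLOC_LIMIT=67108864 ruby app.rb
 *
 * Values that fail the get_envparam_* checks above are reported to stderr
 * and ignored, leaving the compiled-in defaults in force.
 */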
#if RGENGC_ESTIMATE_OLDMALLOC
if (is_markable_object(objspace, obj)) {
    struct mark_func_data_struct mfd;
    mfd.mark_func = func;
    gc_mark_children(objspace, obj);
objspace_reachable_objects_from_root(objspace, func, passing_data);
struct mark_func_data_struct mfd;
data.data = passing_data;
mfd.mark_func = root_objects_from;
gc_mark_roots(objspace, &data.category);
gc_vraise(void *ptr)
negative_size_allocation_error(const char *msg)
ruby_memerror_body(void *dummy)
if (during_gc) gc_exit(objspace, "rb_memerror");
#if defined __MINGW32__
res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
void *_aligned_malloc(size_t, size_t);
res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
res = malloc(alignment + size + sizeof(void*));
aligned = (char*)res + alignment + sizeof(void*);
aligned -= ((VALUE)aligned & (alignment - 1));
((void**)aligned)[-1] = res;
res = (void*)aligned;
GC_ASSERT(((alignment - 1) & alignment) == 0);
GC_ASSERT(alignment % sizeof(void*) == 0);
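/*
 * The portable fallback branch above implements aligned allocation by
 * hand: over-allocate, round the pointer up to the alignment boundary, and
 * stash the original malloc() result one word below the returned block so
 * the matching free can find it.  A stand-alone sketch (sketch_* names are
 * hypothetical; alignment must be a power of two and a multiple of
 * sizeof(void *), as the GC_ASSERTs above require):
 */
#include <stdint.h>
#include <stdlib.h>

static void *
sketch_aligned_malloc(size_t alignment, size_t size)
{
    char *raw = malloc(alignment + size + sizeof(void *));
    char *aligned;

    if (raw == NULL) return NULL;
    aligned = raw + alignment + sizeof(void *);
    aligned -= (uintptr_t)aligned & (alignment - 1); /* round down to boundary */
    ((void **)aligned)[-1] = raw;                    /* remember the raw block */
    return aligned;
}

static void
sketch_aligned_free(void *ptr)
{
    if (ptr != NULL) free(((void **)ptr)[-1]);
}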
rb_aligned_free(void *ptr)
#if defined __MINGW32__
__mingw_aligned_free(ptr);
#elif defined _WIN32
#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
static inline size_t
#ifdef HAVE_MALLOC_USABLE_SIZE
return malloc_usable_size(ptr);
atomic_sub_nounderflow(size_t *var, size_t sub)
if (sub == 0) return;
if (val < sub) sub = val;
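/*
 * atomic_sub_nounderflow() clamps the amount subtracted so that racing
 * bookkeeping decrements can never wrap a size_t below zero.  A C11 sketch
 * of the same compare-and-swap retry loop (gc.c itself uses the ATOMIC_*
 * wrappers from ruby_atomic.h rather than <stdatomic.h>):
 */
#include <stdatomic.h>
#include <stddef.h>

static void
sketch_sub_nounderflow(_Atomic size_t *var, size_t sub)
{
    size_t val = atomic_load(var);

    while (sub > 0) {
        size_t take = sub > val ? val : sub;  /* clamp to the current value */
        if (atomic_compare_exchange_weak(var, &val, val - take))
            break;  /* on failure, val was reloaded; retry with a new clamp */
    }
}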
garbage_collect_with_gvl(objspace, reason);
if (new_size > old_size) {
#if RGENGC_ESTIMATE_OLDMALLOC
#if RGENGC_ESTIMATE_OLDMALLOC
#if MALLOC_ALLOCATED_SIZE
if (new_size >= old_size) {
size_t dec_size = old_size - new_size;
size_t allocated_size = objspace->malloc_params.allocated_size;
#if MALLOC_ALLOCATED_SIZE_CHECK
if (allocated_size < dec_size) {
    rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
               (int)new_size, (int)old_size);
if (allocations > 0) {
    atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
#if MALLOC_ALLOCATED_SIZE_CHECK
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;
static inline size_t
#if CALC_EXACT_MALLOC_SIZE
static inline void *
size = objspace_malloc_size(objspace, mem, size);
#if CALC_EXACT_MALLOC_SIZE
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
info->file = ruby_malloc_info_file;
info->line = info->file ? ruby_malloc_info_line : 0;
#define TRY_WITH_GC(alloc) do { \
        objspace_malloc_gc_stress(objspace); \
        (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
                                   GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
                                   GPR_FLAG_MALLOC) || \
size = objspace_malloc_prepare(objspace, size);
return objspace_malloc_fixup(objspace, mem, size);
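/*
 * TRY_WITH_GC() encodes the allocator's recovery policy: attempt the
 * allocation, and if it fails, force a full immediate GC and try exactly
 * once more before giving up (which ultimately raises NoMemoryError).
 * The shape of the expansion, sketched with a stand-in allocator and
 * collector (both sketch_* names hypothetical):
 */
#include <stdlib.h>

/* Stand-in for garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | ...);
 * returns nonzero when a collection could actually be run. */
static int
sketch_full_gc(void)
{
    return 1;
}

static void *
sketch_try_with_gc(size_t size)
{
    void *mem = malloc(size);

    if (mem == NULL && sketch_full_gc()) {
        mem = malloc(size);  /* second and final attempt, after the GC */
    }
    return mem;              /* NULL here would become rb_memerror() */
}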
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
if (!ptr) return objspace_xmalloc0(objspace, new_size);
if (new_size == 0) {
    if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
objspace_xfree(objspace, ptr, old_size);
#if CALC_EXACT_MALLOC_SIZE
old_size = info->size;
old_size = objspace_malloc_size(objspace, ptr, old_size);
new_size = objspace_malloc_size(objspace, mem, new_size);
#if CALC_EXACT_MALLOC_SIZE
info->size = new_size;
#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
static st_table *malloc_info_file_table;
const char *file = (void *)key;
const size_t *data = (void *)val;
fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);
for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
    if (i == MALLOC_INFO_GEN_SIZE-1) {
        fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
        fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
if (malloc_info_file_table) {
    st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
#if CALC_EXACT_MALLOC_SIZE
old_size = info->size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
malloc_info_gen_cnt[gen_index]++;
malloc_info_gen_size[gen_index] += info->size;
for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
    size_t s = 16 << i;
    if (info->size <= s) {
        malloc_info_size[i]++;
malloc_info_size[i]++;
if (malloc_info_file_table == NULL) {
data = malloc(xmalloc2_size(2, sizeof(size_t)));
if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
data[0] = data[1] = 0;
data[1] += info->size;
fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
old_size = objspace_malloc_size(objspace, ptr, old_size);
ruby_xmalloc0(size_t size)
negative_size_allocation_error("too large allocation size");
return ruby_xmalloc0(size);
size = objspace_malloc_prepare(objspace, size);
return objspace_malloc_fixup(objspace, mem, size);
#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
negative_size_allocation_error("too large allocation size");
return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
size_t len = xmalloc2_size(n, size);
#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
#if CALC_EXACT_MALLOC_SIZE
#if CALC_EXACT_MALLOC_SIZE
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
#if CALC_EXACT_MALLOC_SIZE
imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
#if MALLOC_ALLOCATED_SIZE
gc_malloc_allocated_size(VALUE self)
gc_malloc_allocations(VALUE self)
else if (diff < 0) {
#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
wmap_compact(void *ptr)
wmap_mark(void *ptr)
#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
wmap_free(void *ptr)
wmap_memsize(const void *ptr)
if (!is_id_value(objspace, obj)) return FALSE;
if (!is_live_object(objspace, obj)) return FALSE;
if (!existing) return ST_STOP;
if (ptr[i] != wmap) {
rb_bug("wmap_finalize: objid is not found.");
rids = (VALUE *)data;
wmap_inspect(VALUE self)
if (wmap_live_p(objspace, obj)) {
wmap_each(VALUE self)
if (wmap_live_p(objspace, obj)) {
wmap_each_key(VALUE self)
if (wmap_live_p(objspace, obj)) {
wmap_each_value(VALUE self)
wmap_keys(VALUE self)
wmap_values(VALUE self)
ptr = ruby_xmalloc0(2 * sizeof(VALUE));
define_final0(orig, w->final);
define_final0(wmap, w->final);
if (!wmap_live_p(objspace, obj)) return Qnil;
wmap_size(VALUE self)
#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
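/*
 * Design note on the wmap_* functions above: an ObjectSpace::WeakMap keeps
 * its entries in plain st_tables rather than marking them, and appears to
 * attach a finalizer (w->final) to the objects on both sides of each pair.
 * When either side is collected, wmap_finalize() removes the stale entries,
 * and the iterators additionally skip anything that fails wmap_live_p().
 * That is what makes the references "weak": the map never keeps its
 * contents alive.
 */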
#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
getrusage_time(void)
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
static int try_clock_gettime = 1;
try_clock_gettime = 0;
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) == 0) {
    time = usage.ru_utime;
    return time.tv_sec + time.tv_usec * 1e-6;
FILETIME creation_time, exit_time, kernel_time, user_time;
if (GetProcessTimes(GetCurrentProcess(),
                    &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
    memcpy(&ui, &user_time, sizeof(FILETIME));
    q = ui.QuadPart / 10L;
    t = (DWORD)(q % 1000000L) * 1e-6;
t += (DWORD)q & ~(~0 << 16);
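/*
 * getrusage_time() above tries progressively less precise per-process CPU
 * clocks: clock_gettime(CLOCK_PROCESS_CPUTIME_ID) first (permanently
 * disabled via try_clock_gettime after one failure), then getrusage()
 * user time, then GetProcessTimes() on Windows, whose 100ns QuadPart ticks
 * are divided by 10 into microseconds before the conversion to seconds.
 */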
gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
rb_bug("gc_profile malloc or realloc miss");
#if MALLOC_ALLOCATED_SIZE
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) == 0) {
    record->maxrss = usage.ru_maxrss;
    record->minflt = usage.ru_minflt;
    record->majflt = usage.ru_majflt;
#if GC_PROFILE_MORE_DETAIL
record->prepare_time = objspace->profile.prepare_time;
elapsed_time_from(double time)
double now = getrusage_time();
#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
#if GC_PROFILE_MORE_DETAIL
#if GC_PROFILE_MORE_DETAIL
record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
record->gc_time += sweep_time;
#if GC_PROFILE_MORE_DETAIL
record->gc_sweep_time += sweep_time;
#if GC_PROFILE_MORE_DETAIL
#if GC_PROFILE_MORE_DETAIL
record->heap_live_objects = live;
record->heap_free_objects = total - live;
gc_profile_clear(VALUE _)
gc_profile_record_get(VALUE _)
#if GC_PROFILE_MORE_DETAIL
#if RGENGC_PROFILE > 0
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10
gc_profile_dump_major_reason(int flags, char *buff)
if (reason & GPR_FLAG_MAJOR_BY_##x) { \
    buff[i++] = #x[0]; \
    if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
#if RGENGC_ESTIMATE_OLDMALLOC
#ifdef MAJOR_REASON_MAX
char reason_str[MAJOR_REASON_MAX];
append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
#if GC_PROFILE_MORE_DETAIL
"Prepare Time = Previously GC's rest sweep time\n"
"Index Flags Allocate Inc. Allocate Limit"
" Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
" OldgenObj RemNormObj RemShadObj"
" MaxRSS(KB) MinorFLT MajorFLT"
gc_profile_dump_major_reason(record->flags, reason_str),
record->allocate_increase, record->allocate_limit,
record->allocated_size,
record->heap_use_pages,
record->gc_mark_time*1000,
record->gc_sweep_time*1000,
record->prepare_time*1000,
record->heap_live_objects,
record->heap_free_objects,
record->removing_objects,
record->empty_objects
record->old_objects,
record->remembered_normal_objects,
record->remembered_shady_objects
record->maxrss / 1024,
gc_profile_result(VALUE _)
gc_profile_total_time(VALUE self)
gc_profile_enable_get(VALUE self)
gc_profile_enable(VALUE _)
gc_profile_disable(VALUE _)
static const char *
#define TYPE_NAME(t) case (t): return #t;
static const char *
rb_bug("rb_method_type_name: unreachable (type: %d)", type);
# define ARY_SHARED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary),ELTS_SHARED)!=0)
# define ARY_EMBED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
snprintf(buff, buff_size, " %s@%s:%d",
#define BUFF_ARGS buff + pos, buff_size - pos
#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
obj_type_name(obj)));
obj_type_name(obj)));
obj_type_name(obj)));
if (internal_object_p(obj)) {
if (!NIL_P(class_path)) {
if (!NIL_P(class_path)) {
if (!NIL_P(class_path)) {
(block = vm_proc_block(obj)) != NULL &&
(iseq = vm_block_iseq(block)) != NULL) {
const char *imemo_name = "\0";
#define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM 10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
static const char *
const int index = obj_info_buffers_index++;
char *const buff = &obj_info_buffers[index][0];
if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
    obj_info_buffers_index = 0;
static const char *
return obj_type_name(obj);
return obj_info(obj);
if (is_pointer_to_heap(objspace, (void *)obj)) {
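/*
 * obj_info() returns pointers into the small static ring above so that up
 * to OBJ_INFO_BUFFERS_NUM results can appear in a single gc_report() or
 * fprintf() call without overwriting one another.  The pattern, sketched
 * with hypothetical names:
 */
#include <stdio.h>

#define SKETCH_BUFS 4
#define SKETCH_BUFSIZE 64

static const char *
sketch_obj_info(int value)
{
    static char bufs[SKETCH_BUFS][SKETCH_BUFSIZE];
    static int idx = 0;
    char *buff = bufs[idx];

    idx = (idx + 1) % SKETCH_BUFS;  /* rotate: older results eventually expire */
    snprintf(buff, SKETCH_BUFSIZE, "<value:%d>", value);
    return buff;
}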
fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
#if GC_DEBUG_STRESS_TO_CLASS
#include "gc.rbinc"
VALUE rb_mObjSpace;
VALUE rb_mProfiler;
VALUE gc_constants;
#if MALLOC_ALLOCATED_SIZE
#if GC_DEBUG_STRESS_TO_CLASS
#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
#ifdef ruby_xmalloc
#undef ruby_xmalloc
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#ifdef ruby_xcalloc
#undef ruby_xcalloc
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
ruby_malloc_info_file = __FILE__;
ruby_malloc_info_line = __LINE__;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
ruby_malloc_info_file = __FILE__;
ruby_malloc_info_line = __LINE__;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
ruby_malloc_info_file = __FILE__;
ruby_malloc_info_line = __LINE__;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
ruby_malloc_info_file = __FILE__;
ruby_malloc_info_line = __LINE__;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
ruby_malloc_info_file = __FILE__;
ruby_malloc_info_line = __LINE__;
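/*
 * Each of the public allocator wrappers above stores __FILE__/__LINE__ into
 * ruby_malloc_info_file/ruby_malloc_info_line before delegating, so that
 * when USE_GC_MALLOC_OBJ_INFO_DETAILS is enabled every allocation header
 * can be attributed to its call site; those headers feed the per-file and
 * per-size statistics that appear to be dumped to stderr by the
 * malloc_info_* reporting code earlier in this file.
 */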